Wrote out chapters 2-4
This commit is contained in:
@@ -1 +1,29 @@
|
||||
\section{Elementary Matrix Operations and Elementary Matrices}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix. Any one of the following three operations on the rows [columns] of $A$ is called an \textbf{elementary row [column] operation}:
|
||||
|
||||
\begin{enumerate}
|
||||
\item interchanging any two rows [columns] of $A$;
|
||||
\item multiplying any row [column] of $A$ by a nonzero scalar;
|
||||
\item adding any scalar multiple of a row [column] of $A$ to another row [column].
|
||||
\end{enumerate}
|
||||
|
||||
Any of these three operations are called an \textbf{elementary operation}. Elementary operations are of \textbf{type 1}, \textbf{type 2}, or \textbf{type 3} depending on whether they are obtained by (1), (2), or (3).
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
An $n \times n$ \textbf{elementary matrix} is a matrix obtained by performing an elementary operation on $I_n$. The elementary matrix is said to be of \textbf{type 1}, \textbf{2}, or \textbf{3} according to whether the elementary operation performed on $I_n$ is a type 1, 2, or 3 operation, respectively.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $A \in M_{m \times n}(\F)$, and suppose that $B$ is obtained from $A$ by performing an elementary row [column] operation. Then there exists an $m \times m$ [$n \times n$] elementary matrix $E$ such that $B = EA$ [$B = AE$]. In fact, $E$ is obtained from $I_m$ [$I_n$] by performing the same elementary row [column] operation as that which was performed on $A$ to obtain $B$. Conversely, if $E$ is an elementary $m \times m$ [$n \times n$] matrix, then $EA$ [$AE$] is the matrix obtained from $A$ by performing the same elementary row [column] operation as that which produces $E$ from $I_m$ [$I_n$].
|
||||
\end{theorem}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Elementary matrices are invertible, and the inverse of an elementary matrix is an elementary matrix of the same type.
|
||||
\end{theorem}
|
||||
@@ -1 +1,84 @@
|
||||
\section{Systems of Linear Equations -- Computational Aspects}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Two systems of linear equations are called \textbf{equivalent} if they have the same solution set.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $Ax = b$ be a system of $m$ linear equations in $n$ unknowns, and let $C$ be an invertible $m \times m$ matrix. Then the system $(CA)x = Cb$ is equivalent to $Ax = b$.
|
||||
\end{theorem}
|
||||
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
Let $Ax = b$ be a system of $m$ linear equations in $n$ unknowns. If $(A'|b')$ is obtained from $(A|b)$ by a finite number of elementary row operations, then the system $A'x = b'$ is equivalent to the original system.
|
||||
\end{corollary}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A matrix is said to be in \textbf{reduced row echelon form} if the following three conditions are satisfied.
|
||||
|
||||
\begin{enumerate}
|
||||
\item Any row containing a nonzero entry precedes any row in which all the entries are zero (if any).
|
||||
|
||||
\item The first nonzero entry in each row is the only nonzero entry in its column.
|
||||
|
||||
\item The first nonzero entry in each row is 1 and it occurs in a column to the right of the first nonzero entry in the preceding row.
|
||||
\end{enumerate}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
The following procedure for reducing an augmented matrix to reduced row echelon form is called \textbf{Gaussian elimination}. It consists of two separate parts.
|
||||
|
||||
\begin{enumerate}
|
||||
\item In the \textit{forward pass}, the augmented matrix is transformed into an upper triangular matrix in which the first nonzero entry of each row is $1$, and it occurs in a column to the right of the first nonzero entry in the preceding row.
|
||||
|
||||
\item In the \textit{backward pass} or \textit{back-substitution}, the upper triangular matrix is transformed into reduced row echelon form by making the first nonzero entry of each row the only nonzero entry of its column.
|
||||
\end{enumerate}
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Gaussian elimination transforms any matrix into its reduced row echelon form.
|
||||
\end{theorem}
|
||||
|
||||
\begin{definition}
|
||||
A solution to a system of equations of the form
|
||||
|
||||
\[s = s_0 + t_1u_1 + t_2u_2 + \dots +t_{n-r}u_{n-r},\]
|
||||
|
||||
where $r$ is the number of nonzero rows in $A'$ ($r \leq m$), is called a \textbf{general solution} of the system $Ax = b$. It expresses an arbitrary solution $s$ of $Ax = b$ in terms of $n - r$ parameters.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $Ax = b$ be a system of $r$ nonzero equations in $n$ unknowns. Suppose that $\rank{A} = \rank{A|b}$ and that $(A|b)$ is in reduced row echelon form. Then
|
||||
|
||||
\begin{enumerate}
|
||||
\item $\rank{A} = r$.
|
||||
\item If the general solution obtained by the procedure above is of the form
|
||||
|
||||
\[s = s_0 + t_1u_1 + t_2u_2 + \dots + t_{n-r}u_{n-r},\]
|
||||
|
||||
then $\{u_1, u_2, \dots, u_{n-r}\}$ is a basis for the solution set of the corresponding homogeneous system, and $s_0$ is a solution to the original system.
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix of rank $r$, where $r > 0$, and let $B$ be the reduced row echelon form of $A$. Then
|
||||
|
||||
\begin{enumerate}
|
||||
\item The number of nonzero rows in $B$ is $r$.
|
||||
\item For each $i = 1, 2, \dots, r$, there is a column $b_{j_i}$ of $B$ such that $b_{j_i} = e_i$.
|
||||
\item The columns of $A$ numbered $j_1, j_2, \dots, j_r$ are linearly independent.
|
||||
\item For each $k = 1, 2, \dots, n$, if column $k$ of $B$ is $d_1e_1+d_2e_2+\dots+d_re_r$, then column $k$ of $A$ is $d_1a_{j_1} + d_2a_{j_2} + \dots + d_ra_{j_r}$.
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
The reduced row echelon form of a matrix is unique.
|
||||
\end{corollary}
|
||||
@@ -1 +1,128 @@
|
||||
\section{Systems of Linear Equations -- Theoretical Aspects}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
The system of equations
|
||||
|
||||
\begin{equation}\label{eq:S}
|
||||
\tag{S}
|
||||
\begin{split}
|
||||
a_{11}x_1 + a_{12}x_2 + \dots + a_{1n}x_n = b_1\\
|
||||
a_{21}x_1 + a_{22}x_2 + \dots + a_{2n}x_n = b_2\\
|
||||
\vdots \\
|
||||
a_{m1}x_1 + a_{m2}x_2 + \dots + a_{mn}x_n = b_m,
|
||||
\end{split}
|
||||
\end{equation}
|
||||
|
||||
where $a_{ij}$ and $b_i$ ($1 \leq i \leq m$ and $1 \leq j \leq n$) are scalars in a field $\F$ and $x_1, x_2, \dots, x_n$ are $n$ variables taking values in $\F$, is called a \textbf{system of $m$ linear equations in $n$ unknowns over the field $\F$}.
|
||||
|
||||
The $m \times n$ matrix
|
||||
|
||||
\[\begin{pmatrix}
|
||||
a_{11} & a_{12} & \dots & a_{1n} \\
|
||||
a_{21} & a_{22} & \dots & a_{2n} \\
|
||||
\vdots & \vdots & & \vdots \\
|
||||
a_{m1} & a_{m2} & \dots & a_{mn}
|
||||
\end{pmatrix}\]
|
||||
|
||||
is called the \textbf{coefficient matrix} of the system \eqref{eq:S}.
|
||||
|
||||
If we let
|
||||
|
||||
\[x = \begin{pmatrix}
|
||||
x_1 \\ x_2 \\ \vdots \\ x_n
|
||||
\end{pmatrix}\ \ \text{and}\ \ b = \begin{pmatrix}
|
||||
b_1 \\ b_2 \\ \vdots \\ b_m
|
||||
\end{pmatrix},\]
|
||||
|
||||
then the system \eqref{eq:S} may be rewritten as a single matrix equation
|
||||
|
||||
\[Ax = b.\]
|
||||
|
||||
To exploit the results that we have developed, we often consider a system of linear equations as a single matrix equation.
|
||||
|
||||
A \textbf{solution} to the system \eqref{eq:S} is an $n$-tuple
|
||||
|
||||
\[s = \begin{pmatrix}
|
||||
s_1 \\ s_2 \\ \vdots \\ s_n
|
||||
\end{pmatrix} \in \F^n\]
|
||||
|
||||
such that $As = b$. The set of all solutions to the system \eqref{eq:S} is called the \textbf{solution set} of the system. System \eqref{eq:S} is called \textbf{consistent} if its solution set is nonempty; otherwise it is called \textbf{inconsistent}.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A system $Ax = b$ of $m$ linear equations in $n$ unknowns is said to be \textbf{homogeneous} if $b = 0$. Otherwise the system is said to be \textbf{nonhomogeneous}.\\
|
||||
|
||||
Any homogeneous system has at least one solution, namely, the zero vector.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $Ax = 0$ be a homogeneous system of $m$ linear equations in $n$ unknowns over a field $\F$. Let $K$ denote the set of all solutions to $Ax = 0$. Then $K = \n{L_A}$; hence $K$ is a subspace of $\F^n$ of dimension $n - \rank{L_A} = n - \rank{A}$.
|
||||
\end{theorem}
|
||||
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
If $m < n$, the system $Ax = 0$ has a nonzero solution.
|
||||
\end{corollary}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
We refer to the equation $Ax = 0$ as the \textbf{homogeneous system corresponding to $Ax = b$}.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $K$ be the solution set of a system of linear equations $Ax = b$, and let $\mathsf{K}_\mathsf{H}$ be the solution set of the corresponding homogeneous system $Ax = 0$. Then for any solution $s$ to $Ax = b$,
|
||||
|
||||
\[K = \{s\} + \mathsf{K}_\mathsf{H} = \{s + k: k \in \mathsf{K}_\mathsf{H}\}.\]
|
||||
\end{theorem}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $Ax = b$ be a system of $n$ linear equations in $n$ unknowns. If $A$ is invertible, then the system has exactly one solution, namely, $A^{-1}b$. Conversely, if the system has exactly one solution, then $A$ is invertible.
|
||||
\end{theorem}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
The matrix $(A|b)$ is called the \textbf{augmented matrix of the system $Ax = b$}.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $Ax = b$ be a system of linear equations. Then the system is consistent if and only if $\rank{A} = \rank{A|b}$.
|
||||
\end{theorem}
|
||||
|
||||
\begin{definition}
|
||||
Consider a system of linear equations
|
||||
|
||||
\[\begin{split}
|
||||
a_{11}p_1 + a_{12}p_2 + \dots + a_{1m}p_m = p_1 \\
|
||||
a_{21}p_1 + a_{22}p_2 + \dots + a_{2m}p_m = p_2 \\
|
||||
\vdots \\
|
||||
a_{m1}p_1 + a_{m2}p_2 + \dots + a_{mm}p_m = p_m
|
||||
\end{split}\]
|
||||
|
||||
This system can be written as $Ap = p$, where
|
||||
|
||||
\[p = \begin{pmatrix}
|
||||
p_1 \\ p_2 \\ \vdots \\ p_m
|
||||
\end{pmatrix}\]
|
||||
|
||||
and $A$ is the coefficient matrix of the system. In this context, $A$ is called the \textbf{input-output (or consumption) matrix}, and $Ap = p$ is called the \textbf{equilibrium condition}.
|
||||
|
||||
For vectors $b = (b_1, b_2, \dots, b_n)$ and $c = (c_1, c_2, \dots, c_n)$ in $\R^n$, we use the notation $b \geq c$ [$b > c$] to mean $b_i \geq c_i$ [$b_i > c_i$] for all $i$. The vector $b$ is called \textbf{nonnegative [positive]} if $b \geq 0$ [$b > 0$].
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $A$ be an $n \times n$ input-output matrix having the form
|
||||
|
||||
\[A = \begin{pmatrix}
|
||||
B & C \\
|
||||
D & E
|
||||
\end{pmatrix},\]
|
||||
|
||||
where $D$ is a $1 \times (n -1)$ positive vector and $C$ is an $(n-1)\times 1$ positive vector. Then $(I -A)x = 0$ has a one-dimensional solution set that is generated by a nonnegative vector.
|
||||
\end{theorem}
|
||||
@@ -1 +1,88 @@
|
||||
\section{The Rank of a Matrix and Matrix Inverses}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
If $A \in M_{m \times n}(\F)$, we define the \textbf{rank} of $A$, denoted $\rank{A}$, to be the rank of the linear transformation $L_A: \F^n \to \F^m$.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $T: V \to W$ be a linear transformation between finite-dimensional vector spaces, and let $\beta$ and $\gamma$ be ordered bases for $V$ and $W$, respectively. Then $\rank{T} = \rank{[T]_\beta^\gamma}$.
|
||||
\end{theorem}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix. If $P$ and $Q$ are invertible $m \times m$ and $n \times n$ matrices, respectively, then
|
||||
|
||||
\begin{enumerate}
|
||||
\item $\rank{AQ} = \rank{A}$,
|
||||
\item $\rank{PA} = \rank{A}$,\\ and therefore
|
||||
\item $\rank{PAQ} = \rank{A}$.
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
Elementary row and column operations on a matrix are rank preserving.
|
||||
\end{corollary}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
The rank of any matrix equals the maximum number of its linearly independent columns; that is, the rank of a matrix is the dimension of the subspace generated by its columns.
|
||||
\end{theorem}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix of rank $r$. Then $r \leq m$, $r \leq n$, and, by means of a finite number of elementary row and column operations, $A$ can be transformed into the matrix
|
||||
|
||||
\[D = \begin{pmatrix}
|
||||
I_r & O_1 \\
|
||||
O_2 & O_3
|
||||
\end{pmatrix}\]
|
||||
|
||||
where $O_1$, $O_2$, and $O_3$ are zero matrices. Thus $D_{ii} = 1$ for $i \leq r$ and $D_{ij} = 0$ otherwise.
|
||||
\end{theorem}
|
||||
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix of rank $r$. Then there exist invertible matrices $B$ and $C$ of sizes $m \times m$ and $n \times n$, respectively, such that $D=BAC$, where
|
||||
|
||||
\[D = \begin{pmatrix}
|
||||
I_r & O_1 \\
|
||||
O_2 & O_3
|
||||
\end{pmatrix}\]
|
||||
is the $m \times n$ matrix in which $O_1$, $O_2$, and $O_3$ are zero matrices.
|
||||
\end{corollary}
|
||||
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix. Then
|
||||
|
||||
\begin{enumerate}
|
||||
\item $\rank{A^t} = \rank{A}$.
|
||||
\item The rank of any matrix equals the maximum number of its linearly independent rows; that is, the rank of a matrix is the dimension of the subspace generated by its rows.
|
||||
\item The rows and columns of any matrix generate subspaces of the same dimension, numerically equal to the rank of the matrix.
|
||||
\end{enumerate}
|
||||
\end{corollary}
|
||||
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
Every invertible matrix is a product of elementary matrices.
|
||||
\end{corollary}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $T: V \to W$ and $U: W \to Z$ be linear transformations on finite-dimensional vector spaces $V$, $W$, and $Z$, and let $A$ and $B$ be matrices such that the product $AB$ is defined. Then
|
||||
|
||||
\begin{enumerate}
|
||||
\item $\rank{UT} \leq \rank{U}$.
|
||||
\item $\rank{UT} \leq \rank{T}$.
|
||||
\item $\rank{AB} \leq \rank{A}$.
|
||||
\item $\rank{AB} \leq \rank{B}$.
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $A$ and $B$ be $m \times n$ and $m \times p$ matrices, respectively. By the \textbf{augmented matrix} $(A|B)$, we mean the $m \times (n + p)$ matrix $(A\ B)$, that is, the matrix whose first $n$ columns are the columns of $A$, and whose last $p$ columns are the columns of $B$.
|
||||
\end{definition}
|
||||
Reference in New Issue
Block a user