Wrote out chapters 2-4

This commit is contained in:
2024-02-20 16:06:34 -07:00
parent 33101973d4
commit 85f421fca3
22 changed files with 1440 additions and 14 deletions
@@ -1 +1,120 @@
\section{Compositions of Linear Transformations and Matrix Multiplication}
\begin{theorem}
\hfill\\
Let $V$, $W$, and $Z$ be vector spaces over the same field $\F$, and let $T: V \to W$ and $U: W \to Z$ be linear. Then $UT: V \to Z$ is linear.
\end{theorem}
\begin{theorem}
\hfill\\
Let $V$ be a vector space. Let $T, U_1, U_2 \in \LL(V)$. Then
\begin{enumerate}
\item $T(U_1 + U_2) = TU_1 + TU_2$ and $(U_1 + U_2)T = U_1T + U_2T$
\item $T(U_1U_2) = (TU_1)U_2$
\item $TI = IT = T$
\item $a(U_1U_2) = (aU_1)U_2 = U_1(aU_2)$ for all scalars $a$.
\end{enumerate}
\end{theorem}
\begin{definition}
\hfill\\
Let $A$ be an $m \times n$ matrix and $B$ be an $n \times p$ matrix. We define the \textbf{product} of $A$ and $B$, denoted $AB$, to be the $m \times p$ matrix such that
\[(AB)_{ij} = \sum_{k=1}^{n}A_{ik}B_{kj}\ \ \text{for}\ \ 1 \leq i \leq m,\ \ 1 \leq j \leq p.\]
Notice that $(AB)_{ij}$ is the sum of products of corresponding entries from the $i$th row of $A$ and the $j$th column of $B$.\\
The reader should observe that in order for the product $AB$ to be defined, there are restrictions regarding the relative sizes of $A$ and $B$. The following mnemonic device is helpful: ``$(m \times n) \cdot (n \times p) = (m \times p)$''; that is, in order for the product $AB$ to be defined, the two ``inner'' dimensions must be equal, and the two ``outer'' dimensions yield the size of the product.
\end{definition}
\begin{theorem}
\hfill\\
Let $V$, $W$, and $Z$ be finite-dimensional vector spaces with ordered bases $\alpha$, $\beta$, and $\gamma$, respectively. Let $T: V \to W$ and $U: W \to Z$ be linear transformations. Then
\[[UT]_\alpha^\gamma = [U]_\beta^\gamma[T]_\alpha^\beta\]
\end{theorem}
\begin{corollary}
\hfill\\
Let $V$ be a finite-dimensional vector space with an ordered basis $\beta$. Let $T, U \in \LL(V)$. Then $[UT]_\beta = [U]_\beta [T]_\beta$.
\end{corollary}
\begin{definition}
\hfill\\
We define the \textbf{Kronecker delta} $\delta_{ij}$ by $\delta_{ij}=1$ if $i = j$ and $\delta_{ij}=0$ if $i \neq j$. The $n \times n$ \textbf{identity matrix} $I_n$ is defined by $(I_n)_{ij} = \delta_{ij}$.
\end{definition}
\begin{theorem}
\hfill\\
Let $A$ be an $m \times n$ matrix, $B$ and $C$ be $n \times p$ matrices, and $D$ and $E$ be $q \times m$ matrices. Then
\begin{enumerate}
\item $A(B + C) = AB + AC$ and $(D + E)A = DA + EA$.
\item $a(AB) = (aA)B = A(aB)$ for any scalar $a$.
\item $I_mA = A = AI_n$.
\item If $V$ is an $n$-dimensional vector space with an ordered basis $\beta$, then $[I_V]_\beta = I_n$.
\end{enumerate}
\end{theorem}
\begin{corollary}
\hfill\\
Let $A$ be an $m \times n$ matrix, $B_1, B_2, \dots, B_k$ be $n \times p$ matrices, $C_1, C_2, \dots, C_k$ be $q \times m$ matrices, and $a_1, a_2, \dots, a_k$ be scalars. Then
\[A\left(\sum_{i=1}^{k}a_iB_i\right) = \sum_{i=1}^{k}a_iAB_i\]
and
\[\left(\sum_{i=1}^{k}a_iC_i\right)A = \sum_{i=1}^{k}a_iC_iA.\]
\end{corollary}
\begin{theorem}
\hfill\\
Let $A$ be an $m \times n$ matrix and $B$ be an $n \times p$ matrix. For each $j$ ($1 \leq j \leq p$) let $u_j$ and $v_j$ denote the $j$th columns of $AB$ and $B$, respectively. Then
\begin{enumerate}
\item $u_j = Av_j$.
\item $v_j = Be_j$, where $e_j$ is the $j$th standard vector of $\F^p$.
\end{enumerate}
\end{theorem}
\begin{theorem}
\hfill\\
Let $V$ and $W$ be finite-dimensional vector spaces having ordered bases $\beta$ and $\gamma$, respectively, and let $T: V \to W$ be linear. Then, for each $u \in V$, we have
\[[T(u)]_\gamma = [T]_\beta^\gamma [u]_\beta.\]
\end{theorem}
\begin{definition}
\hfill\\
Let $A$ be an $m \times n$ matrix with entries from a field $\F$. We denote by $L_A: \F^n \to \F^m$ the mapping defined by $L_A(x) = Ax$ (the matrix product of $A$ and $x$) for each column vector $x \in \F^n$. We call $L_A$ a \textbf{left-multiplication transformation}.
\end{definition}
\begin{theorem}
\hfill\\
Let $A$ be an $m \times n$ matrix with entries from $\F$. Then the left-multiplication transformation $L_A: \F^n \to \F^m$ is linear. Furthermore, if $B$ is any other $m \times n$ matrix (with entries from $\F$) and $\beta$ and $\gamma$ are the standard ordered bases for $\F^n$ and $\F^m$, respectively, then we have the following properties.
\begin{enumerate}
\item $[L_A]_\beta^\gamma = A$.
\item $L_A = L_B$ if and only if $A = B$.
\item $L_{A + B} = L_A + L_B$ and $L_{aA} = aL_A$ for all $a \in \F$.
\item If $T: \F^n \to \F^m$ is linear, then there exists a unique $m \times n$ matrix $C$ such that $T = L_C$. In fact, $C = [T]_\beta^\gamma$.
\item If $E$ is an $n \times p$ matrix, then $L_{AE} = L_AL_E$.
\item If $m = n$, then $L_{I_n} = I_{\F^n}$.
\end{enumerate}
\end{theorem}
\begin{theorem}
\hfill\\
Let $A$, $B$, and $C$ be matrices such that $A(BC)$ is defined. Then $(AB)C$ is also defined and $A(BC)=(AB)C$; that is, matrix multiplication is associative.
\end{theorem}
\begin{definition}
\hfill\\
An \textbf{incidence matrix} is a square matrix in which all the entries are either zero or one and, for convenience, all the diagonal entries are zero. If we have a relationship on a set of $n$ objects that we denote $1, 2, \dots, n$, then we define the associated incidence matrix $A$ by $A_{ij} = 1$ if $i$ is related to $j$, and $A_{ij} = 0$ otherwise.
\end{definition}
\begin{definition}
\hfill\\
A relationship among a group of people is called a \textbf{dominance relation} if the associated incidence matrix $A$ has the property that for all distinct pairs $i$ and $j$, $A_{ij} = 1$ if and only if $A_{ji} = 0$, that is, given any two people, exactly one of them \textit{dominates} the other.
\end{definition}
+85
View File
@@ -1 +1,86 @@
\section{Dual Spaces}
\begin{definition}
\hfill\\
A linear transformation from a vector space $V$ into its field of scalars $\F$, which is itself a vector space of dimension 1 over $\F$, is called a \textbf{linear functional} on $V$. We generally use the letters $\mathsf{f}, \mathsf{g}, \mathsf{h}, \dots$ to denote linear functionals.
\end{definition}
\begin{definition}
\hfill\\
Let $V$ be a vector space of continuous real-valued functions on the interval $[0, 2\pi]$. Fix a function $g \in V$. The function $\mathsf{h}: V \to \R$, defined by
\[\mathsf{h}(x) = \frac{1}{2\pi} \int_{0}^{2\pi}x(t)g(t) dt\]
is a linear functional on $V$. In the cases that $g(t)$ equals $\sin(nt)$ or $\cos (nt)$, $\mathsf{h}(x)$ is often called the \textbf{\textit{n}th Fourier coefficient of $x$}.
\end{definition}
\begin{definition}
\hfill\\
Let $V$ be a finite dimensional vector space, and let $\beta = \{x_1, x_2, \dots, x_n\}$ be an ordered basis for $V$. For each $i = 1, 2, \dots, n$, define $\mathsf{f}_i(x) = a_i$, where
\[[x]_\beta = \begin{pmatrix} a_1 \\ a_2 \\ \vdots \\ a_n \end{pmatrix}\]
is the coordinate vector of $x$ relative to $\beta$. Then $\mathsf{f}_i$ is a linear functional on $V$ called the \textbf{\textit{i}th coordinate function with respect to the basis $\beta$}. Note that $\mathsf{f}_i(x_j) = \delta_{ij}$, where $\delta_{ij}$ is the Kronecker delta. These linear functionals play an important role in the theory of dual spaces (see \autoref{Theorem 2.24}).
\end{definition}
\begin{definition}
\hfill\\
For a vector space $V$ over $\F$, we define the \textbf{dual space} of $V$ to be the vector space $\LL(V, \F)$, denoted by $V^*$.\\
Thus $V^*$ is the vector space consisting of all linear functionals on $V$ with the operations of addition and scalar multiplication. Note that if $V$ is finite-dimensional, then by \autoref{Corollary 2.7}
\[\ldim{V^*}= \ldim{\LL(V,\F)} = \ldim{V} \cdot \ldim{\F} = \ldim{V}.\]
Hence by \autoref{Theorem 2.19}, $V$ and $V^*$ are isomorphic. We also define the \textbf{double dual} $V^{**}$ of $V$ to be the dual of $V^*$. In \autoref{Theorem 2.26}, we show, in fact, that there is a natural identification of $V$ and $V^{**}$ in the case that $V$ is finite-dimensional.
\end{definition}
\begin{theorem}\label{Theorem 2.24}
\hfill\\
Suppose that $V$ is a finite-dimensional vector space with the ordered basis $\beta = \{x_1, x_2, \dots, x_n\}$. Let $\mathsf{f}_i$ ($1 \leq i \leq n$) be the $i$th coordinate function with respect to $\beta$ as just defined, and let $\beta^*=\{\mathsf{f}_1, \mathsf{f}_2, \dots, \mathsf{f}_n\}$. Then $\beta^*$ is an ordered basis for $V^*$, and, for any $\mathsf{f} \in V^*$, we have
\[\mathsf{f} = \sum_{i=1}^{n}\mathsf{f}(x_i)\mathsf{f}_i.\]
\end{theorem}
\begin{definition}
\hfill\\
Using the notation of \autoref{Theorem 2.24}, we call the ordered basis $\beta^* = \{\mathsf{f}_1, \mathsf{f}_2, \dots, \mathsf{f}_n\}$ of $V^*$ that satisfies $\mathsf{f}_i(x_j) = \delta_{ij}$ ($1 \leq i,\ j \leq n$) the \textbf{dual basis} of $\beta$.
\end{definition}
\begin{theorem}\label{Theorem 2.25}
\hfill\\
Let $V$ and $W$ be finite-dimensional vector spaces over $\F$ with ordered bases $\beta$ and $\gamma$, respectively. For any linear transformation $T: V \to W$, the mapping $T^t: W^* \to V^*$ defined by $T^t(\mathsf{g}) = \mathsf{g}T$ for all $\mathsf{g} \in W^*$ is a linear transformation with the property that $[T^t]_{\gamma^*}^{\beta^*} = ([T]_\beta^\gamma)^t$.
\end{theorem}
\begin{definition}
\hfill\\
The linear transformation $T^t$ defined in \autoref{Theorem 2.25} is called the \textbf{transpose} of $T$. It is clear that $T^t$ is the unique linear transformation $U$ such that $[U]_{\gamma^*}^{\beta^*} = ([T]_\beta^\gamma)^t$.
\end{definition}
\begin{definition}
\hfill\\
For a vector $x$ in a finite-dimensional vector space $V$, we define the linear functional $\hat{x}: V^* \to \F$ on $V^*$ by $\hat{x}(\mathsf{f}) = \mathsf{f}(x)$ for every $\mathsf{f} \in V^*$. Since $\hat{x}$ is a linear functional on $V^*$, $\hat{x} \in V^{**}$.\\
The correspondence $x \leftrightarrow \hat{x}$ allows us to define the desired isomorphism between $V$ and $V^{**}$.
\end{definition}
\begin{lemma}
\hfill\\
Let $V$ be a finite-dimensional vector space, and let $x \in V$. If $\hat{x}(\mathsf{f})=0$ for all $\mathsf{f} \in V^*$, then $x = 0$.
\end{lemma}
\begin{theorem}\label{Theorem 2.26}
\hfill\\
Let $V$ be a finite-dimensional vector space, and define $\psi: V \to V^{**}$ by $\psi(x) = \hat{x}$. Then $\psi$ is an isomorphism.
\end{theorem}
\begin{corollary}
\hfill\\
Let $V$ be a finite-dimensional vector space with dual space $V^*$. Then every ordered basis for $V^*$ is the dual basis for some ordered basis for $V$.
\end{corollary}
\begin{definition}
\hfill\\
Let $V$ be a finite-dimensional vector space over $\F$. For every subset $S$ of $V$, define the \textbf{annihilator} $S^0$ of $S$ as
\[S^0 = \{\mathsf{f} \in V^*\ |\ \mathsf{f}(x) = 0,\ \text{for all}\ x \in S\}\]
\end{definition}
@@ -1 +1,184 @@
\section{Homogeneous Linear Differential Equations with Constant Coefficients}
\begin{definition}
\hfill\\
A \textbf{differential equation} in an unknown function $y = y(t)$ is an equation involving $y$, $t$, and derivatives of $y$. If the differential equation is of the form
\begin{equation}
a_ny^{(n)}+a_{n-1}y^{(n-1)} + \dots + a_1y^{(1)}+a_0y = f,
\end{equation}
where $a_0, a_1, \dots, a_n$ and $f$ are functions of $t$ and $y^{(k)}$ denotes the $k$th derivative of $y$, then the equation is said to be \textbf{linear}. The functions $a_i$ are called the \textbf{coefficients} of the differential equation. When $f$ is identically zero, (2.1) is called \textbf{homogeneous}.\\
If $a_n \neq 0$, we say that differential equation (2.1) is of \textbf{order \textit{n}}. In this case, we divide both sides by $a_n$ to obtain a new, but equivalent, equation
\[y^{(n)} + b_{n-1}y^{(n-1)} + \dots + b_1y^{(1)} + b_0y = 0,\]
where $b_i = a_i/a_n$ for $i=0, 1, \dots, n-1$. Because of this observation, we always assume that the coefficient $a_n$ in (2.1) is $1$.\\
A \textbf{solution} to (2.1) is a function that when substituted for $y$ reduces (2.1) to an identity.
\end{definition}
\begin{definition}
\hfill\\
Given a complex-valued function $x \in \mathcal{F}(\R, \C)$ of a real variable $t$ (where $\mathcal{F}(\R, \C)$ is the vector space defined in \autoref{Definition 1.7}), there exist unique real-valued functions $x_1$ and $x_2$ of $t$, such that
\[x(t) = x_1(t) + ix_2(t)\ \ \ \text{for}\ \ \ t \in \R,\]
where $i$ is the imaginary number such that $i^2 = -1$. We call $x_1$ the \textbf{real part} and $x_2$ the \textbf{imaginary part} of $x$.
\end{definition}
\begin{definition}
\hfill\\
Given a function $x \in \mathcal{F}(\R, \C)$ with real part $x_1$ and imaginary part $x_2$, we say that $x$ is \textbf{differentiable} if $x_1$ and $x_2$ are differentiable. If $x$ is differentiable, we define the \textbf{derivative} $x'$ of $x$ by
\[x' = x'_1 + ix'_2\]
\end{definition}
\begin{theorem}
\hfill\\
Any solution to a homogeneous linear differential equation with constant coefficients has derivatives of all orders; that is, if $x$ is a solution to such an equation, then $x^{(k)}$ exists for every positive integer $k$.
\end{theorem}
\begin{definition}
\hfill\\
We use $\C^\infty$ to denote the set of all functions in $\mathcal{F}(\R, \C)$ that have derivatives of all orders.
\end{definition}
\begin{definition}
\hfill\\
For any polynomial $p(t)$ over $\C$ of positive degree, $p(D)$ is called a \textbf{differential operator}. The \textbf{order} of the differential operator $p(D)$ is the degree of the polynomial $p(t)$.
\end{definition}
\begin{definition}
\hfill\\
Given the differential equation
\[y^{(n)} + a_{n-1}y^{(n-1)}+ \dots + a_1y^{(1)} + a_0y = 0,\]
the complex polynomial
\[p(t) = t^n + a_{n-1}t^{n-1} + \dots + a_1t + a_0\]
is called the \textbf{auxiliary polynomial} associated with the equation.
\end{definition}
\begin{theorem}
\hfill\\
The set of all solutions to a homogeneous linear differential equation with constant coefficients coincides with the null space of $p(D)$ where $p(t)$ is the auxiliary polynomial associated with the equation.
\end{theorem}
\begin{corollary}
\hfill\\
The set of all solutions to a homogeneous linear differential equation with constant coefficients is a subspace of $\C^\infty$.
\end{corollary}
\begin{definition}
\hfill\\
We call the set of solutions to a homogeneous linear differential equation with constant coefficients the \textbf{solution space} of the equation.
\end{definition}
\begin{definition}
\hfill\\
Let $c = a+ib$ be a complex number with real part $a$ and imaginary part $b$. Define
\[e^c = e^a(\cos(b) + i\sin(b)).\]
The special case
\[e^{ib} = \cos(b) + i\sin(b)\]
is called \textbf{Euler's formula}.
\end{definition}
\begin{definition}
\hfill\\
A function $f: \R \to \C$ defined by $f(t) = e^{ct}$ for a fixed complex number $c$ is called an \textbf{exponential function}.
\end{definition}
\begin{theorem}
\hfill\\
For any exponential function $f(t) = e^{ct}$, $f'(t) = ce^{ct}$.
\end{theorem}
\begin{theorem}
\hfill\\
Recall that the \textbf{order} of a homogeneous linear differential equation is the degree of its auxiliary polynomial. Thus, an equation of order 1 is of the form
\begin{equation}
y' + a_0y = 0.
\end{equation}
The solution space for (2.2) is of dimension 1 and has $\{e^{-a_0t}\}$ as a basis.
\end{theorem}
\begin{corollary}
\hfill\\
For any complex number $c$, the null space of the differential operator $D-c\mathsf{I}$ has $\{e^{ct}\}$ as a basis.
\end{corollary}
\begin{theorem}
\hfill\\
Let $p(t)$ be the auxiliary polynomial for a homogeneous linear differential equation with constant coefficients. For any complex number $c$, if $c$ is a zero of $p(t)$, then $e^{ct}$ is a solution to the differential equation.
\end{theorem}
\begin{theorem}
\hfill\\
For any differential operator $p(D)$ of order $n$, the null space of $p(D)$ is an $n$-dimensional subspace of $\C^\infty$.
\end{theorem}
\begin{lemma}
\hfill\\
The differential operator $D - c\mathsf{I}: \C^\infty \to \C^\infty$ is onto for any complex number $c$.
\end{lemma}
\begin{lemma}
\hfill\\
Let $V$ be a vector space, and suppose that $T$ and $U$ are linear operators on $V$ such that $U$ is onto and the null spaces of $T$ and $U$ are finite-dimensional. Then the null space of $TU$ is finite-dimensional, and
\[\ldim{\n{TU}} = \ldim{\n{T}} + \ldim{\n{U}}\]
\end{lemma}
\begin{corollary}
\hfill\\
The solution space of any $n$th-order homogeneous linear differential equation with constant coefficients is an $n$-dimensional subspace of $\C^\infty$.
\end{corollary}
\begin{theorem}
\hfill\\
Given $n$ distinct complex numbers $c_1, c_2, \dots, c_n$, the set of exponential functions $\{e^{c_1t},e^{c_2t},\dots,e^{c_nt}\}$ is linearly independent.
\end{theorem}
\begin{corollary}
\hfill\\
For any $n$th-order homogeneous linear differential equation with constant coefficients, if the auxiliary polynomial has $n$ distinct zeros $c_1, c_2, \dots, c_n$, then $\{e^{c_1t}, e^{c_2t}, \dots, e^{c_nt}\}$ is a basis for the solution space of the differential equation.
\end{corollary}
\begin{lemma}
\hfill\\
For a given complex number $c$ and a positive integer $n$, suppose that $(t-c)^n$ is the auxiliary polynomial of a homogeneous linear differential equation with constant coefficients. Then the set
\[\beta = \{e^{ct}, te^{ct}, \dots, t^{n-1}e^{ct}\}\]
is a basis for the solution space of the equation.
\end{lemma}
\begin{theorem}
\hfill\\
Given a homogeneous linear differential equation with constant coefficients and auxiliary polynomial
\[(t-c_1)^{n_1}(t-c_2)^{n_2}\dots(t-c_k)^{n_k},\]
where $n_1, n_2, \dots, n_k$ are positive integers and $c_1, c_2, \dots, c_k$ are distinct complex numbers, the following set is a basis for the solution space of the equation:
\[\{e^{c_1t}, te^{c_1t},\dots, t^{n_1-1}e^{c_1t}, \dots, e^{c_kt}, te^{c_kt}, \dots, t^{n_k-1}e^{c_kt}\}\]
\end{theorem}
\begin{definition}
\hfill\\
A differential equation
\[y^{(n)} + a_{n-1}y^{(n-1)} + \dots + a_1y^{(1)} + a_0y = x\]
is called a \textbf{nonhomogeneous} linear differential equation with constant coefficients if the $a_i$'s are constant and $x$ is a function that is not identically zero.
\end{definition}
@@ -1 +1,87 @@
\section{Invertibility and Isomorphisms}
\begin{definition}
\hfill\\
Let $V$ and $W$ be vector spaces, and let $T: V \to W$ be linear. A function $U: W \to V$ is said to be an \textbf{inverse} of $T$ if $TU = I_W$ and $UT = I_V$. If $T$ has an inverse, then $T$ is said to be \textbf{invertible}. If $T$ is invertible, then the inverse of $T$ is unique and is denoted by $T^{-1}$.\\
The following facts hold for invertible functions $T$ and $U$.
\begin{enumerate}
\item $(TU)^{-1} = U^{-1}T^{-1}$.
\item $(T^{-1})^{-1} = T$; in particular, $T^{-1}$ is invertible.
\end{enumerate}
We often use the fact that a function is invertible if and only if it is one-to-one and onto. We can therefore restate \autoref{Theorem 2.5} as follows:
\begin{enumerate}
\setcounter{enumi}{2}
\item Let $T: V \to W$ be a linear transformation, where $V$ and $W$ are finite-dimensional vector spaces of equal dimension. Then $T$ is invertible if and only if $\rank{T} = \ldim{V}$.
\end{enumerate}
\end{definition}
\begin{theorem}
\hfill\\
Let $V$ and $W$ be vector spaces, and let $T: V \to W$ be linear and invertible. Then $T^{-1}: W \to V$ is linear.
\end{theorem}
\begin{definition}
\hfill\\
Let $A$ be an $n \times n$ matrix. Then $A$ is \textbf{invertible} if there exists an $n \times n$ matrix $B$ such that $AB = BA = I$.\\
If $A$ is invertible, then the matrix $B$ such that $AB = BA = I$ is unique. (If $C$ were another such matrix, then $C = CI = C(AB) = (CA)B = IB = B$.) The matrix $B$ is called the \textbf{inverse} of $A$ and is denoted by $A^{-1}$.
\end{definition}
\begin{lemma}
\hfill\\
Let $T$ be an invertible linear transformation from $V$ to $W$. Then $V$ is finite-dimensional if and only if $W$ is finite-dimensional. In this case, $\ldim{V} = \ldim{W}$
\end{lemma}
\begin{theorem}
\hfill\\
Let $V$ and $W$ be finite-dimensional vector spaces with ordered bases $\beta$ and $\gamma$, respectively. Let $T: V \to W$ be linear. Then $T$ is invertible if and only if $[T]_\beta^\gamma$ is invertible. Furthermore, $[T^{-1}]_\gamma^\beta = ([T]_\beta^\gamma)^{-1}$.
\end{theorem}
\begin{corollary}
\hfill\\
Let $V$ be a finite-dimensional vector space with an ordered basis $\beta$, and let $T: V \to V$ be linear. Then $T$ is invertible if and only if $[T]_\beta$ is invertible. Furthermore, $[T^{-1}]_\beta = ([T]_\beta)^{-1}$.
\end{corollary}
\begin{corollary}
\hfill\\
Let $A$ be an $n \times n$ matrix. Then $A$ is invertible if and only if $L_A$ is invertible. Furthermore, $(L_A)^{-1} = L_{A^{-1}}$.
\end{corollary}
\begin{definition}
\hfill\\
Let $V$ and $W$ be vector spaces. We say that $V$ is \textbf{isomorphic} to $W$ if there exists a linear transformation $T: V \to W$ that is invertible. Such a linear transformation is called an \textbf{isomorphism} from $V$ onto $W$.
\end{definition}
\begin{theorem}\label{Theorem 2.19}
\hfill\\
Let $V$ and $W$ be finite-dimensional vector spaces (over the same field). Then $V$ is isomorphic to $W$ if and only if $\ldim{V} = \ldim{W}$.
\end{theorem}
\begin{corollary}
\hfill\\
Let $V$ be a vector space over $\F$. Then $V$ is isomorphic to $\F^n$ if and only if $\ldim{V} = n$.
\end{corollary}
\begin{theorem}
\hfill\\
Let $V$ and $W$ be finite-dimensional vector spaces over $\F$ of dimensions $n$ and $m$, respectively, and let $\beta$ and $\gamma$ be ordered bases for $V$ and $W$, respectively. Then the function $\Phi: \LL(V,W) \to M_{m \times n}(\F)$, defined by $\Phi(T) = [T]_\beta^\gamma$ for $T \in \LL(V,W)$, is an isomorphism.
\end{theorem}
\begin{corollary}\label{Corollary 2.7}
\hfill\\
Let $V$ and $W$ be finite-dimensional vector spaces of dimension $n$ and $m$, respectively. Then $\LL(V,W)$ is finite-dimensional of dimension $mn$.
\end{corollary}
\begin{definition}
\hfill\\
Let $\beta$ be an ordered basis for an $n$-dimensional vector space $V$ over the field $\F$. The \textbf{standard representation of $V$ with respect to $\beta$} is the function $\phi_\beta: V \to \F^n$ defined by $\phi_\beta(x) = [x]_\beta$ for each $x \in V$.
\end{definition}
\begin{theorem}
\hfill\\
For any finite-dimensional vector space $V$ with ordered basis $\beta$, $\phi_\beta$ is an isomorphism.
\end{theorem}
@@ -1 +1,117 @@
\section{Linear Transformations, Null Spaces, and Ranges}
\begin{definition}
\hfill\\
Let $V$ and $W$ be vector spaces (over $\F$). We call a function $T: V \to W$ a \textbf{linear transformation from $V$ to $W$} if, for all $x,y \in V$, and $c \in \F$, we have
\begin{enumerate}
\item $T(x + y) = T(x) + T(y)$, and
\item $T(cx) = cT(x)$
\end{enumerate}
If the underlying field $\F$ is the field of rational numbers, then (1) implies (2), but, in general, (1) and (2) are logically independent.\\
We often simply call $T$ \textbf{linear}.
\end{definition}
\begin{remark}
\hfill\\
Let $V$ and $W$ be vector spaces (over $\F$). Let $T: V \to W$ be a linear transformation. Then the following properties hold:
\begin{enumerate}
\item If $T$ is linear, then $T(0) = 0$.
\item $T$ is linear if and only if $T(cx + y) = cT(x) + T(y)$ for all $x,y \in V$ and $c \in \F$.
\item If $T$ is linear, then $T(x-y)=T(x)-T(y)$ for all $x,y \in V$.
\item $T$ is linear if and only if, for $x_1, x_2, \dots, x_n \in V$ and $a_1, a_2, \dots, a_n \in \F$, we have
\[T\left(\sum_{i=1}^{n}a_ix_i\right)=\sum_{i=1}^{n}a_iT(x_i).\]
\end{enumerate}
We generally use property 2 to prove that a given transformation is linear.
\end{remark}
\begin{definition}
\hfill\\
For any angle $\theta$, define $T_\theta: \R^2 \to \R^2$ by the rule: $T_\theta(a_1, a_2)$ is the vector obtained by rotating $(a_1, a_2)$ counterclockwise by $\theta$ if $(a_1, a_2) \neq (0, 0)$, and $T_\theta(0,0) = (0,0)$. Then $T_\theta: \R^2 \to \R^2$ is a linear transformation that is called the \textbf{rotation by $\theta$}.
\end{definition}
\begin{definition}
\hfill\\
Define $T: \R^2 \to \R^2$ by $T(a_1, a_2) = (a_1, -a_2)$. $T$ is called the \textbf{reflection about the \textit{x}-axis}.
\end{definition}
\begin{definition}
\hfill\\
For vector spaces $V$ and $W$ (over $\F$), we define the \textbf{identity transformation} $I_V: V \to V$ by $I_V(x) = x$ for all $x \in V$.\\
We define the \textbf{zero transformation} $T_0: V \to W$ by $T_0(x) = 0$ for all $x \in V$.\\
\textbf{Note:} We often write $I$ instead of $I_V$.
\end{definition}
\begin{definition}
\hfill\\
Let $V$ and $W$ be vector spaces, and let $T: V \to W$ be linear. We define the \textbf{null space} (or \textbf{kernel}) $\n{T}$ to be the set of all vectors $x \in V$ such that $T(x)=0$; that is, \\$\n{T} = \{x \in V\ |\ T(x) = 0\}$.
We define the \textbf{range} (or \textbf{image}) $\range{T}$ of $T$ to be the subset of $W$ consisting of all images (under $T$) of vectors in $V$; that is, $\range{T} = \{T(x)\ |\ x \in V\}$.
\end{definition}
\begin{theorem}
\hfill\\
Let $V$ and $W$ be vector spaces and $T: V \to W$ be linear. Then $\n{T}$ and $\range{T}$ are subspaces of $V$ and $W$, respectively.
\end{theorem}
\begin{theorem}
\hfill\\
Let $V$ and $W$ be vector spaces, and let $T: V \to W$ be linear. If $\beta = \{v_1, v_2, \dots, v_n\}$ is a basis for $V$, then
\[\range{T} = \lspan{T(\beta)} = \lspan{\{T(v_1), T(v_2), \dots, T(v_n)\}}.\]
\end{theorem}
\begin{definition}
\hfill\\
Let $V$ and $W$ be vector spaces, and let $T: V \to W$ be linear. If $\n{T}$ and $\range{T}$ are finite-dimensional, then we define the \textbf{nullity} of $T$, denoted $\nullity{T}$, and the \textbf{rank} of $T$, denoted $\rank{T}$, to be the dimensions of $\n{T}$ and $\range{T}$, respectively.
\end{definition}
\begin{theorem}[\textbf{Dimension Theorem}]
\hfill\\
Let $V$ and $W$ be vector spaces, and let $T: V \to W$ be linear. If $V$ is finite-dimensional, then
\[\nullity{T} + \rank{T} = \ldim{V}\]
\end{theorem}
\begin{theorem}
\hfill\\
Let $V$ and $W$ be vector spaces, and let $T: V \to W$ be linear. Then $T$ is one-to-one if and only if $\n{T} = \{0\}$.
\end{theorem}
\begin{theorem}\label{Theorem 2.5}
\hfill\\
Let $V$ and $W$ be vector spaces of equal (finite) dimension, and let $T: V \to W$ be linear. Then the following are equivalent.
\begin{enumerate}
\item $T$ is one-to-one.
\item $T$ is onto.
\item $\rank{T} = \ldim{V}$.
\end{enumerate}
\end{theorem}
\begin{theorem}
\hfill\\
Let $V$ and $W$ be vector spaces over $\F$, and suppose that $\{v_1, v_2, \dots, v_n\}$ is a basis for $V$. For $w_1, w_2, \dots, w_n$ in $W$, there exists exactly one linear transformation $T: V \to W$ such that $T(v_i) = w_i$ for $i = 1, 2, \dots, n$.
\end{theorem}
\begin{corollary}\label{Corollary 2.1}
\hfill\\
Let $V$ and $W$ be vector spaces, and suppose that $V$ has a finite basis $\{v_1, v_2, \dots, v_n\}$. If $U,T: V \to W$ are linear and $U(v_i) = T(v_i)$, for $i = 1, 2, \dots, n$, then $U = T$.
\end{corollary}
\begin{definition}
\hfill\\
Let $V$ be a vector space and $W_1$ and $W_2$ be subspaces of $V$ such that $V = W_1 \oplus W_2$. A function $T: V \to V$ is called the \textbf{projection on $W_1$ along $W_2$} if, for $x = x_1 + x_2$ with $x_1 \in W_1$ and $x_2 \in W_2$, we have $T(x) = x_1$.
\end{definition}
\begin{definition}
\hfill\\
Let $V$ be a vector space, and let $T: V \to V$ be linear. A subspace $W$ of $V$ is said to be \textbf{$T$-invariant} if $T(x) \in W$ for every $x \in W$, that is, $T(W) \subseteq W$. If $W$ is $T$-invariant, we define the \textbf{restriction of $T$ on $W$} to be the function $T_W: W \to W$ defined by $T_W(x) = T(x)$ for all $x \in W$.
\end{definition}
@@ -1 +1,40 @@
\section{The Change of Coordinate Matrix}
\begin{theorem}\label{Theorem 2.22}
\hfill\\
Let $\beta$ and $\beta'$ be two ordered bases for a finite-dimensional vector space $V$, and let $Q = [I_V]_{\beta'}^\beta$. Then
\begin{enumerate}
\item $Q$ is invertible.
\item For any $v \in V$, $[v]_\beta = Q[v]_{\beta'}$.
\end{enumerate}
\end{theorem}
\begin{definition}
\hfill\\
The matrix $Q=[I_V]_{\beta'}^\beta$, defined in \autoref{Theorem 2.22}, is called a \textbf{change of coordinate matrix}. Because of part (2) of the theorem, we say that \textbf{$Q$ changes $\beta'$-coordinates into $\beta$-coordinates}.
\end{definition}
\begin{definition}
\hfill\\
A linear transformation that maps a vector space $V$ into itself is called a \textbf{linear operator on $V$}.
\end{definition}
\begin{theorem}
\hfill\\
Let $T$ be a linear operator on a finite-dimensional vector space $V$, and let $\beta$ and $\beta'$ be ordered bases for $V$. Suppose that $Q$ is the change of coordinate matrix that changes $\beta'$-coordinates into $\beta$-coordinates. Then
\[[T]_{\beta'}=Q^{-1}[T]_\beta Q\]
\end{theorem}
\begin{corollary}
\hfill\\
Let $A \in M_{n \times n}(\F)$, and let $\gamma$ be an ordered basis for $\F^n$. Then $[L_A]_\gamma = Q^{-1}AQ$, where $Q$ is the $n \times n$ matrix whose $j$th column is the $j$th vector of $\gamma$.
\end{corollary}
\begin{definition}
\hfill\\
Let $A$ and $B$ be matrices in $M_{n \times n}(\F)$. We say that $B$ is \textbf{similar} to $A$ if there exists an invertible matrix $Q$ such that $B = Q^{-1}AQ$.\\
Notice that the relation of similarity is an equivalence relation. So we need only say that $A$ and $B$ are similar.
\end{definition}
@@ -1 +1,67 @@
\section{The Matrix Representation of a Linear Transformation}
\begin{definition}
\hfill\\
Let $V$ be a finite-dimensional vector space. An \textbf{ordered basis} for $V$ is a basis for $V$ endowed with a specific order; that is, an ordered basis for $V$ is a finite sequence of linearly independent vectors in $V$ that generates $V$.\\
For the vector space $\F^n$, we call $\{e_1, e_2, \dots, e_n\}$ the \textbf{standard ordered basis} for $\F^n$. Similarly, for the vector space $P_n(\F)$, we call $\{1, x, \dots, x^n\}$ the \textbf{standard ordered basis} for $P_n(\F)$.
\end{definition}
\begin{definition}
\hfill\\
Let $\beta = \{v_1, v_2, \dots, v_n\}$ be an ordered basis for a finite-dimensional vector space $V$. For $x \in V$, let $a_1, a_2, \dots, a_n$ be the unique scalar values such that
\[x = \sum_{i=1}^{n}a_iv_i.\]
We define the \textbf{coordinate vector of $x$ relative to $\beta$}, denoted by $[x]_\beta$, by
\[[x]_\beta = \begin{pmatrix} a_1 \\ a_2 \\ \vdots \\ a_n\end{pmatrix}.\]
Notice that $[v_i]_\beta = e_i$ in the preceding definition. It can be shown that the correspondence $x \to [x]_\beta$ provides us with a linear transformation from $V$ to $\F^n$.
\end{definition}
\begin{notation}
\hfill\\
The following notation is used to construct a matrix representation of a linear transformation in the following definition.\\
Suppose that $V$ and $W$ are finite-dimensional vector spaces with ordered bases $\beta = \{v_1, v_2, \dots, v_n\}$ and $\gamma = \{w_1, w_2, \dots, w_m\}$, respectively. Let $T: V \to W$ be linear. Then for each $j$, $1 \leq j \leq n$, there exist unique scalars $a_{ij} \in \F$, $1 \leq i \leq m$, such that
\[T(v_j) = \sum_{i=1}^{m}a_{ij}w_i\ \ \text{for}\ 1 \leq j \leq n.\]
\end{notation}
\begin{definition}
\hfill\\
Using the notation above, we call the $m \times n$ matrix $A$ defined by $A_{ij} = a_{ij}$ the \textbf{matrix representation of $T$ in the ordered bases $\beta$ and $\gamma$} and write $A = [T]_\beta^\gamma$. If $V = W$ and $\beta = \gamma$, then we write $A = [T]_\beta$.
Notice that the $j$th column of $A$ is simply $[T(v_j)]_\gamma$. Also observe that if $U: V \to W$ is a linear transformation such that $[U]_\beta^\gamma = [T]_\beta^\gamma$, then $U=T$ by the corollary to Theorem 2.6 (\autoref{Corollary 2.1}).
\end{definition}
\begin{definition}
\hfill\\
Let $T,U: V \to W$ be arbitrary functions, where $V$ and $W$ are vector spaces over $\F$, and let $a \in \F$. We define $T + U: V \to W$ by $(T+U)(x) = T(x) + U(x)$ for all $x \in V$, and $aT: V \to W$ by $(aT)(x) = aT(x)$ for all $x \in V$.
\end{definition}
\begin{theorem}
\hfill\\
Let $V$ and $W$ be vector spaces over a field $\F$, and let $T,U: V \to W$ be linear.
\begin{enumerate}
\item For all $a \in \F$, $aT+U$ is linear.
\item Using the operations of addition and scalar multiplication in the preceding definition, the collection of all linear transformations from $V$ to $W$ is a vector space over $\F$.
\end{enumerate}
\end{theorem}
\begin{definition}
\hfill\\
Let $V$ and $W$ be vector spaces over $\F$. We denote the vector space of all linear transformations from $V$ to $W$ by $\LL(V, W)$. In the case that $V = W$, we write $\LL(V)$ instead of $\LL(V, W)$.
\end{definition}
\begin{theorem}
\hfill\\
Let $V$ and $W$ be finite-dimensional vector spaces with ordered bases $\beta$ and $\gamma$, respectively, and let $T,U: V \to W$ be linear transformations. Then
\begin{enumerate}
\item $[T+U]_\beta^\gamma = [T]_\beta^\gamma + [U]_\beta^\gamma$ and
\item $[aT]_\beta^\gamma = a[T]_\beta^\gamma$ for all scalars $a$.
\end{enumerate}
\end{theorem}