Finished all chapters and definitions. I need to add subsections and see if there are any theorems or definitions in the appendices that are worth adding to this as well.
This commit is contained in:
@@ -8,7 +8,7 @@
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $V$ be a vector space. Let $T, U_1, U_2 \in \LL(V)$. Then
|
||||
|
||||
|
||||
\begin{enumerate}
|
||||
\item $T(U_1 + U_2) = TU_1 + TU_2$ and $(U_1 + U_2)T = U_1T + U_2T$
|
||||
\item $T(U_1U_2) = (TU_1)U_2$
|
||||
@@ -20,18 +20,18 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix and $B$ be an $n \times p$ matrix. We define the \textbf{product} of $A$ and $B$, denoted $AB$, to be the $m \times p$ matrix such that
|
||||
|
||||
|
||||
\[(AB)_{ij} = \sum_{k=1}^{n}A_{ik}B_{kj}\ \ \text{for}\ \ 1 \leq i \leq m,\ \ 1 \leq j \leq p.\]
|
||||
|
||||
|
||||
Notice that $(AB)_{ij}$ is the sum of products of corresponding entries from the $i$th row of $A$ and the $j$th column of $B$.\\
|
||||
|
||||
|
||||
The reader should observe that in order for the product $AB$ to be defined, there are restrictions regarding the relative sizes of $A$ and $B$. The following mnemonic device is helpful: ``$(m \times n) \cdot (n \times p) = (m \times p)$''; that is, in order for the product $AB$ to be defined, the two ``inner'' dimensions must be equal, and the two ``outer'' dimensions yield the size of the product.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $V$, $W$, and $Z$ be finite-dimensional vector spaces with ordered bases $\alpha$, $\beta$, and $\gamma$, respectively. Let $T: V \to W$ and $U: W \to Z$ be linear transformations. Then
|
||||
|
||||
|
||||
\[[UT]_\alpha^\gamma = [U]_\beta^\gamma[T]_\alpha^\beta\]
|
||||
\end{theorem}
|
||||
|
||||
@@ -48,7 +48,7 @@
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix, $B$ and $C$ be $n \times p$ matrices, and $D$ and $E$ be $q \times m$ matrices. Then
|
||||
|
||||
|
||||
\begin{enumerate}
|
||||
\item $A(B + C) = AB + AC$ and $(D + E)A = DA + EA$.
|
||||
\item $a(AB) = (aA)B = A(aB)$ for any scalar $a$.
|
||||
@@ -60,18 +60,18 @@
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix, $B_1, B_2, \dots, B_k$ be $n \times p$ matrices, $C_1, C_2, \dots, C_k$ be $q \times m$ matrices, and $a_1, a_2, \dots, a_k$ be scalars. Then
|
||||
|
||||
|
||||
\[A\left(\sum_{i=1}^{k}a_iB_i\right) = \sum_{i=1}^{k}a_iAB_i\]
|
||||
|
||||
|
||||
and
|
||||
|
||||
|
||||
\[\left(\sum_{i=1}^{k}a_iC_i\right)A = \sum_{i=1}^{k}a_iC_iA.\]
|
||||
\end{corollary}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix and $B$ be an $n \times p$ matrix. For each $j$ ($1 \leq j \leq p$) let $u_j$ and $v_j$ denote the $j$th columns of $AB$ and $B$, respectively. Then
|
||||
|
||||
|
||||
\begin{enumerate}
|
||||
\item $u_j = Av_j$.
|
||||
\item $v_j = Be_j$, where $e_j$ is the $j$th standard vector of $\F^p$.
|
||||
@@ -81,7 +81,7 @@
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be finite-dimensional vector spaces having ordered bases $\beta$ and $\gamma$, respectively, and let $T: V \to W$ be linear. Then, for each $u \in V$, we have
|
||||
|
||||
|
||||
\[[T(u)]_\gamma = [T]_\beta^\gamma [u]_\beta.\]
|
||||
\end{theorem}
|
||||
|
||||
@@ -93,7 +93,7 @@
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix with entries from $\F$. Then the left-multiplication transformation $L_A: \F^n \to \F^m$ is linear. Furthermore, if $B$ is any other $m \times n$ matrix (with entries from $\F$) and $\beta$ and $\gamma$ are the standard ordered bases for $\F^n$ and $\F^m$, respectively, then we have the following properties.
|
||||
|
||||
|
||||
\begin{enumerate}
|
||||
\item $[L_A]_\beta^\gamma = A$.
|
||||
\item $L_A = L_B$ if and only if $A = B$.
|
||||
@@ -117,4 +117,4 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A relationship among a group of people is called a \textbf{dominance relation} if the associated incidence matrix $A$ has the property that for all distinct pairs $i$ and $j$, $A_{ij} = 1$ if and only if $A_{ji} = 0$, that is, given any two people, exactly one of them \textit{dominates} the other.
|
||||
\end{definition}
|
||||
\end{definition}
|
||||
|
||||
+12
-12
@@ -8,36 +8,36 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ be a vector space of continuous real-valued functions on the interval $[0, 2\pi]$. Fix a function $g \in V$. The function $\mathsf{h}: V \to \R$, defined by
|
||||
|
||||
|
||||
\[\mathsf{h}(x) = \frac{1}{2\pi} \int_{0}^{2\pi}x(t)g(t) dt\]
|
||||
|
||||
|
||||
is a linear functional on $V$. In the cases that $g(t)$ equals $\sin(nt)$ or $\cos (nt)$, $\mathsf{h}(x)$ is often called the \textbf{\textit{n}th Fourier coefficient of $x$}.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ be a finite-dimensional vector space, and let $\beta = \{x_1, x_2, \dots, x_n\}$ be an ordered basis for $V$. For each $i = 1, 2, \dots, n$, define $\mathsf{f}_i(x) = a_i$, where
|
||||
|
||||
|
||||
\[[x]_\beta = \begin{pmatrix} a_1 \\ a_2 \\ \vdots \\ a_n \end{pmatrix}\]
|
||||
|
||||
|
||||
is the coordinate vector of $x$ relative to $\beta$. Then $\mathsf{f}_i$ is a linear functional on $V$ called the \textbf{\textit{i}th coordinate function with respect to the basis $\beta$}. Note that $\mathsf{f}_i(x_j) = \delta_{ij}$, where $\delta_{ij}$ is the Kronecker delta. These linear functionals play an important role in the theory of dual spaces (see \autoref{Theorem 2.24}).
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
For a vector space $V$ over $\F$, we define the \textbf{dual space} of $V$ to be the vector space $\LL(V, \F)$, denoted by $V^*$.\\
|
||||
|
||||
|
||||
Thus $V^*$ is the vector space consisting of all linear functionals on $V$ with the operations of addition and scalar multiplication. Note that if $V$ is finite-dimensional, then by \autoref{Corollary 2.7}
|
||||
|
||||
|
||||
\[\ldim{V^*}= \ldim{\LL(V,\F)} = \ldim{V} \cdot \ldim{\F} = \ldim{V}.\]
|
||||
|
||||
|
||||
Hence by \autoref{Theorem 2.19}, $V$ and $V^*$ are isomorphic. We also define the \textbf{double dual} $V^{**}$ of $V$ to be the dual of $V^*$. In \autoref{Theorem 2.26}, we show, in fact, that there is a natural identification of $V$ and $V^{**}$ in the case that $V$ is finite-dimensional.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}\label{Theorem 2.24}
|
||||
\hfill\\
|
||||
Suppose that $V$ is a finite-dimensional vector space with the ordered basis $\beta = \{x_1, x_2, \dots, x_n\}$. Let $\mathsf{f}_i$ ($1 \leq i \leq n$) be the $i$th coordinate function with respect to $\beta$ as just defined, and let $\beta^*=\{\mathsf{f}_1, \mathsf{f}_2, \dots, \mathsf{f}_n\}$. Then $\beta^*$ is an ordered basis for $V^*$, and, for any $\mathsf{f} \in V^*$, we have
|
||||
|
||||
|
||||
\[\mathsf{f} = \sum_{i=1}^{n}\mathsf{f}(x_i)\mathsf{f}_i.\]
|
||||
\end{theorem}
|
||||
|
||||
@@ -59,7 +59,7 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
For a vector $x$ in a finite-dimensional vector space $V$, we define the linear functional $\hat{x}: V^* \to \F$ on $V^*$ by $\hat{x}(\mathsf{f}) = \mathsf{f}(x)$ for every $\mathsf{f} \in V^*$. Since $\hat{x}$ is a linear functional on $V^*$, $\hat{x} \in V^{**}$.\\
|
||||
|
||||
|
||||
The correspondence $x \leftrightarrow \hat{x}$ allows us to define the desired isomorphism between $V$ and $V^{**}$.
|
||||
\end{definition}
|
||||
|
||||
@@ -81,6 +81,6 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ be a finite-dimensional vector space over $\F$. For every subset $S$ of $V$, define the \textbf{annihilator} $S^0$ of $S$ as
|
||||
|
||||
\[S^0 = \{\mathsf{f} \in V^*\ |\ \mathsf{f}(x) = 0,\ \text{for all}\ x \in S\}\]
|
||||
\end{definition}
|
||||
|
||||
\[S^0 = \{\mathsf{f} \in V^* : \mathsf{f}(x) = 0,\ \text{for all}\ x \in S\}\]
|
||||
\end{definition}
|
||||
|
||||
+34
-34
@@ -3,35 +3,35 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A \textbf{differential equation} in an unknown function $y = y(t)$ is an equation involving $y$, $t$, and derivatives of $y$. If the differential equation is of the form
|
||||
|
||||
|
||||
\begin{equation}
|
||||
a_ny^{(n)}+a_{n-1}y^{(n-1)} + \dots + a_1y^{(1)}+a_0y = f,
|
||||
\end{equation}
|
||||
|
||||
|
||||
where $a_0, a_1, \dots, a_n$ and $f$ are functions of $t$ and $y^{(k)}$ denotes the $k$th derivative of $y$, then the equation is said to be \textbf{linear}. The functions $a_i$ are called the \textbf{coefficients} of the differential equation. When $f$ is identically zero, (2.1) is called \textbf{homogeneous}.\\
|
||||
|
||||
|
||||
If $a_n \neq 0$, we say that differential equation (2.1) is of \textbf{order \textit{n}}. In this case, we divide both sides by $a_n$ to obtain a new, but equivalent, equation
|
||||
|
||||
|
||||
\[y^{(n)} + b_{n-1}y^{(n-1)} + \dots + b_1y^{(1)} + b_0y = 0,\]
|
||||
|
||||
|
||||
where $b_i = a_i/a_n$ for $i=0, 1, \dots, n-1$. Because of this observation, we always assume that the coefficient $a_n$ in (2.1) is $1$.\\
|
||||
|
||||
|
||||
A \textbf{solution} to (2.1) is a function that when substituted for $y$ reduces (2.1) to an identity.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Given a complex-valued function $x \in \mathcal{F}(\R, \C)$ of a real variable $t$ (where $\mathcal{F}(\R, \C)$ is the vector space defined in \autoref{Definition 1.7}), there exist unique real-valued functions $x_1$ and $x_2$ of $t$, such that
|
||||
|
||||
|
||||
\[x(t) = x_1(t) + ix_2(t)\ \ \ \text{for}\ \ \ t \in \R,\]
|
||||
|
||||
|
||||
where $i$ is the imaginary number such that $i^2 = -1$. We call $x_1$ the \textbf{real part} and $x_2$ the \textbf{imaginary part} of $x$.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Given a function $x \in \mathcal{F}(\R, \C)$ with real part $x_1$ and imaginary part $x_2$, we say that $x$ is \textbf{differentiable} if $x_1$ and $x_2$ are differentiable. If $x$ is differentiable, we define the \textbf{derivative} $x'$ of $x$ by
|
||||
|
||||
|
||||
\[x' = x'_1 + ix'_2\]
|
||||
\end{definition}
|
||||
|
||||
@@ -47,25 +47,25 @@
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
For any polynomial $p(t)$ over $\C$ of positive degree, $p(D)$ is called a \textbf{differential operator}. The \textbf{order} of the differential operator $p(D)$ is the degree of the polynomial $p(t)$.
|
||||
For any polynomial $p(t)$ over $\C$ of positive degree, $p(\mathsf{D})$ is called a \textbf{differential operator}. The \textbf{order} of the differential operator $p(\mathsf{D})$ is the degree of the polynomial $p(t)$.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Given the differential equation
|
||||
|
||||
|
||||
\[y^{(n)} + a_{n-1}y^{(n-1)}+ \dots + a_1y^{(1)} + a_0y = 0,\]
|
||||
|
||||
the complex polynomial
|
||||
|
||||
|
||||
the complex polynomial
|
||||
|
||||
\[p(t) = t^n + a_{n-1}t^{n-1} + \dots + a_1t + a_0\]
|
||||
|
||||
|
||||
is called the \textbf{auxiliary polynomial} associated with the equation.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
The set of all solutions to a homogeneous linear differential equation with constant coefficients coincides with the null space of $p(D)$ where $p(t)$ is the auxiliary polynomial associated with the equation.
|
||||
The set of all solutions to a homogeneous linear differential equation with constant coefficients coincides with the null space of $p(\mathsf{D})$ where $p(t)$ is the auxiliary polynomial associated with the equation.
|
||||
\end{theorem}
|
||||
|
||||
\begin{corollary}
|
||||
@@ -81,13 +81,13 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $c = a+ib$ be a complex number with real part $a$ and imaginary part $b$. Define
|
||||
|
||||
|
||||
\[e^c = e^a(\cos(b) + i\sin(b)).\]
|
||||
|
||||
|
||||
The special case
|
||||
|
||||
|
||||
\[e^{ib} = \cos(b) + i\sin(b)\]
|
||||
|
||||
|
||||
is called \textbf{Euler's formula}.
|
||||
\end{definition}
|
||||
|
||||
@@ -104,17 +104,17 @@
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Recall that the \textbf{order} of a homogeneous linear differential equation is the degree of its auxiliary polynomial. Thus, an equation of order 1 is of the form
|
||||
|
||||
|
||||
\begin{equation}
|
||||
y' + a_0y = 0.
|
||||
\end{equation}
|
||||
|
||||
|
||||
The solution space for (2.2) is of dimension 1 and has $\{e^{-a_0t}\}$ as a basis.
|
||||
\end{theorem}
|
||||
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
For any complex number $c$, the null space of the differential operator $D-c\mathsf{l}$ has $\{e^{ct}\}$ as a basis.
|
||||
For any complex number $c$, the null space of the differential operator $\mathsf{D}-cI$ has $\{e^{ct}\}$ as a basis.
|
||||
\end{corollary}
|
||||
|
||||
\begin{theorem}
|
||||
@@ -124,18 +124,18 @@
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
For any differential operator $p(D)$ of order $n$, the null space of $p(D)$ is an $n$-dimensional subspace of $\C^\infty$.
|
||||
For any differential operator $p(\mathsf{D})$ of order $n$, the null space of $p(\mathsf{D})$ is an $n$-dimensional subspace of $\C^\infty$.
|
||||
\end{theorem}
|
||||
|
||||
\begin{lemma}
|
||||
\hfill\\
|
||||
The differential operator $D - c\mathsf{l}: \C^\infty \to \C^\infty$ is onto for any complex number $c$.
|
||||
The differential operator $\mathsf{D} - cI: \C^\infty \to \C^\infty$ is onto for any complex number $c$.
|
||||
\end{lemma}
|
||||
|
||||
\begin{lemma}
|
||||
\hfill\\
|
||||
Let $V$ be a vector space, and suppose that $T$ and $U$ are linear operators on $V$ such that $U$ is onto and the null spaces of $T$ and $U$ are finite-dimensional. Then the null space of $TU$ is finite-dimensional, and
|
||||
|
||||
|
||||
\[\ldim{\n{TU}} = \ldim{\n{T}} + \ldim{\n{U}}\]
|
||||
\end{lemma}
|
||||
|
||||
@@ -157,28 +157,28 @@
|
||||
\begin{lemma}
|
||||
\hfill\\
|
||||
For a given complex number $c$ and a positive integer $n$, suppose that $(t-c)^n$ is the auxiliary polynomial of a homogeneous linear differential equation with constant coefficients. Then the set
|
||||
|
||||
|
||||
\[\beta = \{e^{ct}, te^{ct}, \dots, t^{n-1}e^{ct}\}\]
|
||||
|
||||
|
||||
is a basis for the solution space of the equation.
|
||||
\end{lemma}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Given a homogeneous linear differential equation with constant coefficients and auxiliary polynomial
|
||||
|
||||
|
||||
\[(t-c_1)^{n_1}(t-c_2)^{n_2}\dots(t-c_k)^{n_k},\]
|
||||
|
||||
|
||||
where $n_1, n_2, \dots, n_k$ are positive integers and $c_1, c_2, \dots, c_k$ are distinct complex numbers, the following set is a basis for the solution space of the equation:
|
||||
|
||||
|
||||
\[\{e^{c_1t}, te^{c_1t},\dots, t^{n_1-1}e^{c_1t}, \dots, e^{c_kt}, te^{c_kt}, \dots, t^{n_k-1}e^{c_kt}\}\]
|
||||
\end{theorem}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A differential equation
|
||||
|
||||
|
||||
\[y^{(n)} + a_{n-1}y^{(n-1)} + \dots + a_1y^{(1)} + a_0y = x\]
|
||||
|
||||
|
||||
is called a \textbf{nonhomogeneous} linear differential equation with constant coefficients if the $a_i$'s are constant and $x$ is a function that is not identically zero.
|
||||
\end{definition}
|
||||
\end{definition}
|
||||
|
||||
@@ -3,16 +3,16 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be vector spaces, and let $T: V \to W$ be linear. A function $U: W \to V$ is said to be an \textbf{inverse} of $T$ if $TU = I_W$ and $UT = I_V$. If $T$ has an inverse, then $T$ is said to be \textbf{invertible}. If $T$ is invertible, then the inverse of $T$ is unique and is denoted by $T^{-1}$.\\
|
||||
|
||||
|
||||
The following facts hold for invertible functions $T$ and $U$.
|
||||
|
||||
|
||||
\begin{enumerate}
|
||||
\item $(TU)^{-1} = U^{-1}T^{-1}$.
|
||||
\item $(T^{-1})^{-1} = T$; in particular, $T^{-1}$ is invertible.
|
||||
\end{enumerate}
|
||||
|
||||
|
||||
We often use the fact that a function is invertible if and only if it is one-to-one and onto. We can therefore restate \autoref{Theorem 2.5} as follows:
|
||||
|
||||
|
||||
\begin{enumerate}
|
||||
\setcounter{enumi}{2}
|
||||
\item Let $T: V \to W$ be a linear transformation, where $V$ and $W$ are finite-dimensional vector spaces of equal dimension. Then $T$ is invertible if and only if $\rank{T} = \ldim{V}$.
|
||||
@@ -27,7 +27,7 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $A$ be an $n \times n$ matrix. Then $A$ is \textbf{invertible} if there exists an $n \times n$ matrix $B$ such that $AB = BA = I$.\\
|
||||
|
||||
|
||||
If $A$ is invertible, then the matrix $B$ such that $AB = BA = I$ is unique. (If $C$ were another such matrix, then $C = CI = C(AB) = (CA)B = IB = B$.) The matrix $B$ is called the \textbf{inverse} of $A$ and is denoted by $A^{-1}$.
|
||||
\end{definition}
|
||||
|
||||
@@ -84,4 +84,4 @@
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
For any finite-dimensional vector space $V$ with ordered basis $\beta$, $\phi_\beta$ is an isomorphism.
|
||||
\end{theorem}
|
||||
\end{theorem}
|
||||
|
||||
@@ -3,30 +3,30 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be vector spaces (over $\F$). We call a function $T: V \to W$ a \textbf{linear transformation from $V$ to $W$} if, for all $x,y \in V$, and $c \in \F$, we have
|
||||
|
||||
|
||||
\begin{enumerate}
|
||||
\item $T(x + y) = T(x) + T(y)$, and
|
||||
\item $T(cx) = cT(x)$
|
||||
\end{enumerate}
|
||||
|
||||
|
||||
If the underlying field $\F$ is the field of rational numbers, then (1) implies (2), but, in general (1) and (2) are logically independent.\\
|
||||
|
||||
|
||||
We often simply call $T$ \textbf{linear}.
|
||||
\end{definition}
|
||||
|
||||
\begin{remark}
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be vector spaces (over $\F$). Let $T: V \to W$ be a linear transformation. Then the following properties hold:
|
||||
|
||||
|
||||
\begin{enumerate}
|
||||
\item If $T$ is linear, then $T(0) = 0$.
|
||||
\item $T$ is linear if and only if $T(cx + y) = cT(x) + T(y)$ for all $x,y \in V$ and $c \in \F$.
|
||||
\item If $T$ is linear, then $T(x-y)=T(x)-T(y)$ for all $x,y \in V$.
|
||||
\item $T$ is linear if and only if, for $x_1, x_2, \dots, x_n \in V$ and $a_1, a_2, \dots, a_n \in \F$, we have
|
||||
|
||||
\[T\left(\sum_{i=1}^{n}a_ix_i\right)=\sum_{i=1}^{n}a_iT(x_i).\]
|
||||
|
||||
\[T\left(\sum_{i=1}^{n}a_ix_i\right)=\sum_{i=1}^{n}a_iT(x_i).\]
|
||||
\end{enumerate}
|
||||
|
||||
|
||||
We generally use property 2 to prove that a given transformation is linear.
|
||||
\end{remark}
|
||||
|
||||
@@ -43,17 +43,17 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
For vector spaces $V$ and $W$ (over $\F$), we define the \textbf{identity transformation} $I_V: V \to V$ by $I_V(x) = x$ for all $x \in V$.\\
|
||||
|
||||
|
||||
We define the \textbf{zero transformation} $T_0: V \to W$ by $T_0(x) = 0$ for all $x \in V$.\\
|
||||
|
||||
|
||||
\textbf{Note:} We often write $I$ instead of $I_V$.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be vector spaces, and let $T: V \to W$ be linear. We define the \textbf{null space} (or \textbf{kernel}) $\n{T}$ to be the set of all vectors $x \in V$ such that $T(x)=0$; that is, \\$\n{T} = \{x \in V\ |\ T(x) = 0\}$.
|
||||
|
||||
We define the \textbf{range} (or \textbf{image}) $\range{T}$ of $T$ to be the subset of $W$ consisting of all images (under $T$) of vectors in $V$; that is, $\range{T} = \{T(x)\ |\ x \in V\}$.
|
||||
Let $V$ and $W$ be vector spaces, and let $T: V \to W$ be linear. We define the \textbf{null space} (or \textbf{kernel}) $\n{T}$ to be the set of all vectors $x \in V$ such that $T(x)=0$; that is, \\$\n{T} = \{x \in V : T(x) = 0\}$.
|
||||
|
||||
We define the \textbf{range} (or \textbf{image}) $\range{T}$ of $T$ to be the subset of $W$ consisting of all images (under $T$) of vectors in $V$; that is, $\range{T} = \{T(x) : x \in V\}$.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
@@ -64,7 +64,7 @@
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be vector spaces, and let $T: V \to W$ be linear. If $\beta = \{v_1, v_2, \dots, v_n\}$ is a basis for $V$, then
|
||||
|
||||
|
||||
\[\range{T} = \lspan{T(\beta)} = \lspan{\{T(v_1), T(v_2), \dots, T(v_n)\}}.\]
|
||||
\end{theorem}
|
||||
|
||||
@@ -76,7 +76,7 @@
|
||||
\begin{theorem}[\textbf{Dimension Theorem}]
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be vector spaces, and let $T: V \to W$ be linear. If $V$ is finite-dimensional, then
|
||||
|
||||
|
||||
\[\nullity{T} + \rank{T} = \ldim{V}\]
|
||||
\end{theorem}
|
||||
|
||||
@@ -88,7 +88,7 @@
|
||||
\begin{theorem}\label{Theorem 2.5}
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be vector spaces of equal (finite) dimension, and let $T: V \to W$ be linear. Then the following are equivalent.
|
||||
|
||||
|
||||
\begin{enumerate}
|
||||
\item $T$ is one-to-one.
|
||||
\item $T$ is onto.
|
||||
@@ -114,4 +114,4 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ be a vector space, and let $T: V \to V$ be linear. A subspace $W$ of $V$ is said to be \textbf{$T$-invariant} if $T(x) \in W$ for every $x \in W$, that is, $T(W) \subseteq W$. If $W$ is $T$-invariant, we define the \textbf{restriction of $T$ on $W$} to be the function $T_W: W \to W$ defined by $T_W(x) = T(x)$ for all $x \in W$.
|
||||
\end{definition}
|
||||
\end{definition}
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
\begin{theorem}\label{Theorem 2.22}
|
||||
\hfill\\
|
||||
Let $\beta$ and $\beta'$ be two ordered bases for a finite-dimensional vector space $V$, and let $Q = [I_V]_{\beta'}^\beta$. Then
|
||||
|
||||
|
||||
\begin{enumerate}
|
||||
\item $Q$ is invertible.
|
||||
\item For any $v \in V$, $[v]_\beta = Q[v]_{\beta'}$.
|
||||
@@ -23,11 +23,11 @@
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $T$ be a linear operator on a finite-dimensional vector space $V$, and let $\beta$ and $\beta'$ be ordered bases for $V$. Suppose that $Q$ is the change of coordinate matrix that changes $\beta'$-coordinates into $\beta$-coordinates. Then
|
||||
|
||||
|
||||
\[[T]_{\beta'}=Q^{-1}[T]_\beta Q\]
|
||||
\end{theorem}
|
||||
|
||||
\begin{corollary}
|
||||
\begin{corollary}\label{Corollary 2.8}
|
||||
\hfill\\
|
||||
Let $A \in M_{n \times n}(\F)$, and let $\gamma$ be an ordered basis for $\F^n$. Then $[L_A]_\gamma = Q^{-1}AQ$, where $Q$ is the $n \times n$ matrix whose $j$th column is the $j$th vector of $\gamma$.
|
||||
\end{corollary}
|
||||
@@ -35,6 +35,6 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $A$ and $B$ be matrices in $M_{n \times n}(\F)$. We say that $B$ is \textbf{similar} to $A$ if there exists an invertible matrix $Q$ such that $B = Q^{-1}AQ$.\\
|
||||
|
||||
|
||||
Notice that the relation of similarity is an equivalence relation. So we need only say that $A$ and $B$ are similar.
|
||||
\end{definition}
|
||||
\end{definition}
|
||||
|
||||
@@ -3,36 +3,36 @@
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ be a finite-dimensional vector space. An \textbf{ordered basis} for $V$ is a basis for $V$ endowed with a specific order; that is, an ordered basis for $V$ is a finite sequence of linearly independent vectors in $V$ that generates $V$.\\
|
||||
|
||||
|
||||
For the vector space $\F^n$, we call $\{e_1, e_2, \dots, e_n\}$ the \textbf{standard ordered basis} for $\F^n$. Similarly, for the vector space $P_n(\F)$, we call $\{1, x, \dots, x^n\}$ the \textbf{standard ordered basis} for $P_n(\F)$.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $\beta = \{v_1, v_2, \dots, v_n\}$ be an ordered basis for a finite-dimensional vector space $V$. For $x \in V$, let $a_1, a_2, \dots, a_n$ be the unique scalar values such that
|
||||
|
||||
|
||||
\[x = \sum_{i=1}^{n}a_iv_i.\]
|
||||
|
||||
|
||||
We define the \textbf{coordinate vector of $x$ relative to $\beta$}, denoted by $[x]_\beta$, by
|
||||
|
||||
|
||||
\[[x]_\beta = \begin{pmatrix} a_1 \\ a_2 \\ \vdots \\ a_n\end{pmatrix}.\]
|
||||
|
||||
|
||||
Notice that $[v_i]_\beta = e_i$ in the preceding definition. It can be shown that the correspondence $x \to [x]_\beta$ provides us with a linear transformation from $V$ to $\F^n$.
|
||||
\end{definition}
|
||||
|
||||
\begin{notation}
|
||||
\hfill\\
|
||||
The following notation is used to construct a matrix representation of a linear transformation in the following definition.\\
|
||||
|
||||
|
||||
Suppose that $V$ and $W$ are finite-dimensional vector spaces with ordered bases $\beta = \{v_1, v_2, \dots, v_n\}$ and $\gamma = \{w_1, w_2, \dots, w_m\}$, respectively. Let $T: V \to W$ be linear. Then for each $j$, $1 \leq j \leq n$, there exist unique scalars $a_{ij} \in \F$, $1 \leq i \leq m$, such that
|
||||
|
||||
|
||||
\[T(v_j) = \sum_{i=1}^{m}a_{ij}w_i\ \ \text{for}\ 1 \leq j \leq n.\]
|
||||
\end{notation}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Using the notation above, we call the $m \times n$ matrix $A$ defined by $A_{ij} = a_{ij}$ the \textbf{matrix representation of $T$ in the ordered bases $\beta$ and $\gamma$}, and write $A = [T]_\beta^\gamma$. If $V = W$ and $\beta = \gamma$, then we write $A = [T]_\beta$.
|
||||
|
||||
|
||||
Notice that the $j$th column of $A$ is simply $[T(v_j)]_\gamma$. Also observe that if $U: V \to W$ is a linear transformation such that $[U]_\beta^\gamma = [T]_\beta^\gamma$, then $U=T$ by the corollary to Theorem 2.6 (\autoref{Corollary 2.1}).
|
||||
\end{definition}
|
||||
|
||||
@@ -44,7 +44,7 @@
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be vector spaces over a field $\F$, and let $T,U: V \to W$ be linear.
|
||||
|
||||
|
||||
\begin{enumerate}
|
||||
\item For all $a \in \F$, $aT+U$ is linear.
|
||||
\item Using the operations of addition and scalar multiplication in the preceding definition, the collection of all linear transformations from $V$ to $W$ is a vector space over $\F$.
|
||||
@@ -59,7 +59,7 @@
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be finite-dimensional vector spaces with ordered bases $\beta$ and $\gamma$, respectively, and let $T,U: V \to W$ be linear transformations. Then
|
||||
|
||||
|
||||
\begin{enumerate}
|
||||
\item $[T+U]_\beta^\gamma = [T]_\beta^\gamma + [U]_\beta^\gamma$ and
|
||||
\item $[aT]_\beta^\gamma = a[T]_\beta^\gamma$ for all scalars $a$.
|
||||
|
||||
Reference in New Issue
Block a user