% Source: linear-algebra-theorems-and…/chapter-1/bases-and-dimension.tex
% (file-listing metadata commented out so it does not typeset as body text)
\section{Bases and Dimension}
\begin{definition}
\hfill\\
A \textbf{basis} $\beta$ for a vector space $V$ is a linearly independent subset of $V$ that generates $V$. If $\beta$ is a basis for $V$, we also say that the vectors of $\beta$ form a basis for $V$.
\end{definition}
\begin{theorem}
\hfill\\
Let $V$ be a vector space and $\beta = \{v_1, v_2, \dots, v_n\}$ be a subset of $V$. Then $\beta$ is a basis for $V$ if and only if each $v \in V$ can be uniquely expressed as a linear combination of vectors of $\beta$, that is, can be expressed in the form
\[v = a_1v_1 + a_2v_2 + \dots + a_nv_n\]
for unique scalars $a_1, a_2, \dots, a_n$.
\end{theorem}
\begin{theorem}
\hfill\\
If a vector space $V$ is generated by a finite set $S$, then some subset of $S$ is a basis for $V$. Hence $V$ has a finite basis.
\end{theorem}
\begin{theorem}[\textbf{Replacement Theorem}]
\hfill\\
Let $V$ be a vector space that is generated by a set $G$ containing exactly $n$ vectors, and let $L$ be a linearly independent subset of $V$ containing exactly $m$ vectors. Then $m \leq n$ and there exists a subset $H$ of $G$ containing exactly $n-m$ vectors such that $L \cup H$ generates $V$.
\end{theorem}
\begin{corollary}
\hfill\\
Let $V$ be a vector space having a finite basis. Then every basis for $V$ contains the same number of vectors.
\end{corollary}
\begin{definition}
\hfill\\
A vector space is called \textbf{finite-dimensional} if it has a basis consisting of a finite number of vectors. The unique number of vectors in each basis for $V$ is called the \textbf{dimension} of $V$ and is denoted by $\dim(V)$. A vector space that is not finite-dimensional is called \textbf{infinite-dimensional}.
\end{definition}
\begin{corollary}
\hfill\\
Let $V$ be a vector space with dimension $n$.
\begin{enumerate}
\item Any finite generating set for $V$ contains at least $n$ vectors, and a generating set for $V$ that contains exactly $n$ vectors is a basis for $V$.
\item Any linearly independent subset of $V$ that contains exactly $n$ vectors is a basis for $V$.
\item Every linearly independent subset of $V$ can be extended to a basis for $V$.
\end{enumerate}
\end{corollary}
\begin{theorem}
\hfill\\
Let $W$ be a subspace of a finite-dimensional vector space $V$. Then $W$ is finite-dimensional and $\dim(W) \leq \dim(V)$. Moreover, if $\dim(W) = \dim(V)$, then $V = W$.
\end{theorem}
\begin{corollary}
\hfill\\
If $W$ is a subspace of a finite-dimensional vector space $V$, then any basis for $W$ can be extended to a basis for $V$.
\end{corollary}
\begin{definition}[\textbf{The Lagrange Interpolation Formula}]
\hfill\\
Corollary 2 of the replacement theorem can be applied to obtain a useful formula. Let $c_0, c_1, \dots, c_n$ be distinct scalars in an infinite field $\F$. The polynomials $f_0(x), f_1(x), \dots, f_n(x)$ defined by
\[f_i(x) = \frac{(x-c_0)\cdots(x-c_{i-1})(x-c_{i+1})\cdots(x-c_n)}{(c_i - c_0)\cdots(c_i-c_{i-1})(c_i-c_{i+1})\cdots(c_i-c_n)} = \prod_{\substack{k=0 \\ k \neq i}}^{n} \frac{x-c_k}{c_i - c_k}\]
are called the \textbf{Lagrange polynomials} (associated with $c_0, c_1, \dots, c_n$). Note that each $f_i(x)$ is a polynomial of degree $n$ and hence is in $P_n(\F)$. By regarding $f_i(x)$ as a polynomial function $f_i: \F \to \F$, we see that
\begin{equation}
f_i(c_j) = \begin{cases}
0 &\text{if}\ i \neq j,\\
1 &\text{if}\ i = j.
\end{cases}
\label{eq:lagrange-delta}
\end{equation}
This property of Lagrange polynomials can be used to show that $\beta = \{f_0, f_1, \dots, f_n\}$ is a linearly independent subset of $P_n(\F)$. Suppose that
\[\sum_{i=0}^{n}a_if_i = 0\ \ \text{for some scalars}\ a_0, a_1, \dots, a_n,\]
where $0$ denotes the zero function. Then
\[\sum_{i=0}^{n}a_if_i(c_j)=0\ \ \text{for}\ j=0, 1, \dots, n.\]
But also
\[\sum_{i=0}^{n}a_if_i(c_j)=a_j\]
by \eqref{eq:lagrange-delta}. Hence $a_j = 0$ for $j = 0, 1, \dots, n$; so $\beta$ is linearly independent. Since the dimension of $P_n(\F)$ is $n + 1$, it follows from Corollary 2 of the replacement theorem that $\beta$ is a basis for $P_n(\F)$.
Because $\beta$ is a basis for $P_n(\F)$, every polynomial function $g$ in $P_n(\F)$ is a linear combination of polynomial functions of $\beta$, say,
\[g = \sum_{i=0}^{n}b_if_i.\]
It follows that
\[g(c_j)=\sum_{i=0}^{n}b_if_i(c_j)=b_j;\]
so
\[g=\sum_{i=0}^{n}g(c_i)f_i\]
is the unique representation of $g$ as a linear combination of elements of $\beta$. This representation is called the \textbf{Lagrange interpolation formula}. Notice that the preceding argument shows that if $b_0, b_1, \dots, b_n$ are any $n+1$ scalars in $\F$ (not necessarily distinct), then the polynomial function
\[g = \sum_{i=0}^{n}b_if_i\]
is the unique polynomial in $P_n(\F)$ such that $g(c_j) = b_j$. Thus we have found the unique polynomial of degree not exceeding $n$ that has specified values $b_j$ at given points $c_j$ in its domain ($j = 0, 1, \dots, n$).

An important consequence of the Lagrange interpolation formula is the following result: If $f \in P_n(\F)$ and $f(c_i) = 0$ for $n+1$ distinct scalars $c_0, c_1, \dots, c_n$ in $\F$, then $f$ is the zero function.
\end{definition}