Added subsections when they appear, added all of the appendices, and finished the packet
This commit is contained in:
Binary file not shown.
@@ -37,5 +37,6 @@
|
||||
\import{chapter-5/}{chapter-5.tex}
|
||||
\import{chapter-6/}{chapter-6.tex}
|
||||
\import{chapter-7/}{chapter-7.tex}
|
||||
\import{appendices/}{appendices.tex}
|
||||
|
||||
\end{document}
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
\chapter{Appendices}
|
||||
\subimport{./}{sets.tex}
|
||||
\subimport{./}{functions.tex}
|
||||
\subimport{./}{fields.tex}
|
||||
\subimport{./}{complex-numbers.tex}
|
||||
\subimport{./}{polynomials.tex}
|
||||
@@ -0,0 +1,83 @@
|
||||
\begin{alphasection}
|
||||
\setcounter{alphasect}{3}
|
||||
\section{Complex Numbers}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A \textbf{complex number} is an expression of the form $z = a + bi$, where $a$ and $b$ are real numbers called the \textbf{real part} and the \textbf{imaginary part} of $z$, respectively.
|
||||
|
||||
The \textbf{sum} and \textbf{product} of two complex numbers $z = a + bi$ and $w = c+di$ (where $a$, $b$, $c$, and $d$ are real numbers) are defined, respectively, as follows:
|
||||
|
||||
\[z+w = (a + bi) + (c+di) = (a+c) + (b+d)i\]
|
||||
|
||||
and
|
||||
|
||||
\[zw = (a+bi)(c+di) = (ac-bd)+(bc+ad)i\]
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Any complex number of the form $bi=0 + bi$, where $b$ is a nonzero real number, is called \textbf{imaginary}.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
The set of complex numbers with the operations of addition and multiplication previously defined is a field.
|
||||
\end{theorem}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
The (\textbf{complex}) \textbf{conjugate} of a complex number $a+bi$ is the complex number $a-bi$. We denote the conjugate of a complex number $z$ by $\overline{z}$.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $z$ and $w$ be complex numbers. Then the following statements are true.
|
||||
|
||||
\begin{enumerate}
|
||||
\item $\overline{\overline{z}} = z$.
|
||||
\item $\overline{(z+ w)} = \overline{z}+ \overline{w}$.
|
||||
\item $\overline{zw} = \overline{z}\cdot\overline{w}$.
|
||||
\item $\overline{(\frac{z}{w})} = \frac{\overline{z}}{\overline{w}}$ if $w \neq 0$.
|
||||
\item $z$ is a real number if and only if $\overline{z} = z$.
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $z = a + bi$, where $a,b \in \R$. The \textbf{absolute value} (or \textbf{modulus}) of $z$ is the real number $\sqrt{a^2 + b^2}$. We denote the absolute value of $z$ as $|z|$.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $z$ and $w$ denote any two complex numbers. Then the following statements are true.
|
||||
|
||||
\begin{enumerate}
|
||||
\item $|zw| = |z| \cdot |w|$.
|
||||
\item $\abs{\frac{z}{w}} = \frac{|z|}{|w|}$ if $w \neq 0$.
|
||||
\item $|z + w| \leq |z| + |w|$.
|
||||
\item $|z| - |w| \leq |z + w|$.
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Notice that, as in $\R^2$, there are two axes, the \textbf{real axis} and the \textbf{imaginary axis}.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}[\textbf{The Fundamental Theorem of Algebra}]
|
||||
\hfill\\
|
||||
Suppose that $p(z) = a_nz^n + a_{n-1}z^{n-1} + \dots + a_1z + a_0$ is a polynomial in $P(\C)$ of degree $n \geq 1$. Then $p(z)$ has a zero.
|
||||
\end{theorem}
|
||||
|
||||
\begin{corollary}
|
||||
If $p(z) = a_nz^n + a_{n-1}z^{n-1} + \dots + a_1z + a_0$ is a polynomial of degree $n \geq 1$ with complex coefficients, then there exist complex numbers $c_1, c_2, \dots, c_n$ (not necessarily distinct) such that
|
||||
|
||||
\[p(z) = a_n(z-c_1)(z-c_2)\dots(z-c_n).\]
|
||||
\end{corollary}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A field is called \textbf{algebraically closed} if it has the property that every polynomial of positive degree with coefficients from that field has a zero. Thus the preceding corollary asserts that the field of complex numbers is algebraically closed.
|
||||
\end{definition}
|
||||
\end{alphasection}
|
||||
@@ -0,0 +1,65 @@
|
||||
\begin{alphasection}
|
||||
\setcounter{alphasect}{2}
|
||||
\section{Fields}
|
||||
|
||||
\begin{definition}
|
||||
A field $\F$ is a set on which two operations $+$ and $\cdot$ (called \textbf{addition} and \textbf{multiplication}, respectively) are defined so that, for each pair of elements $x,y \in \F$, there are unique elements $x+y$ and $x \cdot y$ in $\F$ for which the following conditions hold for all elements $a,b,c \in \F$.
|
||||
|
||||
\begin{enumerate}
|
||||
\item[(F 1)] $a + b = b + a$ and $a\cdot b = b \cdot a$\\
|
||||
(commutativity of addition and multiplication)
|
||||
\item[(F 2)] $(a + b) + c = a + (b + c)$ and $(a \cdot b)\cdot c = a \cdot (b \cdot c)$\\
|
||||
(associativity of addition and multiplication)
|
||||
\item[(F 3)] There exist distinct elements $0$ and $1$ in $\F$ such that
|
||||
|
||||
\[0+a = a\ \ \ \ \text{and}\ \ \ \ 1\cdot a = a\]
|
||||
(existence of identity elements for addition and multiplication)
|
||||
|
||||
\item[(F 4)] For each element $a$ in $\F$ and each nonzero element $b$ in $\F$, there exist elements $c$ and $d$ in $\F$ such that
|
||||
|
||||
\[a+c = 0\ \ \ \ \text{and}\ \ \ \ b\cdot d = 1\]
|
||||
(existence of inverses for addition and multiplication)
|
||||
|
||||
\item[(F 5)] $a \cdot(b + c) = a\cdot b + a \cdot c$\\
|
||||
(distributivity of multiplication over addition)
|
||||
\end{enumerate}
|
||||
|
||||
The elements $x + y$ and $x \cdot y$ are called the \textbf{sum} and \textbf{product}, respectively, of $x$ and $y$. The elements $0$ (read ``\textbf{zero}'') and $1$ (read ``\textbf{one}'') mentioned in (F 3) are called \textbf{identity elements} for addition and multiplication, respectively, and the elements $c$ and $d$ referred to in (F 4) are called an \textbf{additive inverse} for $a$ and a \textbf{multiplicative inverse} for $b$, respectively.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}[\textbf{Cancellation Laws}]
|
||||
\hfill\\
|
||||
For arbitrary elements $a$, $b$, and $c$ in a field, the following statements are true.
|
||||
|
||||
\begin{enumerate}
|
||||
\item If $a + b = c + b$, then $a=c$.
|
||||
\item If $a \cdot b = c \cdot b$ and $b \neq 0$, then $a=c$.
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
The elements $0$ and $1$ mentioned in (F 3), and the elements $c$ and $d$ mentioned in (F 4), are unique.
|
||||
\end{corollary}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $a$ and $b$ be arbitrary elements of a field. Then each of the following statements are true.
|
||||
|
||||
\begin{enumerate}
|
||||
\item $a \cdot 0 = 0$.
|
||||
\item $(-a)\cdot b = a \cdot(-b) = -(a \cdot b)$.
|
||||
\item $(-a) \cdot (-b) = a \cdot b$.
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
The additive identity of a field has no multiplicative inverse.
|
||||
\end{corollary}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
In an arbitrary field $\F$, the smallest positive integer $p$ for which a sum of $p$ 1's equals 0 is called the \textbf{characteristic} of $\F$; if no such positive integer exists, then $\F$ is said to have \textbf{characteristic zero}.
|
||||
\end{definition}
|
||||
\end{alphasection}
|
||||
@@ -0,0 +1,36 @@
|
||||
\begin{alphasection}
|
||||
\setcounter{alphasect}{1}
|
||||
\section{Functions}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
If $A$ and $B$ are sets, then a \textbf{function} $f$ from $A$ to $B$, written $f: A \to B$, is a rule that associates to each element $x$ in $A$ a unique element denoted $f(x)$ in $B$.\\
|
||||
|
||||
The element $f(x)$ is called the \textbf{image} of $x$ (under $f$), and $x$ is called a \textbf{preimage} of $f(x)$ (under $f$).\\
|
||||
|
||||
If $f: A \to B$, then $A$ is called the \textbf{domain} of $f$, $B$ is called the \textbf{codomain} of $f$, and the set $\{f(x) : x \in A\}$ is called the \textbf{range} of $f$.\\
|
||||
|
||||
Two functions $f: A \to B$ and $g: A \to B$ are \textbf{equal}, written $f=g$, if $f(x) = g(x)$ for all $x \in A$.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Functions such that each element of the range has a unique preimage are called \textbf{one-to-one}; that is, $f: A \to B$ is one-to-one if $f(x) = f(y)$ implies $x=y$ or, equivalently, if $x \neq y$ implies $f(x) \neq f(y)$.\\
|
||||
|
||||
If $f: A \to B$ is a function with range $B$, that is, if $f(A) = B$, then $f$ is called \textbf{onto}. So $f$ is onto if and only if the range of $f$ equals the codomain of $f$.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $f: A \to B$ be a function and $S \subseteq A$. Then a function $f_S: S \to B$, called the \textbf{restriction} of $f$ to $S$, can be formed by defining $f_S(x) = f(x)$ for each $x \in S$.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A function $f: A \to B$ is said to be \textbf{invertible} if there exists a function $g: B \to A$ such that $(f \circ g)(y) = y$ for all $y \in B$ and $(g \circ f)(x)=x$ for all $x \in A$.
|
||||
|
||||
If such a function $g$ exists, then it is unique and is called the \textbf{inverse} of $f$. We denote the inverse of $f$ (when it exists) by $f^{-1}$.
|
||||
|
||||
It can be shown that $f$ is invertible if and only if $f$ is both one-to-one and onto.
|
||||
\end{definition}
|
||||
\end{alphasection}
|
||||
@@ -0,0 +1,133 @@
|
||||
\begin{alphasection}
|
||||
\setcounter{alphasect}{4}
|
||||
\section{Polynomials}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A polynomial $f(x)$ \textbf{divides} a polynomial $g(x)$ if there exists a polynomial $q(x)$ such that $g(x) = f(x)q(x)$.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}[\textbf{The Division Algorithm for Polynomials}]\label{Theorem 8.7}
|
||||
Let $f(x)$ be a polynomial of degree $n$, and let $g(x)$ be a polynomial of degree $m \geq 0$. Then there exist unique polynomials $q(x)$ and $r(x)$ such that
|
||||
|
||||
\[f(x) = q(x)g(x)+r(x),\]
|
||||
|
||||
where the degree of $r(x)$ is less than $m$.
|
||||
\end{theorem}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
In the context of \autoref{Theorem 8.7}, we call $q(x)$ and $r(x)$ the \textbf{quotient} and \textbf{remainder}, respectively, for the division of $f(x)$ by $g(x)$.
|
||||
\end{definition}
|
||||
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
Let $f(x)$ be a polynomial of positive degree, and let $a \in \F$. Then $f(a) = 0$ if and only if $x-a$ divides $f(x)$.
|
||||
\end{corollary}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
For any polynomial $f(x)$ with coefficients from a field $\F$, an element $a \in \F$ is called a \textbf{zero} of $f(x)$ if $f(a) =0$. With this terminology, the preceding corollary states that $a$ is a zero of $f(x)$ if and only if $x-a$ divides $f(x)$.
|
||||
\end{definition}
|
||||
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
Any polynomial of degree $n \geq 1$ has at most $n$ distinct zeros.
|
||||
\end{corollary}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Two nonzero polynomials are called \textbf{relatively prime} if no polynomial of positive degree divides each of them.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
If $f_1(x)$ and $f_2(x)$ are relatively prime polynomials, there exist polynomials $q_1(x)$ and $q_2(x)$ such that
|
||||
|
||||
\[q_1(x)f_1(x) + q_2(x)f_2(x) = 1,\]
|
||||
|
||||
where $1$ denotes the constant polynomial with value $1$.
|
||||
\end{theorem}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let
|
||||
|
||||
\[f(x) = a_0 + a_1(x) + \dots + a_nx^n\]
|
||||
|
||||
be a polynomial with coefficients from a field $\F$. If $T$ is a linear operator on a vector space $V$ over $\F$, we define
|
||||
|
||||
\[f(T) = a_0I + a_1T + \dots + a_nT^n.\]
|
||||
|
||||
Similarly, if $A$ is an $n \times n$ matrix with entries from $\F$, we define
|
||||
|
||||
\[f(A) = a_0I+ a_1A + \dots + a_nA^n.\]
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $f(x)$ be a polynomial with coefficients from a field $\F$, and let $T$ be a linear operator on a vector space $V$ over $\F$. Then the following statements are true.
|
||||
|
||||
\begin{enumerate}
|
||||
\item $f(T)$ is a linear operator on $V$.
|
||||
\item If $\beta$ is a finite ordered basis for $V$ and $A=[T]_\beta$, then $[f(T)]_\beta = f(A)$.
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $T$ be a linear operator on a vector space $V$ over a field $\F$, and let $A$ be a square matrix with entries from $\F$. Then, for any polynomials $f_1(x)$ and $f_2(x)$ with coefficients from $\F$,
|
||||
|
||||
\begin{enumerate}
|
||||
\item $f_1(T)f_2(T) = f_2(T)f_1(T)$
|
||||
\item $f_1(A)f_2(A) = f_2(A)f_1(A)$.
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $T$ be a linear operator on a vector space $V$ over a field $\F$, and let $A$ be an $n \times n$ matrix with entries from $\F$. If $f_1(x)$ and $f_2(x)$ are relatively prime polynomials with coefficients from $\F$, then there exist polynomials $q_1(x)$ and $q_2(x)$ with coefficients from $\F$ such that
|
||||
|
||||
\begin{enumerate}
|
||||
\item $q_1(T)f_1(T) + q_2(T)f_2(T) = I$
|
||||
\item $q_1(A)f_1(A) + q_2(A)f_2(A) = I$.
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A polynomial $f(x)$ with coefficients from a field $\F$ is called \textbf{monic} if its leading coefficient is 1. If $f(x)$ has positive degree and cannot be expressed as a product of polynomials with coefficients from $\F$ each having positive degree, then $f(x)$ is called \textbf{irreducible}.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $\phi(x)$ and $f(x)$ be polynomials. If $\phi(x)$ is irreducible and $\phi(x)$ does not divide $f(x)$, then $\phi(x)$ and $f(x)$ are relatively prime.
|
||||
\end{theorem}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Any two distinct irreducible monic polynomials are relatively prime.
|
||||
\end{theorem}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $f(x)$, $g(x)$, and $\phi(x)$ be polynomials. If $\phi(x)$ is irreducible and divides the product $f(x)g(x)$, then $\phi(x)$ divides $f(x)$ or $\phi(x)$ divides $g(x)$.
|
||||
\end{theorem}
|
||||
|
||||
\begin{corollary}
|
||||
\hfill\\
|
||||
Let $\phi(x), \phi_1(x), \phi_2(x), \dots, \phi_n(x)$ be irreducible monic polynomials. If $\phi(x)$ divides the product $\phi_1(x) \phi_2(x) \dots \phi_n(x)$, then $\phi(x) = \phi_i(x)$ for some $i$ ($i = 1, 2, \dots, n$).
|
||||
\end{corollary}
|
||||
|
||||
\begin{theorem}[\textbf{Unique Factorization Theorem for Polynomials}]
|
||||
\hfill\\
|
||||
For any polynomial $f(x)$ of positive degree, there exist a unique constant $c$; unique distinct irreducible monic polynomials $\phi_1(x),\phi_2(x), \dots, \phi_k(x)$; and unique positive integers $n_1, n_2, \dots, n_k$ such that
|
||||
|
||||
\[f(x) = c[\phi_1(x)]^{n_1} [\phi_2(x)]^{n_2} \dots [\phi_k(x)]^{n_k}.\]
|
||||
\end{theorem}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $f(x)$ and $g(x)$ be polynomials with coefficients from an infinite field $\F$. If $f(a)= g(a)$ for all $a \in \F$, then $f(x)$ and $g(x)$ are equal.
|
||||
\end{theorem}
|
||||
\end{alphasection}
|
||||
@@ -0,0 +1,68 @@
|
||||
\begin{alphasection}
|
||||
\section{Sets}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A \textbf{set} is a collection of objects, called \textbf{elements} of the set. If $x$ is an element of the set $A$, then we write $x \in A$; otherwise, we write $x \notin A$.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Two sets $A$ and $B$ are called \textbf{equal}, written $A = B$, if they contain exactly the same elements.
|
||||
\end{definition}
|
||||
|
||||
\begin{remark}
|
||||
\hfill\\
|
||||
Sets may be described in one of two ways
|
||||
|
||||
\begin{enumerate}
|
||||
\item By listing the elements of the set between set braces $\{$ $\}$.
|
||||
\item By describing the elements of the set in terms of some characteristic property.
|
||||
\end{enumerate}
|
||||
\end{remark}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A set $B$ is called a \textbf{subset} of $A$, written $B \subseteq A$ or $A \supseteq B$, if every element of $B$ is an element of $A$. If $B \subseteq A$, and $B \neq A$, then $B$ is called a \textbf{proper subset} of $A$.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
The \textbf{empty set}, denoted by $\emptyset$, is the set containing no elements. The empty set is a subset of every set.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
The \textbf{union} of two sets $A$ and $B$, denoted $A \cup B$, is the set of elements that are in $A$, or $B$, or both; that is,
|
||||
|
||||
\[A \cup B = \{x : x \in A\ \text{or}\ x \in B\}.\]
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
The \textbf{intersection} of two sets $A$ and $B$, denoted $A \cap B$, is the set of elements that are in both $A$ and $B$; that is,
|
||||
|
||||
\[A \cap B = \{x : x \in A\ \text{and}\ x \in B\}.\]
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Two sets are called \textbf{disjoint} if their intersection equals the empty set.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A \textbf{relation} on a set $A$ is a set $S$ of ordered pairs of elements of $A$ such that $(x,y) \in S$ if and only if $x$ stands in the given relationship to $y$.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A relation $S$ on a set $A$ is called an \textbf{equivalence relation} on $A$ if the following three conditions hold:
|
||||
|
||||
\begin{enumerate}
|
||||
\item For each $x \in A$, $x \sim x$ (reflexivity).
|
||||
\item If $x \sim y$, then $y \sim x$ (symmetry).
|
||||
\item If $x \sim y$ and $y \sim z$, then $x \sim z$ (transitivity).
|
||||
\end{enumerate}
|
||||
\end{definition}
|
||||
\end{alphasection}
|
||||
@@ -33,7 +33,7 @@
|
||||
A vector space is called \textbf{finite-dimensional} if it has a basis consisting of a finite number of vectors. The unique number of vectors in each basis for $V$ is called the \textbf{dimension} of $V$ and is denoted by $\text{dim}(V)$. A vector space that is not finite-dimensional is called \textbf{infinite-dimensional}.
|
||||
\end{definition}
|
||||
|
||||
\begin{corollary}
|
||||
\begin{corollary}\label{Corollary 1.5}
|
||||
\hfill\\
|
||||
Let $V$ be a vector space with dimension $n$.
|
||||
\begin{enumerate}
|
||||
@@ -45,6 +45,9 @@
|
||||
\end{enumerate}
|
||||
\end{corollary}
|
||||
|
||||
\subsection*{The Dimension of Subspaces}
|
||||
\addcontentsline{toc}{subsection}{The Dimension of Subspaces}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $W$ be a subspace of a finite-dimensional vector space $V$. Then $W$ is finite-dimensional and $\text{dim}(W) \leq \text{dim}(V)$. Moreover, if $\text{dim}(W) = \text{dim}(V)$, then $V = W$.
|
||||
@@ -55,10 +58,12 @@
|
||||
If $W$ is a subspace of a finite-dimensional vector space $V$, then any basis for $W$ can be extended to a basis for $V$.
|
||||
\end{corollary}
|
||||
|
||||
\subsection*{The Lagrange Interpolation Formula}
|
||||
\addcontentsline{toc}{subsection}{The Lagrange Interpolation Formula}
|
||||
|
||||
\begin{definition}[\textbf{The Lagrange Interpolation Formula}]
|
||||
\hfill\\
|
||||
Corollary 2 of the replacement theorem can be applied to obtain a useful formula. Let $c_0, c_1, \dots, c_n$ be distinct scalars in an infinite field $\F$. The polynomials $f_0(x), f_1(x), \dots, f_n(x)$ defined by
|
||||
\autoref{Corollary 1.5} of the replacement theorem can be applied to obtain a useful formula. Let $c_0, c_1, \dots, c_n$ be distinct scalars in an infinite field $\F$. The polynomials $f_0(x), f_1(x), \dots, f_n(x)$ defined by
|
||||
|
||||
\[f_i(x) = \frac{(x-c_0)\dots(x-c_{i-1})(x-c_{i+1})\dots(x-c_n)}{(c_i - c_0)\dots(c_i-c_{i-1})(c_i-c_{i+1})\dots(c_i-c_n)} = \prod_{\substack{k=0 \\ k \neq i}}^{n} \frac{x-c_k}{c_i - c_k}\]
|
||||
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
\section{Introduction}
|
||||
|
||||
\begin{theorem}[\textbf{Parallelogram Law for Vector Addition}]
|
||||
\begin{definition}[\textbf{Parallelogram Law for Vector Addition}]
|
||||
\hfill\\
|
||||
The sum of two vectors $x$ and $y$ that act at the same point $P$ is the vector beginning at $P$ that is represented by the diagonal of the parallelogram having $x$ and $y$ as adjacent sides.
|
||||
\end{theorem}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
|
||||
@@ -109,6 +109,9 @@
|
||||
Let $A$, $B$, and $C$ be matrices such that $A(BC)$ is defined. Then $(AB)C$ is also defined and $A(BC)=(AB)C$; that is, matrix multiplication is associative.
|
||||
\end{theorem}
|
||||
|
||||
\subsection*{Applications}
|
||||
\addcontentsline{toc}{subsection}{Applications}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
An \textbf{incidence matrix} is a square matrix in which all the entries are either zero or one and, for convenience, all the diagonal entries are zero. If we have a relationship on a set of $n$ objects that we denote $1, 2, \dots, n$, then we define the associated incidence matrix $A$ by $A_{ij} = 1$ if $i$ is related to $j$, and $A_{ij} = 0$ otherwise.
|
||||
|
||||
@@ -66,6 +66,9 @@
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\subsection*{An Interpretation of the Reduced Row Echelon Form}
|
||||
\addcontentsline{toc}{subsection}{An Interpretation of the Reduced Row Echelon Form}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix of rank $r$, where $r > 0$, and let $B$ be the reduced row echelon form of $A$. Then
|
||||
|
||||
@@ -94,6 +94,9 @@
|
||||
Let $Ax = b$ be a system of linear equations. Then the system is consistent if and only if $\rank{A} = \rank{A|b}$.
|
||||
\end{theorem}
|
||||
|
||||
\subsection*{An Application}
|
||||
\addcontentsline{toc}{subsection}{An Application}
|
||||
|
||||
\begin{definition}
|
||||
Consider a system of linear equations
|
||||
|
||||
|
||||
@@ -82,6 +82,9 @@
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\subsection*{The Inverse of a Matrix}
|
||||
\addcontentsline{toc}{subsection}{The Inverse of a Matrix}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $A$ and $B$ be $m \times n$ and $m \times p$ matrices, respectively. By the \textbf{augmented matrix} $(A|B)$, we mean the $m \times (n + p)$ matrix $(A\ B)$, that is, the matrix whose first $n$ columns are the columns of $A$, and whose last $p$ columns are the columns of $B$.
|
||||
|
||||
@@ -50,6 +50,9 @@
|
||||
By the \textbf{angle} between two vectors in $\R^2$, we mean the angle with measure $\theta$ ($0 \leq \theta < \pi$) that is formed by the vectors having the same magnitude and direction as the given vectors by emanating from the origin.
|
||||
\end{definition}
|
||||
|
||||
\subsection*{The Area of a Parallelogram}
|
||||
\addcontentsline{toc}{subsection}{The Area of a Parallelogram}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
If $\beta = \{u,v\}$ is an ordered basis for $\R^2$, we define the \textbf{orientation} of $\beta$ to be the real number
|
||||
|
||||
@@ -63,7 +63,10 @@
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\begin{remark}[\textbf{Test for Diagonalization}]
|
||||
\subsection*{Test for Diagonalization}
|
||||
\addcontentsline{toc}{subsection}{Test for Diagonalization}
|
||||
|
||||
\begin{remark}
|
||||
\hfill\\
|
||||
Let $T$ be a linear operator on an $n$-dimensional vector space $V$. Then $T$ is diagonalizable if and only if both of the following conditions hold.
|
||||
|
||||
|
||||
@@ -29,6 +29,9 @@
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\subsection*{The Cayley-Hamilton Theorem}
|
||||
\addcontentsline{toc}{subsection}{The Cayley-Hamilton Theorem}
|
||||
|
||||
\begin{theorem}[\textbf{Cayley-Hamilton}]
|
||||
\hfill\\
|
||||
Let $T$ be a linear operator on a finite-dimensional vector space $V$, and let $f(t)$ be the characteristic polynomial of $T$. Then $f(T) = T_0$, the zero transformation. That is, $T$ ``satisfies'' its characteristic equation.
|
||||
@@ -39,6 +42,9 @@
|
||||
Let $A$ be an $n \times n$ matrix, and let $f(t)$ be the characteristic polynomial of $A$. Then $f(A) = O$, the $n \times n$ zero matrix.
|
||||
\end{corollary}
|
||||
|
||||
\subsection*{Invariant Subspaces and Direct Sums}
|
||||
\addcontentsline{toc}{subsection}{Invariant Subspaces and Direct Sums}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $T$ be a linear operator on a finite-dimensional vector space $V$, and suppose that $V = W_1 \oplus W_2 \oplus \dots \oplus W_k$, where $W_i$ is a $T$-invariant subspace of $V$ for each $i$ ($1 \leq i \leq k$). Suppose that $f_i(t)$ is the characteristic polynomial of $T_{W_i}$ ($1 \leq i \leq k$). Then $f_1(t)\cdot f_2(t) \cdot \dots \cdot f_k(t)$ is the characteristic polynomial of $T$.
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
\section{Bilinear and Quadratic Forms}
|
||||
|
||||
\subsection*{Bilinear Forms}
|
||||
\addcontentsline{toc}{subsection}{Bilinear Forms}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ be a vector space over a field $\F$. A function $H$ from the set $V \times V$ of ordered pairs of vectors to $\F$ is called a \textbf{bilinear form} on $V$ if $H$ is linear in each variable when the other variable is held fixed; that is, $H$ is a bilinear form on $V$ if
|
||||
@@ -76,6 +79,9 @@
|
||||
Let $V$ be an $n$-dimensional vector space with ordered basis $\beta$, and let $H$ be a bilinear form on $V$. For any $n \times n$ matrix $B$, if $B$ is congruent to $\psi_\beta(H)$, then there exists an ordered basis $\gamma$ for $V$ such that $\psi_\gamma(H) = B$. Furthermore, if $B = Q^t\psi_\beta(H)Q$ for some invertible matrix $Q$, then $Q$ changes $\gamma$-coordinates into $\beta$-coordinates.
|
||||
\end{corollary}
|
||||
|
||||
\subsection*{Symmetric Bilinear Forms}
|
||||
\addcontentsline{toc}{subsection}{Symmetric Bilinear Forms}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A bilinear form $H$ on a vector space $V$ is \textbf{symmetric} if $H(x,y) = H(y,x)$ for all $x,y \in V$.
|
||||
@@ -116,6 +122,9 @@
|
||||
Let $\F$ be a field that is not of characteristic two. If $A \in M_{n \times n}(\F)$ is a symmetric matrix, then $A$ is congruent to a diagonal matrix.
|
||||
\end{corollary}
|
||||
|
||||
\subsection*{Quadratic Forms}
|
||||
\addcontentsline{toc}{subsection}{Quadratic Forms}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ be a vector space over $\F$. A function $K: V \to \F$ is called a \textbf{quadratic form} if there exists a symmetric bilinear form $H \in \mathcal{B}(V)$ such that
|
||||
@@ -123,6 +132,9 @@
|
||||
\[K(x) = H(x, x)\ \ \ \text{for all}\ x \in V.\]
|
||||
\end{definition}
|
||||
|
||||
\subsection*{Quadratic Forms Over the Field $\R$}
|
||||
\addcontentsline{toc}{subsection}{Quadratic Forms Over the Field $\R$}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $V$ be a finite-dimensional real inner product space, and let $H$ be a symmetric bilinear form on $V$. Then there exists an orthonormal basis $\beta$ for $V$ such that $\psi_\beta(H)$ is a diagonal matrix.
|
||||
@@ -141,6 +153,9 @@
|
||||
In fact, if $H$ is the symmetric bilinear form determined by $K$, then $\beta$ can be chosen to be any orthonormal basis for $V$ such that $\psi_\beta(H)$ is a diagonal matrix.
|
||||
\end{corollary}
|
||||
|
||||
\subsection*{The Second Derivative Test for Functions of Several Variables}
|
||||
\addcontentsline{toc}{subsection}{The Second Derivative Test for Functions of Several Variables}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $z=f(t_1, t_2, \dots, t_n)$ be a fixed real-valued function of $n$ real variables for which all third-order partial derivatives exist and are continuous. The function $f$ is said to have a \textbf{local maximum} at point $p \in \R^n$ if there exists a $\delta > 0$ such that $f(p) \geq f(x)$ whenever $||x - p|| < \delta$. Likewise, $f$ has a \textbf{local minimum} at $p \in \R^n$ if there exists a $\delta > 0$ such that $f(p) \leq f(x)$ whenever $||x - p|| < \delta$. If $f$ has either a local minimum or a local maximum at $p$, we say that $f$ has a \textbf{local extremum} at $p$. A point $p \in \R^n$ is called a \textbf{critical point} of $f$ if $\displaystyle\frac{\partial f(p)}{\partial(t_i)} = 0$ for $i = 1, 2, \dots, n$. It is a well known fact that if $f$ has a local extremum at a point $p \in \R^n$, then $p$ is a critical point of $f$. For, if $f$ has a local extremum at $p=(p_1, p_2, \dots, p_n)$, then for any $i = 1, 2, \dots, n$, the function $\phi_i$ defined by $\phi_i(t) = f(p_1, p_2, \dots, p_{i-1}, t, p_{i+1}, \dots, p_n)$ has a local extremum at $t = p_i$. So, by an elementary single-variable argument,
|
||||
@@ -168,6 +183,9 @@
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\subsection*{Sylvester's Law of Inertia}
|
||||
\addcontentsline{toc}{subsection}{Sylvester's Law of Inertia}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
The \textbf{rank} of a bilinear form is the rank of any of its matrix representations.
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
\section{Einstein's Special Theory of Relativity}
|
||||
|
||||
\begin{definition}[\textbf{Axioms of the Special Theory of Relativity}]
|
||||
\subsection*{Axioms of the Special Theory of Relativity}
|
||||
\addcontentsline{toc}{subsection}{Axioms of the Special Theory of Relativity}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
The basic problem is to compare two different inertial (non-accelerating) coordinate systems $S$ and $S'$ in three-space ($\R^3$) that are in motion relative to each other under the assumption that the speed of light is the same when measured in either system. We assume that $S'$ moves at a constant velocity in relation to $S$ as measured from $S$. To simplify matters, let us suppose that the following conditions hold:
|
||||
|
||||
|
||||
@@ -58,6 +58,9 @@
|
||||
\end{enumerate}
|
||||
\end{corollary}
|
||||
|
||||
\subsection*{Least Squares Approximation}
|
||||
\addcontentsline{toc}{subsection}{Least Squares Approximation}
|
||||
|
||||
\begin{lemma}
|
||||
\hfill\\
|
||||
Let $A \in M_{m \times n}(\F), x \in F^n$, and $y \in F^m$. Then
|
||||
@@ -85,6 +88,9 @@
|
||||
A solution $s$ to a system of linear equations $Ax = b$ is called a \textbf{minimal solution} if $||s|| \leq ||u||$ for all other solutions $u$.
|
||||
\end{definition}
|
||||
|
||||
\subsection*{Minimal Solutions to Systems of Linear Equations}
|
||||
\addcontentsline{toc}{subsection}{Minimal Solutions to Systems of Linear Equations}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $A \in M_{m \times n}(\F)$ and $b \in F^m$. Suppose that $Ax = b$ is consistent. Then the following statements are true.
|
||||
|
||||
@@ -41,7 +41,10 @@
|
||||
Let $A$ be an $m \times n$ matrix of rank $r$ with positive singular values $\sigma_1 \geq \sigma_2 \geq \dots \geq \sigma_r$. A factorization $A = U\Sigma V^*$ where $U$ and $V$ are unitary matrices and $\Sigma$ is the $m \times n$ matrix defined as in \autoref{Theorem 6.27} is called a \textbf{singular value decomposition} of $A$.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\subsection*{The Polar Decomposition of a Square Matrix}
|
||||
\addcontentsline{toc}{subsection}{The Polar Decomposition of a Square Matrix}
|
||||
|
||||
\begin{theorem}[\textbf{Polar Decomposition}]
|
||||
\hfill\\
|
||||
For any square matrix $A$, there exists a unitary matrix $W$ and a positive semidefinite matrix $P$ such that
|
||||
|
||||
@@ -55,6 +58,9 @@
|
||||
The factorization of a square matrix $A$ as $WP$ where $W$ is unitary and $P$ is positive semidefinite is called a \textbf{polar decomposition} of $A$.
|
||||
\end{definition}
|
||||
|
||||
\subsection*{The Pseudoinverse}
|
||||
\addcontentsline{toc}{subsection}{The Pseudoinverse}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be finite-dimensional inner product spaces over the same field, and let $T: V \to W$ be a linear transformation. Let $L: \n{T}^\perp \to \range{T}$ be the linear transformation defined by $L(x) = T(x)$ for all $x \in \n{T}^\perp$. The \textbf{pseudoinverse} (or \textit{Moore--Penrose generalized inverse}) of $T$, denoted by $T^\dagger$, is defined as the unique linear transformation from $W$ to $V$ such that
|
||||
@@ -67,7 +73,9 @@
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix. Then there exists a unique $n \times m$ matrix $B$ such that $(L_A)^\dagger: F^m \to F^n$ is equal to the left-multiplication transformation $L_B$. We call $B$ the \textbf{pseudoinverse} of $A$ and denote it by $B = A^\dagger$.
|
||||
Let $A$ be an $m \times n$ matrix. Then there exists a unique $n \times m$ matrix $B$ such that $(L_A)^\dagger: \F^m \to \F^n$ is equal to the left-multiplication transformation $L_B$. We call $B$ the \textbf{pseudoinverse} of $A$ and denote it by $B = A^\dagger$. Thus
|
||||
|
||||
\[(L_A)^\dagger = L_{A^\dagger}\]
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
@@ -82,6 +90,9 @@
|
||||
Then $A^\dagger = V\Sigma^\dagger U^*$, and this is a singular value decomposition of $A^\dagger$.
|
||||
\end{theorem}
|
||||
|
||||
\subsection*{The Pseudoinverse and Systems of Linear Equations}
|
||||
\addcontentsline{toc}{subsection}{The Pseudoinverse and Systems of Linear Equations}
|
||||
|
||||
\begin{lemma}
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be finite-dimensional inner product spaces, and let $T: V \to W$ be linear. Then
|
||||
|
||||
@@ -72,6 +72,9 @@
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\subsection*{Rigid Motions}
|
||||
\addcontentsline{toc}{subsection}{Rigid Motions}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ be a real inner product space. A function $f: V \to V$ is called a \textbf{rigid motion} if
|
||||
@@ -91,6 +94,9 @@
|
||||
Let $f: V \to V$ be a rigid motion on a finite-dimensional real inner product space $V$. Then there exists a unique orthogonal operator $T$ on $V$ and a unique translation $g$ on $V$ such that $f = g \circ T$.
|
||||
\end{theorem}
|
||||
|
||||
\subsection*{Orthogonal Operators on $\R^2$}
|
||||
\addcontentsline{toc}{subsection}{Orthogonal Operators on $\R^2$}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $T$ be an orthogonal operator on $\R^2$, and let $A = [T]_\beta$ where $\beta$ is the standard ordered basis for $\R^2$. Then exactly one of the following conditions is satisfied:
|
||||
@@ -106,6 +112,9 @@
|
||||
Any rigid motion on $\R^2$ is either a rotation followed by a translation or a reflection about a line through the origin followed by a translation.
|
||||
\end{corollary}
|
||||
|
||||
\subsection*{Conic Sections}
|
||||
\addcontentsline{toc}{subsection}{Conic Sections}
|
||||
|
||||
\begin{definition}
|
||||
Consider the quadratic equation
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
\section{The rational Canonical Form}
|
||||
\section{The Rational Canonical Form}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
@@ -174,6 +174,9 @@
|
||||
Notice that $\alpha_j$ contains $p_jd$ vectors.
|
||||
\end{definition}
|
||||
|
||||
\subsection*{Uniqueness of the Rational Canonical Form}
|
||||
\addcontentsline{toc}{subsection}{Uniqueness of the Rational Canonical Form}
|
||||
|
||||
\begin{lemma}
|
||||
\hfill\\
|
||||
$\alpha_j$ is an ordered basis for $\mathsf{C}_{v_j}$.
|
||||
@@ -211,6 +214,9 @@
|
||||
Let $A \in M_{n \times n}(\F)$. The \textbf{rational canonical form} of $A$ is defined to be the rational canonical form of $L_A$. Likewise, for $A$, the \textbf{elementary divisors} and their \textbf{multiplicities} are the same as those of $L_A$.
|
||||
\end{definition}
|
||||
|
||||
\subsection*{Direct Sums}
|
||||
\addcontentsline{toc}{subsection}{Direct Sums}
|
||||
|
||||
\begin{theorem}[\textbf{Primary Decomposition Theorem}]
|
||||
\hfill\\
|
||||
Let $T$ be a linear operator on an $n$-dimensional vector space $V$ with characteristic polynomial
|
||||
|
||||
@@ -55,8 +55,36 @@
|
||||
\newcommand{\F}{\mathbb{F}}
|
||||
|
||||
% Theorem Styles
|
||||
\declaretheorem[numberwithin=chapter, style=definition]{theorem, definition, notation, lemma, corollary, remark, example}
|
||||
\declaretheorem[numberwithin=chapter, style=definition]{theorem, definition, notation, lemma, corollary, remark}
|
||||
|
||||
% Formatting
|
||||
\setlist[enumerate]{font=\bfseries}
|
||||
\newcounter{alphasect}
|
||||
\def\alphainsection{0}
|
||||
|
||||
\let\oldsection=\section
|
||||
\def\section{%
|
||||
\ifnum\alphainsection=1%
|
||||
\addtocounter{alphasect}{1}
|
||||
\fi%
|
||||
\oldsection}%
|
||||
|
||||
\renewcommand\thesection{%
|
||||
\ifnum\alphainsection=1%
|
||||
\Alph{alphasect}
|
||||
\else%
|
||||
\arabic{section}
|
||||
\fi%
|
||||
}%
|
||||
|
||||
\newenvironment{alphasection}{%
|
||||
\ifnum\alphainsection=1%
|
||||
\errhelp={Let other blocks end at the beginning of the next block.}
|
||||
\errmessage{Nested Alpha section not allowed}
|
||||
\fi%
|
||||
\setcounter{alphasect}{0}
|
||||
\def\alphainsection{1}
|
||||
}{%
|
||||
\setcounter{alphasect}{0}
|
||||
\def\alphainsection{0}
|
||||
}%
|
||||
Reference in New Issue
Block a user