Added subsections when they appear, added all of the appendices, and finished the packet
This commit is contained in:
@@ -1,5 +1,8 @@
|
||||
\section{Bilinear and Quadratic Forms}
|
||||
|
||||
\subsection*{Bilinear Forms}
|
||||
\addcontentsline{toc}{subsection}{Bilinear Forms}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ be a vector space over a field $\F$. A function $H$ from the set $V \times V$ of ordered pairs of vectors to $\F$ is called a \textbf{bilinear form} on $V$ if $H$ is linear in each variable when the other variable is held fixed; that is, $H$ is a bilinear form on $V$ if
|
||||
@@ -76,6 +79,9 @@
|
||||
Let $V$ be an $n$-dimensional vector space with ordered basis $\beta$, and let $H$ be a bilinear form on $V$. For any $n \times n$ matrix $B$, if $B$ is congruent to $\psi_\beta(H)$, then there exists an ordered basis $\gamma$ for $V$ such that $\psi_\gamma(H) = B$. Furthermore, if $B = Q^t\psi_\beta(H)Q$ for some invertible matrix $Q$, then $Q$ changes $\gamma$-coordinates into $\beta$-coordinates.
|
||||
\end{corollary}
|
||||
|
||||
\subsection*{Symmetric Bilinear Forms}
|
||||
\addcontentsline{toc}{subsection}{Symmetric Bilinear Forms}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
A bilinear form $H$ on a vector space $V$ is \textbf{symmetric} if $H(x,y) = H(y,x)$ for all $x,y \in V$.
|
||||
@@ -116,6 +122,9 @@
|
||||
Let $\F$ be a field that is not of characteristic two. If $A \in M_{n \times n}(\F)$ is a symmetric matrix, then $A$ is congruent to a diagonal matrix.
|
||||
\end{corollary}
|
||||
|
||||
\subsection*{Quadratic Forms}
|
||||
\addcontentsline{toc}{subsection}{Quadratic Forms}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ be a vector space over $\F$. A function $K: V \to \F$ is called a \textbf{quadratic form} if there exists a symmetric bilinear form $H \in \mathcal{B}(V)$ such that
|
||||
@@ -123,6 +132,9 @@
|
||||
\[K(x) = H(x, x) \quad \text{for all}\ x \in V.\]
|
||||
\end{definition}
|
||||
|
||||
\subsection*{Quadratic Forms Over the Field $\R$}
|
||||
\addcontentsline{toc}{subsection}{Quadratic Forms Over the Field $\R$}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $V$ be a finite-dimensional real inner product space, and let $H$ be a symmetric bilinear form on $V$. Then there exists an orthonormal basis $\beta$ for $V$ such that $\psi_\beta(H)$ is a diagonal matrix.
|
||||
@@ -141,6 +153,9 @@
|
||||
In fact, if $H$ is the symmetric bilinear form determined by $K$, then $\beta$ can be chosen to be any orthonormal basis for $V$ such that $\psi_\beta(H)$ is a diagonal matrix.
|
||||
\end{corollary}
|
||||
|
||||
\subsection*{The Second Derivative Test for Functions of Several Variables}
|
||||
\addcontentsline{toc}{subsection}{The Second Derivative Test for Functions of Several Variables}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $z=f(t_1, t_2, \dots, t_n)$ be a fixed real-valued function of $n$ real variables for which all third-order partial derivatives exist and are continuous. The function $f$ is said to have a \textbf{local maximum} at a point $p \in \R^n$ if there exists a $\delta > 0$ such that $f(p) \geq f(x)$ whenever $||x - p|| < \delta$. Likewise, $f$ has a \textbf{local minimum} at $p \in \R^n$ if there exists a $\delta > 0$ such that $f(p) \leq f(x)$ whenever $||x - p|| < \delta$. If $f$ has either a local minimum or a local maximum at $p$, we say that $f$ has a \textbf{local extremum} at $p$. A point $p \in \R^n$ is called a \textbf{critical point} of $f$ if $\displaystyle\frac{\partial f(p)}{\partial t_i} = 0$ for $i = 1, 2, \dots, n$. It is a well-known fact that if $f$ has a local extremum at a point $p \in \R^n$, then $p$ is a critical point of $f$. For, if $f$ has a local extremum at $p=(p_1, p_2, \dots, p_n)$, then for any $i = 1, 2, \dots, n$, the function $\phi_i$ defined by $\phi_i(t) = f(p_1, p_2, \dots, p_{i-1}, t, p_{i+1}, \dots, p_n)$ has a local extremum at $t = p_i$. So, by an elementary single-variable argument,
|
||||
@@ -168,6 +183,9 @@
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\subsection*{Sylvester's Law of Inertia}
|
||||
\addcontentsline{toc}{subsection}{Sylvester's Law of Inertia}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
The \textbf{rank} of a bilinear form is the rank of any of its matrix representations.
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
\section{Einstein's Special Theory of Relativity}
|
||||
|
||||
\begin{definition}[\textbf{Axioms of the Special Theory of Relativity}]
|
||||
\subsection*{Axioms of the Special Theory of Relativity}
|
||||
\addcontentsline{toc}{subsection}{Axioms of the Special Theory of Relativity}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
The basic problem is to compare two different inertial (non-accelerating) coordinate systems $S$ and $S'$ in three-space ($\R^3$) that are in motion relative to each other under the assumption that the speed of light is the same when measured in either system. We assume that $S'$ moves at a constant velocity in relation to $S$ as measured from $S$. To simplify matters, let us suppose that the following conditions hold:
|
||||
|
||||
|
||||
@@ -58,6 +58,9 @@
|
||||
\end{enumerate}
|
||||
\end{corollary}
|
||||
|
||||
\subsection*{Least Squares Approximation}
|
||||
\addcontentsline{toc}{subsection}{Least Squares Approximation}
|
||||
|
||||
\begin{lemma}
|
||||
\hfill\\
|
||||
Let $A \in M_{m \times n}(\F)$, $x \in \F^n$, and $y \in \F^m$. Then
|
||||
@@ -85,6 +88,9 @@
|
||||
A solution $s$ to a system of linear equations $Ax = b$ is called a \textbf{minimal solution} if $||s|| \leq ||u||$ for all other solutions $u$.
|
||||
\end{definition}
|
||||
|
||||
\subsection*{Minimal Solutions to Systems of Linear Equations}
|
||||
\addcontentsline{toc}{subsection}{Minimal Solutions to Systems of Linear Equations}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $A \in M_{m \times n}(\F)$ and $b \in \F^m$. Suppose that $Ax = b$ is consistent. Then the following statements are true.
|
||||
|
||||
@@ -41,7 +41,10 @@
|
||||
Let $A$ be an $m \times n$ matrix of rank $r$ with positive singular values $\sigma_1 \geq \sigma_2 \geq \dots \geq \sigma_r$. A factorization $A = U\Sigma V^*$ where $U$ and $V$ are unitary matrices and $\Sigma$ is the $m \times n$ matrix defined as in \autoref{Theorem 6.27} is called a \textbf{singular value decomposition} of $A$.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
\subsection*{The Polar Decomposition of a Square Matrix}
|
||||
\addcontentsline{toc}{subsection}{The Polar Decomposition of a Square Matrix}
|
||||
|
||||
\begin{theorem}[\textbf{Polar Decomposition}]
|
||||
\hfill\\
|
||||
For any square matrix $A$, there exists a unitary matrix $W$ and a positive semidefinite matrix $P$ such that
|
||||
|
||||
@@ -55,6 +58,9 @@
|
||||
The factorization of a square matrix $A$ as $WP$ where $W$ is unitary and $P$ is positive semidefinite is called a \textbf{polar decomposition} of $A$.
|
||||
\end{definition}
|
||||
|
||||
\subsection*{The Pseudoinverse}
|
||||
\addcontentsline{toc}{subsection}{The Pseudoinverse}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be finite-dimensional inner product spaces over the same field, and let $T: V \to W$ be a linear transformation. Let $L: \n{T}^\perp \to \range{T}$ be the linear transformation defined by $L(x) = T(x)$ for all $x \in \n{T}^\perp$. The \textbf{pseudoinverse} (or \textit{Moore-Penrose generalized inverse}) of $T$, denoted by $T^\dagger$, is defined as the unique linear transformation from $W$ to $V$ such that
|
||||
@@ -67,7 +73,9 @@
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $A$ be an $m \times n$ matrix. Then there exists a unique $n \times m$ matrix $B$ such that $(L_A)^\dagger: \F^m \to \F^n$ is equal to the left-multiplication transformation $L_B$. We call $B$ the \textbf{pseudoinverse} of $A$ and denote it by $B = A^\dagger$.
|
||||
Let $A$ be an $m \times n$ matrix. Then there exists a unique $n \times m$ matrix $B$ such that $(L_A)^\dagger: \F^m \to \F^n$ is equal to the left-multiplication transformation $L_B$. We call $B$ the \textbf{pseudoinverse} of $A$ and denote it by $B = A^\dagger$. Thus
|
||||
|
||||
\[(L_A)^\dagger = L_{A^\dagger}.\]
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}
|
||||
@@ -82,6 +90,9 @@
|
||||
Then $A^\dagger = V\Sigma^\dagger U^*$, and this is a singular value decomposition of $A^\dagger$.
|
||||
\end{theorem}
|
||||
|
||||
\subsection*{The Pseudoinverse and Systems of Linear Equations}
|
||||
\addcontentsline{toc}{subsection}{The Pseudoinverse and Systems of Linear Equations}
|
||||
|
||||
\begin{lemma}
|
||||
\hfill\\
|
||||
Let $V$ and $W$ be finite-dimensional inner product spaces, and let $T: V \to W$ be linear. Then
|
||||
|
||||
@@ -72,6 +72,9 @@
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
|
||||
\subsection*{Rigid Motions}
|
||||
\addcontentsline{toc}{subsection}{Rigid Motions}
|
||||
|
||||
\begin{definition}
|
||||
\hfill\\
|
||||
Let $V$ be a real inner product space. A function $f: V \to V$ is called a \textbf{rigid motion} if
|
||||
@@ -91,6 +94,9 @@
|
||||
Let $f: V \to V$ be a rigid motion on a finite-dimensional real inner product space $V$. Then there exists a unique orthogonal operator $T$ on $V$ and a unique translation $g$ on $V$ such that $f = g \circ T$.
|
||||
\end{theorem}
|
||||
|
||||
\subsection*{Orthogonal Operators on $\R^2$}
|
||||
\addcontentsline{toc}{subsection}{Orthogonal Operators on $\R^2$}
|
||||
|
||||
\begin{theorem}
|
||||
\hfill\\
|
||||
Let $T$ be an orthogonal operator on $\R^2$, and let $A = [T]_\beta$ where $\beta$ is the standard ordered basis for $\R^2$. Then exactly one of the following conditions is satisfied:
|
||||
@@ -106,6 +112,9 @@
|
||||
Any rigid motion on $\R^2$ is either a rotation followed by a translation or a reflection about a line through the origin followed by a translation.
|
||||
\end{corollary}
|
||||
|
||||
\subsection*{Conic Sections}
|
||||
\addcontentsline{toc}{subsection}{Conic Sections}
|
||||
|
||||
\begin{definition}
|
||||
Consider the quadratic equation
|
||||
|
||||
|
||||
Reference in New Issue
Block a user