Wrote out Chapter 1 theorems and definitions

This commit is contained in:
2024-02-19 16:40:39 -07:00
parent b8f92be964
commit 33101973d4
9 changed files with 447 additions and 1 deletions
+106
View File
@@ -1 +1,107 @@
\section{Bases and Dimension}
\begin{definition}
\hfill\\
A \textbf{basis} $\beta$ for a vector space $V$ is a linearly independent subset of $V$ that generates $V$. If $\beta$ is a basis for $V$, we also say that the vectors of $\beta$ form a basis for $V$.
\end{definition}
\begin{theorem}
\hfill\\
Let $V$ be a vector space and $\beta = \{v_1, v_2, \dots, v_n\}$ be a subset of $V$. Then $\beta$ is a basis for $V$ if and only if each $v \in V$ can be uniquely expressed as a linear combination of vectors of $\beta$, that is, can be expressed in the form
\[v = a_1v_1 + a_2v_2 + \dots + a_nv_n\]
for unique scalars $a_1, a_2, \dots, a_n$.
\end{theorem}
\begin{theorem}
\hfill\\
If a vector space $V$ is generated by a finite set $S$, then some subset of $S$ is a basis for $V$. Hence $V$ has a finite basis.
\end{theorem}
\begin{theorem}[\textbf{Replacement Theorem}]
\hfill\\
Let $V$ be a vector space that is generated by a set $G$ containing exactly $n$ vectors, and let $L$ be a linearly independent subset of $V$ containing exactly $m$ vectors. Then $m \leq n$ and there exists a subset $H$ of $G$ containing exactly $n-m$ vectors such that $L \cup H$ generates $V$.
\end{theorem}
\begin{corollary}
Let $V$ be a vector space having a finite basis. Then every basis for $V$ contains the same number of vectors.
\end{corollary}
\begin{definition}
\hfill\\
A vector space is called \textbf{finite-dimensional} if it has a basis consisting of a finite number of vectors. The unique number of vectors in each basis for $V$ is called the \textbf{dimension} of $V$ and is denoted by $\text{dim}(V)$. A vector space that is not finite-dimensional is called \textbf{infinite-dimensional}.
\end{definition}
\begin{corollary}
\hfill\\
Let $V$ be a vector space with dimension $n$.
\begin{enumerate}
\item Any finite generating set for $V$ contains at least $n$ vectors, and a generating set for $V$ that contains exactly $n$ vectors is a basis for $V$.
\item Any linearly independent subset of $V$ that contains exactly $n$ vectors is a basis for $V$.
\item Every linearly independent subset of $V$ can be extended to a basis for $V$.
\end{enumerate}
\end{corollary}
\begin{theorem}
\hfill\\
Let $W$ be a subspace of a finite-dimensional vector space $V$. Then $W$ is finite-dimensional and $\text{dim}(W) \leq \text{dim}(V)$. Moreover, if $\text{dim}(W) = \text{dim}(V)$, then $V = W$.
\end{theorem}
\begin{corollary}
\hfill\\
If $W$ is a subspace of a finite-dimensional vector space $V$, then any basis for $W$ can be extended to a basis for $V$.
\end{corollary}
\begin{definition}[\textbf{The Lagrange Interpolation Formula}]
\hfill\\
Corollary 2 of the replacement theorem can be applied to obtain a useful formula. Let $c_0, c_1, \dots, c_n$ be distinct scalars in an infinite field $\F$. The polynomials $f_0(x), f_1(x), \dots, f_n(x)$ defined by
\[f_i(x) = \frac{(x-c_0)\dots(x-c_{i-1})(x-c_{i+1})\dots(x-c_n)}{(c_i - c_0)\dots(c_i-c_{i-1})(c_i-c_{i+1})\dots(c_i-c_n)} = \prod_{\substack{k=0 \\ k \neq i}}^{n} \frac{x-c_k}{c_i - c_k}\]
are called the \textbf{Lagrange polynomials} (associated with $c_0, c_1, \dots, c_n$). Note that each $f_i(x)$ is a polynomial of degree $n$ and hence is in $P_n(\F)$. By regarding $f_i(x)$ as a polynomial function $f_i: \F \to \F$, we see that
\begin{equation}
f_i(c_j) = \begin{cases}
0 &\text{if}\ i \neq j,\\
1 &\text{if}\ i = j.
\end{cases}
\end{equation}
This property of Lagrange polynomials can be used to show that $\beta = \{f_0, f_1, \dots, f_n\}$ is a linearly independent subset of $P_n(\F)$. Suppose that
\[\sum_{i=0}^{n}a_if_i = 0\ \ \text{for some scalars}\ a_0, a_1, \dots, a_n,\]
where $0$ denotes the zero function. Then
\[\sum_{i=0}^{n}a_if_i(c_j)=0\ \ \text{for}\ j=0, 1, \dots, n.\]
But also
\[\sum_{i=0}^{n}a_if_i(c_j)=a_j\]
by (1.1). Hence $a_j = 0$ for $j = 0, 1, \dots, n$; so $\beta$ is linearly independent. Since the dimension of $P_n(\F)$ is $n + 1$, it follows from Corollary 2 of the replacement theorem that $\beta$ is a basis for $P_n(\F)$.
Because $\beta$ is a basis for $P_n(\F)$, every polynomial function $g$ in $P_n(\F)$ is a linear combination of polynomial functions of $\beta$, say,
\[g = \sum_{i=0}^{n}b_if_i.\]
It follows that
\[g(c_j)=\sum_{i=0}^{n}b_if_i(c_j)=b_j;\]
so
\[g=\sum_{i=0}^{n}g(c_i)f_i\]
is the unique representation of $g$ as a linear combination of elements of $\beta$. This representation is called the \textbf{Lagrange interpolation formula}. Notice that the preceding argument shows that if $b_0, b_1, \dots, b_n$ are any $n+1$ scalars in $\F$ (not necessarily distinct), then the polynomial function
\[g = \sum_{i=0}^{n}b_if_i\]
is the unique polynomial in $P_n(\F)$ such that $g(c_j) = b_j$. Thus we have found the unique polynomial of degree not exceeding $n$ that has specified values $b_j$ at given points $c_j$ in its domain ($j = 0, 1, \dots, n$).\\
An important consequence of the Lagrange interpolation formula is the following result: If $f \in P_n(\F)$ and $f(c_i) = 0$, for $n+1$ distinct scalars $c_0, c_1, \dots, c_n$ in $\F$, then $f$ is the zero function.
\end{definition}
+10
View File
@@ -1 +1,11 @@
\section{Introduction}
\begin{theorem}[\textbf{Parallelogram Law for Vector Addition}]
\hfill\\
The sum of two vectors $x$ and $y$ that act at the same point $P$ is the vector beginning at $P$ that is represented by the diagonal of the parallelogram having $x$ and $y$ as adjacent sides.
\end{theorem}
\begin{definition}
\hfill\\
Two nonzero vectors $x$ and $y$ are called \textbf{parallel} if $y=tx$ for some nonzero real number $t$. (Thus nonzero vectors having the same or opposite directions are parallel.)
\end{definition}
@@ -1 +1,21 @@
\section{Linear Combinations and Systems of Linear Equations}
\begin{definition}
\hfill\\
Let $V$ be a vector space and $S$ a nonempty subset of $V$. A vector $v \in V$ is called a \textbf{linear combination} of vectors of $S$ if there exist a finite number of vectors $v_1, v_2, \dots, v_n$ in $S$ and scalars $a_1, a_2, \dots, a_n$ in $\F$ such that $v=a_1v_1 + a_2v_2 + \dots + a_nv_n$. In this case we also say that $v$ is a linear combination of $v_1, v_2, \dots, v_n$ and call $a_1, a_2, \dots, a_n$ the \textbf{coefficients} of the linear combination.
\end{definition}
\begin{definition}
\hfill\\
Let $S$ be a nonempty subset of a vector space $V$. The \textbf{span} of $S$, denoted $\text{span}(S)$, is the set consisting of all linear combinations of the vectors in $S$. For convenience, we define $\text{span}(\emptyset) = \{0\}$.
\end{definition}
\begin{theorem}
\hfill\\
The span of any subset $S$ of a vector space $V$ is a subspace of $V$. Moreover, any subspace of $V$ that contains $S$ must also contain the span of $S$.
\end{theorem}
\begin{definition}
\hfill\\
A subset $S$ of a vector space $V$ \textbf{generates} (or \textbf{spans}) $V$ if $\text{span}(S) = V$. In this case, we also say that the vectors of $S$ generate (or span) $V$.
\end{definition}
@@ -1 +1,43 @@
\section{Linear Dependence and Linear Independence}
\begin{definition}
\hfill\\
A subset $S$ of a vector space $V$ is called \textbf{linearly dependent} if there exist a finite number of distinct vectors $v_1, v_2, \dots, v_n$ in $S$ and scalars $a_1, a_2, \dots, a_n$ not all zero, such that
\[a_1v_1 + a_2v_2 + \dots + a_nv_n = 0.\]
In this case, we also say that the vectors of $S$ are linearly dependent.\\
For any vectors $v_1, v_2, \dots, v_n$, we have $a_1v_1 + a_2v_2 + \dots + a_nv_n = 0$ if $a_1 = a_2 = \dots = a_n = 0$. We call this the \textbf{trivial representation} of $0$ as a linear combination of $v_1, v_2, \dots, v_n$. Thus, for a set to be linearly dependent, there must exist a nontrivial representation of $0$ as a linear combination of vectors in the set. Consequently, any subset of a vector space that contains the zero vector is linearly dependent, because $0 = 1 \cdot 0$ is a nontrivial representation of $0$ as a linear combination of vectors in the set.
\end{definition}
\begin{definition}
\hfill\\
A subset $S$ of a vector space that is not linearly dependent is called \textbf{linearly independent}. As before, we also say that the vectors of $S$ are linearly independent.\\
The following facts about linearly independent sets are true in any vector space.
\begin{enumerate}
\item The empty set is linearly independent, for linearly dependent sets must be nonempty.
\item A set consisting of a single nonzero vector is linearly independent. For if $\{v\}$ is linearly dependent, then $av = 0$ for some nonzero scalar $a$. Thus
\[v = a^{-1}(av) = a^{-1}0 = 0.\]
\item A set is linearly independent if and only if the only representations of $0$ as linear combinations of its vectors are trivial representations.
\end{enumerate}
\end{definition}
\begin{theorem}
\hfill\\
Let $V$ be a vector space, and let $S_1 \subseteq S_2 \subseteq V$. If $S_1$ is linearly dependent, then $S_2$ is linearly dependent.
\end{theorem}
\begin{corollary}
\hfill\\
Let $V$ be a vector space, and let $S_1 \subseteq S_2 \subseteq V$. If $S_2$ is linearly independent, then $S_1$ is linearly independent.
\end{corollary}
\begin{theorem}
\hfill\\
Let $S$ be a linearly independent subset of a vector space $V$, and let $v$ be a vector in $V$ that is not in $S$. Then $S \cup \{v\}$ is linearly dependent if and only if $v \in \text{span}(S)$.
\end{theorem}
@@ -1 +1,38 @@
\section{Maximal Linearly Independent Subsets}
\begin{definition}
\hfill\\
Let $\mathcal{F}$ be a family of sets. A member $M$ of $\mathcal{F}$ is called \textbf{maximal} (with respect to set inclusion) if $M$ is contained in no member of $\mathcal{F}$ other than $M$ itself.
\end{definition}
\begin{definition}
\hfill\\
Let $\mathcal{F}$ be the family of all subsets of a nonempty set $S$. This family $\mathcal{F}$ is called the \textbf{power set} of $S$.
\end{definition}
\begin{definition}
\hfill\\
A collection of sets $\mathcal{C}$ is called a \textbf{chain} (or \textbf{nest} or \textbf{tower}) if for each pair of sets $A$ and $B$ in $\mathcal{C}$, either $A \subseteq B$ or $B \subseteq A$.
\end{definition}
\begin{definition}[\textbf{Maximal Principle}]
\hfill\\
Let $\mathcal{F}$ be a family of sets. If, for each chain $\mathcal{C} \subseteq \mathcal{F}$, there exists a member of $\mathcal{F}$ that contains each member of $\mathcal{C}$, then $\mathcal{F}$ contains a maximal member.\\
\textbf{Note:} The \textit{Maximal Principle} is logically equivalent to the \textit{Axiom of Choice}, which is an assumption in most axiomatic developments of set theory.
\end{definition}
\begin{definition}
\hfill\\
Let $S$ be a subset of a vector space $V$. A \textbf{maximal linearly independent subset} of $S$ is a subset $B$ of $S$ satisfying both of the following conditions
\begin{enumerate}
\item $B$ is linearly independent.
\item The only linearly independent subset of $S$ that contains $B$ is $B$ itself.
\end{enumerate}
\end{definition}
\begin{corollary}
\hfill\\
Every vector space has a basis.
\end{corollary}
+59
View File
@@ -1 +1,60 @@
\section{Subspaces}
\begin{definition}
\hfill\\
A subset $W$ of a vector space $V$ over a field $\F$ is called a \textbf{subspace} of $V$ if $W$ is a vector space over $\F$ with the operations of addition and scalar multiplication defined on $V$.\\
In any vector space $V$, note that $V$ and $\{0\}$ are subspaces. The latter is called the \textbf{zero subspace} of $V$.
Fortunately, it is not necessary to verify all of the vector space properties to prove that a subset is a subspace. Because properties (VS 1), (VS 2), (VS 5), (VS 6), (VS 7) and (VS 8) hold for all vectors in the vector space, these properties automatically hold for the vectors in any subset. Thus a subset $W$ of a vector space $V$ is a subspace of $V$ if and only if the following four properties hold:
\begin{enumerate}
\item $x + y \in W$ whenever $x \in W$ and $y \in W$. ($W$ is \textbf{closed under addition}).
\item $cx \in W$ whenever $c \in \F$ and $x \in W$. ($W$ is \textbf{closed under scalar multiplication}).
\item $W$ has a zero vector.
\item Each vector in $W$ has an additive inverse in $W$.
\end{enumerate}
\end{definition}
\begin{theorem}
\hfill\\
Let $V$ be a vector space and $W$ a subset of $V$. Then $W$ is a subspace of $V$ if and only if the following three conditions hold for the operations defined in $V$.
\begin{enumerate}
\item $0 \in W$.
\item $x + y \in W$ whenever $x \in W$ and $y \in W$.
\item $cx \in W$ whenever $c \in \F$ and $x \in W$.
\end{enumerate}
\end{theorem}
\begin{definition}
\hfill\\
The \textbf{transpose} $A^t$ of an $m \times n$ matrix $A$ is the $n \times m$ matrix obtained from $A$ by interchanging the rows with the columns; that is, $(A^t)_{ij} = A_{ji}$.
\end{definition}
\begin{definition}
\hfill\\
A \textbf{symmetric matrix} is a matrix $A$ such that $A^t = A$.
\end{definition}
\begin{definition}
\hfill\\
An $n \times n$ matrix $M$ is called a \textbf{diagonal matrix} if $M_{ij} = 0$ whenever $i \neq j$; that is, if all nondiagonal entries are zero.
\end{definition}
\begin{definition}
\hfill\\
The \textbf{trace} of an $n \times n$ matrix $M$, denoted $\text{tr}(M)$, is the sum of the diagonal entries of $M$; that is,
\[\text{tr}(M) = M_{11} + M_{22} + \dots + M_{nn}.\]
\end{definition}
\begin{theorem}
\hfill\\
Any intersection of subspaces of a vector space $V$ is a subspace of $V$.
\end{theorem}
\begin{definition}
\hfill\\
An $m \times n$ matrix $A$ is called \textbf{upper triangular} if all entries lying below the diagonal entries are zero; that is, if $A_{ij} = 0$ whenever $i > j$.
\end{definition}
+169
View File
@@ -1 +1,170 @@
\section{Vector Spaces}
\begin{definition}
\hfill\\
A \textbf{vector space} (or \textbf{linear space}) $V$ over a field $\F$ consists of a set on which two operations (called \textbf{addition} and \textbf{scalar multiplication}, respectively) are defined so that for each pair of elements $x$ and $y$ in $V$ there is a unique element $x + y$ in $V$, and for each element $a$ in $\F$ and each element $x$ in $V$ there is a unique element $ax$ in $V$, such that the following conditions hold:
\begin{description}
\item[(VS 1)] For all $x, y$ in $V$, $x + y = y + x$ (commutativity of addition).
\item[(VS 2)] For all $x, y, z$ in $V$, $(x + y) + z = x + (y + z)$ (associativity of addition).
\item[(VS 3)] There exists an element in $V$ denoted by $0$ such that $x + 0 = x$ for each $x$ in $V$.
\item[(VS 4)] For each element $x$ in $V$ there exists an element $y$ in $V$ such that $x + y = 0$.
\item[(VS 5)] For each element $x$ in $V$, $1x=x$.
\item[(VS 6)] For each pair of elements $a, b$ in $\F$ and each element $x$ in $V$, $(ab)x = a(bx)$.
\item[(VS 7)] For each element $a$ in $\F$ and each pair of elements $x, y$ in $V$, $a(x + y) = ax + ay$.
\item[(VS 8)] For each pair of elements $a, b$ in $\F$ and each element $x$ in $V$, $(a + b)x = ax + bx$.
\end{description}
The elements $x + y$ and $ax$ are called the \textbf{sum} of $x$ and $y$ and the \textbf{product} of $a$ and $x$, respectively.\\
The elements of the field $\F$ are called \textbf{scalars} and the elements of the vector space $V$ are called \textbf{vectors}.\\
\textbf{Note:} The reader should not confuse this use of the word "vector" with the physical entity discussed in section 1.1: the word "vector" is now being used to describe any element of a vector space.
\end{definition}
\begin{definition}
\hfill\\
An object of the form $(a_1, a_2, \dots, a_n)$, where the entries $a_1, a_2, \dots, a_n$ are elements of a field $\F$, is called an \textbf{\textit{n}-tuple} with entries from $\F$. The elements $a_1, a_2, \dots, a_n$ are called the \textbf{entries} or \textbf{components} of the $n$-tuple. Two $n$-tuples $(a_1, a_2, \dots, a_n)$ and $(b_1, b_2, \dots, b_n)$ with entries from a field $\F$ are called \textbf{equal} if $a_i = b_i$ for $i=1, 2, \dots, n$.
\end{definition}
\begin{definition}
\hfill\\
Vectors in $\F^n$ may be written as \textbf{column vectors}
\[\begin{pmatrix} a_1 \\ a_2 \\ \vdots \\ a_n \end{pmatrix}\]
rather than as \textbf{row vectors} $(a_1, a_2, \dots, a_n)$. Since a 1-tuple whose only entry is from $\F$ can be regarded as an element of $\F$, we usually write $\F$ rather than $\F^1$ for the vector space of 1-tuples with entry from $\F$.
\end{definition}
\begin{definition}
\hfill\\
An $m \times n$ \textbf{matrix} with entries from a field $\F$ is a rectangular array of the form
\[\begin{pmatrix}
a_{11} & a_{12} & \dots &a_{1n} \\
a_{21} & a_{22} & \dots & a_{2n} \\
\vdots & \vdots & & \vdots \\
a_{m1} & a_{m2} & \dots & a_{mn}
\end{pmatrix},\]
where each entry $a_{ij}\ (1 \leq i \leq m,\ 1 \leq j \leq n)$ is an element of $\F$. We call the entries $a_{ij}$ with $i=j$ the \textbf{diagonal entries} of the matrix. The entries $a_{i1}, a_{i2}, \dots, a_{in}$ compose the \textbf{\textit{i}th row} of the matrix, and the entries $a_{1j}, a_{2j}, \dots, a_{mj}$ compose the \textbf{\textit{j}th column} of the matrix. The rows of the preceding matrix are regarded as vectors in $\F^n$, and the columns are regarded as vectors in $\F^m$. The $m \times n$ matrix in which each entry equals zero is called the \textbf{zero matrix} and is denoted by $O$.\\
In this book, we denote matrices by capital italic letters (e.g. $A$, $B$, and $C$), and we denote the entry of a matrix $A$ that lies in row $i$ and column $j$ by $A_{ij}$. In addition, if the number of rows and columns of a matrix are equal, the matrix is called \textbf{square}.
Two $m \times n$ matrices $A$ and $B$ are called \textbf{equal} if all their corresponding entries are equal, that is, if $A_{ij} = B_{ij}$ for $1 \leq i \leq m$ and $1 \leq j \leq n$.
\end{definition}
\begin{definition}
\hfill\\
The set of all $m \times n$ matrices with entries from a field $\F$ is a vector space which we denote by $M_{m \times n}(\F)$, with the following operations of \textbf{matrix addition} and \textbf{scalar multiplication}: For $A, B \in M_{m \times n}(\F)$ and $c \in \F$,
\[(A + B)_{ij} = A_{ij} + B_{ij}\ \ \ \text{and}\ \ \ (cA)_{ij} = cA_{ij}\]
for $1 \leq i \leq m$ and $1 \leq j \leq n$.
\end{definition}
\begin{definition}
\hfill\\
Let $S$ be any nonempty set and $\F$ be any field, and let $\mathcal{F}(S, \F)$ denote the set of all functions from $S$ to $\F$. Two functions $f$ and $g$ in $\mathcal{F}(S, \F)$ are called \textbf{equal} if $f(s) = g(s)$ for each $s \in S$. The set $\mathcal{F}(S, \F)$ is a vector space with the operations of addition and scalar multiplication defined for $f,g \in \mathcal{F}(S, \F)$ and $c \in \F$ defined by
\[(f + g)(s) = f(s) + g(s)\ \ \ \text{and}\ \ \ (cf)(s) = c[f(s)]\]
for each $s \in S$. Note that these are the familiar operations of addition and scalar multiplication for functions used in algebra and calculus.
\end{definition}
\begin{definition}
\hfill\\
A \textbf{polynomial} with coefficients from a field $\F$ is an expression of the form
\[f(x)=a_nx^n + a_{n-1}x^{n-1}+\dots+a_1x+a_0,\]
where $n$ is a nonnegative integer and each $a_k$, called the \textbf{coefficient} of $x^k$, is in $\F$. If $f(x)=0$, that is, if $a_n = a_{n-1} = \dots = a_0 = 0$, then $f(x)$ is called the \textbf{zero polynomial} and, for convenience, its degree is defined to be $-1$; otherwise, the \textbf{degree} of a polynomial is defined to be the largest exponent of $x$ that appears in the representation
\[f(x)=a_nx^n + a_{n-1}x^{n-1}+\dots+a_1x+a_0\]
with a nonzero coefficient. Note that the polynomials of degree zero may be written in the form $f(x) = c$ for some nonzero scalar $c$. Two polynomials,
\[f(x)=a_nx^n + a_{n-1}x^{n-1}+\dots+a_1x+a_0\]
and
\[g(x)=b_mx^m + b_{m-1}x^{m-1}+\dots+b_1x+b_0,\]
are called \textbf{equal} if $m=n$ and $a_i = b_i$ for $i=0, 1, \dots, n$.
\end{definition}
\begin{definition}
\hfill\\
Let $\F$ be any field. A \textbf{sequence} in $\F$ is a function $\sigma$ from the positive integers into $\F$. In this book, the sequence $\sigma$ such that $\sigma(n) = a_n$ for $n=1, 2, \dots$ is denoted $\{a_n\}$. Let $V$ consist of all sequences $\{a_n\}$ in $\F$ that have only a finite number of nonzero terms $a_n$. If $\{a_n\}$ and $\{b_n\}$ are in $V$ and $t \in \F$, define
\[\{a_n\} + \{b_n\} = \{a_n + b_n\}\ \ \ \text{and}\ \ \ t\{a_n\} = \{ta_n\}\]
\end{definition}
\begin{theorem}[\textbf{Cancellation Law for Vector Addition}]
\hfill\\
If $x, y$ and $z$ are vectors in a vector space $V$ such that $x + z = y + z$, then $x = y$.
\end{theorem}
\begin{corollary}
\hfill\\
The vector $0$ described in (VS 3) is unique.
\end{corollary}
\begin{corollary}
\hfill\\
The vector $y$ described in (VS 4) is unique.
\end{corollary}
\begin{definition}
\hfill\\
The vector $0$ in (VS 3) is called the \textbf{zero vector} of $V$, and the vector $y$ in (VS 4) (that is, the unique vector such that $x + y = 0$) is called the \textbf{additive inverse} of $x$ and is denoted by $-x$.
\end{definition}
\begin{theorem}
\hfill\\
In any vector space $V$, the following statements are true:
\begin{enumerate}
\item $0x = 0$ for each $x \in V$.
\item $(-a)x = -(ax) = a(-x)$ for each $a \in \F$ and each $x \in V$.
\item $a0 = 0$ for each $a \in \F$.
\end{enumerate}
\end{theorem}
\begin{definition}
\hfill\\
Let $V=\{0\}$ consist of a single vector $0$ and define $0 + 0 = 0$ and $c0 = 0$ for each scalar $c \in \F$. Then $V$ is called the \textbf{zero vector space}.
\end{definition}
\begin{definition}
\hfill\\
A real-valued function $f$ defined on the real line is called an \textbf{even function} if $f(-t) = f(t)$ for each real number $t$, and is called an \textbf{odd function} if $f(-t) = -f(t)$ for each real number $t$.
\end{definition}
\begin{definition}
\hfill\\
If $S_1$ and $S_2$ are nonempty subsets of a vector space $V$, then the \textbf{sum} of $S_1$ and $S_2$, denoted $S_1 + S_2$, is the set $\{x + y\ |\ x \in S_1\ \text{and}\ y \in S_2\}$.
\end{definition}
\begin{definition}
\hfill\\
A vector space $V$ is called the \textbf{direct sum} of $W_1$ and $W_2$ if $W_1$ and $W_2$ are subspaces of $V$ such that $W_1 \cap W_2 = \{0\}$ and $W_1 + W_2 = V$. We denote that $V$ is the direct sum of $W_1$ and $W_2$ by writing $V = W_1 \oplus W_2$.
\end{definition}
\begin{definition}
\hfill\\
A matrix $M$ is called \textbf{skew-symmetric} if $M^t = -M$.
\end{definition}
\begin{definition}
\hfill\\
Let $W$ be a subspace of a vector space $V$ over a field $\F$. For any $v \in V$, the set $\{v\} + W = \{v + w\ |\ w \in W\}$ is called the \textbf{coset of $W$ containing $v$}. It is customary to denote this coset by $v + W$ rather than $\{v\} + W$.
\end{definition}
\begin{definition}
\hfill\\
Let $W$ be a subspace of a vector space $V$ over a field $\F$, and let $S := \{v + W\ |\ v \in V\}$ be the set of all cosets of $W$. Then $S$ is called the \textbf{quotient space of $V$ modulo $W$}, and is denoted by $V/W$. Addition and scalar multiplication by the scalars of $\F$ can be defined as follows:
\[(v_1 + W) + (v_2 + W) = (v_1 + v_2) + W\]
for all $v_1, v_2 \in V$, and
\[a(v + W) = av + W\]
for all $v \in V$ and $a \in \F$.
\end{definition}