\documentclass[10pt,a4paper,twoside,draft]{article}
\usepackage{url}
\input{../../math/abbreviations}
\input{../../math/format}
\theoremstyle{definition}
\newtheorem{exercise}{Exercise}
\newcommand{\news}[1][]{\subsection{#1}}
\newcommand{\pref}[1]{(\ref{#1})}
\renewcommand{\theequation}{\roman{equation}}
\pagestyle{myheadings}
\markboth{David Pierce, \today}{\today, Analysis II notes}
%\addtolength{\voffset}{-1.5cm}
%\addtolength{\textheight}{3cm}
\title{Analysis II notes}
\author{David Pierce}
\date{\today}
\begin{document}
\pagenumbering{roman}
%\renewcommand{\thepage}{\roman{page}}
%\renewcommand{\labelpage}{(\thepage)}
\maketitle
%\thispagestyle{empty}
These notes are for Math 272 at METU. I intend to edit them and add
to them from time to time; the latest version is in the course
directory,
\url{}. The
catalog description of the
course is:
\begin{quote}
Riemann--Stieltjes Integral. Infinite series and
products. Sequences of functions. Inverse Function Theorem.
Multiple Integrals.
\end{quote}
These topics are covered in \cite[chs 7, 8, 9, 12, 13 and
14]{MR49:9123}. This book will be my main
reference, although \cite{MR52:14179} may also be useful. I shall
also cover functions of bounded variation
(in \cite[ch.~6]{MR49:9123}), a topic left over from Math
271. My proof of Theorem \ref{thm:sf:diff} below is based on that
of \cite[ch.~1, \S~1, Proposition~6]{MR87h:30001}; my
\S\S~\ref{sect:diff} and \ref{sect:int} are influenced by
\cite[chs~2 and 3]{MR35:309}.
I prepare
the notes, first of all, for my own use.
They are only an outline of what is to be discussed
in class. In particular, for the student, reading
these notes is probably not an adequate substitute for coming to
class. Your own notes, taken properly in class, will be richer and
more complete than these notes.
The parts of these notes labelled
`proof' are generally only sketches of proofs. I leave it to the
reader both:
\begin{itemize}
\item
to recognize where details are missing, and
\item
to supply those details.
\end{itemize}
I might give the details in class, especially if I am asked to. I
myself might ask for the details on an exam.
Some proofs are omitted entirely and are thus left to be given in
class or to be done as exercises. These proofs too
might be asked for on an exam.
I do intend to write my proofs (or proof-sketches) in complete
sentences, with the
usual sorts of punctuation (commas, semicolons, full stops/periods).
Like any other writing (in English and Turkish and many other
languages), the proofs are to be read left to right, top to bottom.
Students should follow this example in writing their own complete
proofs.
Every time a new class of functions (for example) is introduced, one
should ask: What are some examples of functions that belong to this
class? Which functions do \emph{not} belong to this class?
Likewise, when a theorem is introduced, one should ask: What sorts of
functions does the theorem apply to? What does the theorem \emph{not}
tell us? For example, if the theorem is an implication (an if-then
statement), then can we prove the converse, or is there a
counter-example?
Examples and counter-examples could be requested on
an exam.
In studying the \emph{proof} of a theorem, one should ask: What
previous lemmas and theorems does the proof rely on? Is the proof in
the style of earlier proofs; does it use familiar techniques; or does
it introduce a new approach? Will a similar proof work to prove
something else? Is there an alternative proof?
\bibliographystyle{plain}
\bibliography{../../math/references}
\tableofcontents
\newpage
\pagenumbering{arabic}
%\setcounter{page}{1}
%\renewcommand{\thepage}{\arabic{page}}
%\renewcommand{\labelpage}{\thepage}
\section{Variation}
Here are some conventions, definitions and notations to be used
throughout this section:
Let $I$ be a compact interval of $\R$; so $I$ is $[a,b]$ for some
numbers $a$ and $b$.
Let $f$ be a real-valued function on $I$. A \defn{partition} of $I$
is a subset of $I$ that:
\begin{itemize}
\item
is finite, and
\item
contains the endpoints $a$ and $b$.
\end{itemize}
Let $P$ be a partition of $I$. We may also write $P$ as
\begin{equation*}
\{x_0,x_1,\dots,x_n\}
\end{equation*}
for some positive integer $n$, where
\begin{equation*}
a=x_0<x_1<\dots<x_n=b.
\end{equation*}
It is enough to find positive numbers $\delta_{\ell}$ and
$\delta_{\mathrm r}$ such that, for all $x$ in $[a,b]$,
we have the two implications
\begin{gather*}
c-\delta_{\ell}<x<c\implies\dotsb,\\
c<x<c+\delta_{\mathrm r}\implies\dotsb.
\end{gather*}
Let $\epsilon>0$. By Lemma \ref{lem:rs:order}, we can let $P$ be
a partition
of $I$ such that $\ls Pfg$ is within $\epsilon$
of $\underline{\int_a^b}f\dee g$, \emph{and} $\us Pfg$ is within
$\epsilon$ of $\overline{\int_a^b}f\dee g$.
But if $P=\{x_0,\dots,x_n\}$, for any $t_k$ in $[x_{k-1},x_k]$, we have
\begin{equation*}
\ls Pfg\leq\sum_{k=1}^n f(t_k)\chng g_k\leq\us Pfg;
\end{equation*}
if condition \pref{cond:rs:ul} holds, then by Lemma
\ref{lem:rs:more-order}, the sum $\sum_{k=1}^n
f(t_k)\chng g_k$ must be within $\epsilon$ of the common value of
$\underline{\int_a^b}f\dee g$ and $\overline{\int_a^b}f\dee g$. Hence
this common value is $\int_a^bf\dee g$, and condition
\pref{cond:rs:fing} holds.
Finally, suppose \pref{cond:rs:fing} holds. We can choose $P$ so that
\begin{equation*}
\abs{\sum_{k=1}^n f(t_k)\chng g_k-\int_a^bf\dee g}<\frac\epsilon 4
\end{equation*}
for all $t_k$ in $[x_{k-1},x_k]$. But also, since $g$ is increasing,
we can choose
the $t_k$ so that, in addition,
\begin{equation*}
\abs{\sum_{k=1}^n f(t_k)\chng g_k-\ls Pfg} <\frac{\epsilon}4.
\end{equation*}
Then $\abs{\ls Pfg-\int_a^b f\dee g}<\epsilon/2$. Likewise,
$\abs{\us Pfg-\int_a^b f\dee g}<\epsilon/2$. So condition
\pref{cond:rs:rc} holds.
\end{proof}
\begin{theorem}
Assume $g$ is increasing. If $f_0,f_1\in R(g)$, and $f_0\leq f_1$
(that is, $f_0(x)\leq f_1(x)$ for all $x$ in $I$), then
\begin{equation*}
\int_a^b f_0\dee g\leq \int_a^b f_1\dee g.
\end{equation*}
\end{theorem}
\begin{lemma}
Assume $g$ is increasing. If $f\in R(g)$, then $\abs f\in R(g)$ and
\begin{equation*}
\abs{\int_a^bf\dee g}\leq\int_a^b\abs f\dee g.
\end{equation*}
\end{lemma}
\begin{proof}
Since
$\big\lvert\abs{f(x)}-\abs{f(y)}\big\rvert\leq\abs{f(x)-f(y)}$, we
have
\begin{equation*}
\mx k{\abs f}-\mn k{\abs f}\leq \mx kf -\mn kf,
\end{equation*}
hence
$\us P{\abs f}g-\ls P{\abs f}g\leq \us Pfg-\ls Pfg$. Now use Lemma
\ref{lem:rs:rc}.
\end{proof}
\begin{lemma}
Assume $g$ is increasing. If $f\in R(g)$, then $f^2\in R(g)$.
\end{lemma}
\begin{proof}
Suppose $M$ bounds $\abs f$. Then
\begin{multline*}
\mx k{f^2}-\mn k{f^2}=\\
(\mx k{\abs f}+\mn k{\abs f})
(\mx k{\abs f}-\mn k{\abs f})\leq\\
2M(\mx k{\abs f}-\mn k{\abs f}).
\end{multline*}
Now use Lemma \ref{lem:rs:rc}.
\end{proof}
If $g$ is merely of bounded variation, then $g$ is the difference
$g_0-g_1$ of increasing functions, by Theorem \ref{thm:bv:diff}, and
$R(g_0)\cap R(g_1)\included R(g)$, by Theorem \ref{thm:rs:vector}.
But what about the reverse inclusion?
\begin{lemma}
Assume $g$ is of bounded variation. Let $V$ be the function
$x\mapsto V_g[a,x]$. If $f$ is bounded, and $f\in R(g)$, then $f\in
R(V)$.
\end{lemma}
\begin{proof}[Proof (not given in class)]
Since $V$ is increasing, we can use Lemma
\ref{lem:rs:rc}. We have
\begin{multline*}
\us PfV-\ls PfV= \sum_{k=1}^n(\mx kf-\mn kf)\chng V_k =\\
\sum_{k=1}^n(\mx kf-\mn kf)(\chng V_k-\abs{\chng g_k})+
\sum_{k=1}^n(\mx kf-\mn kf)\abs{\chng g_k},
\end{multline*}
and we can make each of the last summations less than $\epsilon/2$.
Indeed, if $\abs f\leq M$, then
\begin{equation*}
\sum_{k=1}^n(\mx kf-\mn kf)(\chng V_k-\abs{\chng g_k})\leq
2M(V(b)-\sum_{k=1}^n\abs{\chng g_k}),
\end{equation*}
which is less than $\epsilon/2$ if $P$ is fine enough. We can also
choose $t_k$ and $t_k'$ in $[x_{k-1},x_k]$ so that
\begin{equation*}
\mx kf-\mn kf<\abs{f(t_k)-f(t_k')}+\frac{\epsilon}{4V(b)}.
\end{equation*}
We may assume also that $f(t_k)-f(t_k')$ has the same sign as $\chng
g_k$. Then
\begin{equation*}
\sum_{k=1}^n(\mx kf-\mn kf)\abs{\chng g_k}<
\sum_{k=1}^n(f(t_k)-f(t_k')){\chng g_k}+ \frac{\epsilon}4.
\end{equation*}
If $P$ is fine enough, then $\sum_{k=1}^n(f(t_k)-f(t_k')){\chng
g_k}<\epsilon/4$.
\end{proof}
\begin{theorem}
Assume $g$ is of bounded variation. Then there are increasing
functions $g_0$ and $g_1$ such that $g=g_0-g_1$ and every bounded
function in $R(g)$ is also in $R(g_0)\cap R(g_1)$.
\end{theorem}
\begin{theorem}\label{thm:rs:cont}
If $g$ is of bounded variation, then $R(g)$ contains all continuous
functions.
\end{theorem}
\begin{proof}
It is enough to prove the theorem in case $g$ is increasing and
$g(a)<g(b)$. Suppose $f$ is continuous on $I$; being continuous on a
compact interval, $f$ is uniformly continuous there. Say
$\epsilon>0$. Then there is a positive
$\delta$ such that for all $x$ and $y$ (in $I$),
\begin{equation*}
\abs{x-y}<\delta\implies \abs{f(x)-f(y)}<\frac{\epsilon}{g(b)-g(a)}.
\end{equation*}
Let $P$ be a partition fine enough that $\chng x_k<\delta$ in each
case. Since $f$ is continuous, we have
\begin{equation*}
\mx kf-\mn kf=\abs{f(t_k)-f(t_k')}< \frac{\epsilon}{g(b)-g(a)}
\end{equation*}
for some $t_k$ and $t_k'$ in $[x_{k-1},x_k]$, for each $k$; this means
\begin{equation*}
\us Pfg-\ls Pfg<\sum_{k=1}^n \frac{\epsilon}{g(b)-g(a)}\chng
g_k=\epsilon.
\end{equation*}
Thus, Riemann's condition is met.
\end{proof}
\begin{corollary}
All functions of bounded variation are Riemann-integrable.
\end{corollary}
\begin{lemma}[Mean-Value Theorem for Integrals]
Suppose $f$ is continuous, and $g$ is increasing. Then
\begin{equation*}
\int_a^bf\dee g=f(t)(g(b)-g(a))=f(t)\int_a^b\dee g
\end{equation*}
for some $t$ in $[a,b]$.
\end{lemma}
\begin{proof}
The second equation is clear from the definition of the
Riemann--Stieltjes integral, or from Theorem \ref{thm:rs:parts}.
For the first equation, work with the partition $\{a,b\}$ of $I$.
We have
\begin{equation*}
\mn 1f(g(b)-g(a))\leq\int_a^bf\dee g\leq\mx 1f(g(b)-g(a)),
\end{equation*}
so $(\int_a^bf\dee g)/(g(b)-g(a))$ is between $\mn 1f$ and $\mx 1f$. Now
use the interme\-diate-value theorem for continuous functions.
\end{proof}
\begin{theorem}[First Fundamental Theorem of Calculus]
If $g$ is increasing and $f\in R(g)$, then the function
\begin{equation*}
x\mapsto\int_a^xf\dee g
\end{equation*}
is well-defined on $I$, and is differentiable, with derivative
$f(u)g'(u)$, at every point $u$ of
$I$ where $f$ is continuous and $g$ is differentiable.
\end{theorem}
\begin{proof}
If $u$ is such a point, and $u+h\in I$, then
\begin{equation*}
\frac{\int_a^{u+h}f\dee g-\int_a^uf\dee g}h=
\frac{\int_u^{u+h}f\dee g}h= f(t)\frac{g(u+h)-g(u)}h
\end{equation*}
for some $t$ between $u$ and $u+h$ inclusive.
\end{proof}
\begin{theorem}[Second Fundamental Theorem of Calculus]\label{thm:rs:2}
For any function $g$ that is continuous on $I$ and differentiable on
$(a,b)$,
\begin{equation*}
\int_a^b g'=g(b)-g(a),
\end{equation*}
provided the integral exists.
\end{theorem}
\begin{proof}
For any partition $\{x_0,\dots,x_n\}$ of $I$, we can pick $t_k$ in
$(x_{k-1},x_k)$ such that $g'(t_k)\chng x_k=\chng g_k$, whence the
Riemann sum $\sum_{k=1}^ng'(t_k)\chng x_k$ is just $g(b)-g(a)$.
\end{proof}
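Following the advice below about seeking examples, one can check
directly from the definitions how a jump in the integrator $g$ picks
out a single value of $f$; the following computation is routine.
\begin{example}
Let $c\in(a,b)$; let $g(x)=0$ when $x<c$ and $g(x)=1$ when $x\geq c$;
and let $f$ be bounded on $I$ and continuous at $c$. For any
partition $P$, only one $\chng g_k$ is non-zero, namely the one for
which $x_{k-1}<c\leq x_k$; so $\ls Pfg=\mn kf$ and $\us Pfg=\mx kf$
for that $k$. As $x_{k-1}$ and $x_k$ approach $c$, both $\mn kf$ and
$\mx kf$ approach $f(c)$, by continuity. Hence $f\in R(g)$ and
$\int_a^bf\dee g=f(c)$.
\end{example}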
\section{Infinite series and products}
An infinite sequence is a function with domain $\{n\in\Z:k\leq n\}$
for some $k$ in $\Z$. Usually the domain is $\N$ (the set
$\{0,1,2,\dots\}$ of natural numbers) or $\Zp$ (the set
$\{1,2,3,\dots\}$ of positive integers).
Informally, a sequence $(a_n)$ has a real number $b$ as a limit,
provided that $a_n$ is close to $b$ whenever $n$ is
sufficiently large. We can understand `sufficiently large' to mean
`close to $\infty$'. Then we can allow the limit $b$ to be $\infty$
or $-\infty$ as well.
To be more precise: Recall that a neighborhood of a real number $x$
is a set of real numbers that has, as a subset, an open interval that
contains $x$. We can define a \defn{neighborhood of $\infty$} to be a
set of real numbers that has, as a subset, an interval of the form
$(a,\infty)$. Similarly, neighborhoods of $-\infty$ have subsets
$(-\infty,a)$.
Now we can say that the sequence $(a_n)$ of real numbers has the
\defn{limit} $b$ (where $b\in\{-\infty\}\cup\R\cup\{\infty\}$) if, for
every neighborhood $U$ of $b$, there is a neighborhood $U'$ of
$\infty$ such that, for all $n$ in the domain of the sequence,
\begin{equation*}
n\in U'\implies a_n\in U.
\end{equation*}
In this case, we write
\begin{equation*}
\lim_{n\to\infty}a_n=b.
\end{equation*}
If $b$ is \defn{finite} (that is, $b\in\R$), then the sequence is said
to \defn{converge to $b$}. If $b$ is infinite, then the sequence
\defn{diverges to $b$}. If the sequence has no limit, then the
sequence simply \defn{diverges}.
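Here is a quick test of the definition, one sequence for each kind of
behaviour; the reader should verify the details.
\begin{example}
$\lim_{n\to\infty}1/(n+1)=0$: a neighborhood $U$ of $0$ includes some
interval $(-\epsilon,\epsilon)$, and then the neighborhood
$(1/\epsilon,\infty)$ of $\infty$ works in the definition. Similarly
$\lim_{n\to\infty}n^2=\infty$, so $(n^2)$ diverges to $\infty$. The
sequence $((-1)^n)$ simply diverges: a finite limit $b$ would have
the neighborhood $(b-1,b+1)$, which omits $1$ or $-1$, while a
neighborhood of $\infty$ or of $-\infty$ can be chosen to omit both.
\end{example}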
The \defn{extended real number system}, $\R^*$, consists of the
elements of the set
\begin{equation*}
\{-\infty\}\cup\R\cup\{\infty\}.
\end{equation*}
This can be ordered in the obvious way, so that $-\infty<x<\infty$
for every $x$ in $\R$.
\begin{lemma}[Cauchy criterion for series]
The series $\sum a_n$ converges if and only if, for every positive
$\epsilon$, there is $M$ such that, for all $n$ and $k$ in $\N$,
\begin{equation*}
n>M\implies\abs{\sum_{m=n}^{n+k}a_m}<\epsilon.
\end{equation*}
\end{lemma}
\begin{theorem}
If $\sum a_n$ converges, then $\lim_{n\to\infty}a_n=0$.
\end{theorem}
\begin{proof}
Use the Cauchy condition when $k=0$.
\end{proof}
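Conversely, the theorem gives a quick way to see divergence; note
also that its converse fails, as the reader can check in the
following computation.
\begin{example}
$\sum n/(n+1)$ diverges, since $\lim_{n\to\infty}n/(n+1)=1\neq0$. On
the other hand, $\lim_{n\to\infty}1/n=0$, but
$\sum_{k=2^m+1}^{2^{m+1}}1/k\geq2^m/2^{m+1}=1/2$ for each $m$, so the
partial sums of $\sum1/n$ are unbounded, and the series diverges.
\end{example}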
It is clear that, if $\sum a_n$ converges, then so does
$\sum(a_{2n}+a_{2n+1})$.
\begin{lemma}\label{lem:s:brackets}
If $\sum(a_{2n}+a_{2n+1})$ converges, and $\lim_{n\to\infty}a_n=0$,
then $\sum a_n$ converges.
\end{lemma}
\begin{proof}
Let $\sum_{k=p}^{\infty}(a_{2k}+a_{2k+1})=b$, and say $\epsilon>0$.
Let $M$ be such that
\begin{equation*}
n>M\implies \abs{\sum_{k=p}^{p+n}(a_{2k}+a_{2k+1})-b},
\abs{a_{2(p+n)+1}}<\frac{\epsilon}2.
\end{equation*}
Then also $n>M\implies\abs{\sum_{k=2p}^{2(p+n)}a_k-b},
\abs{\sum_{k=2p}^{2(p+n)+1}a_k-b}<\epsilon$.
\end{proof}
A series $\sum a_n$ is \defn{alternating} if $(-1)^na_n\geq0$ for each
$n$, or $(-1)^na_n\leq0$ for each
$n$.
\begin{theorem}
An alternating series $\sum a_n$ converges if $\lim_{n\to\infty}a_n=0$
and the sequence $(\abs{a_n})$ is decreasing.
\end{theorem}
\begin{proof}
Assume $(-1)^na_n\geq0$ in each case. Then
$a_{2n}+a_{2n+1}=\abs{a_{2n}}-\abs{a_{2n+1}}\geq0$, and
\begin{equation*}
\sum_{k=0}^n(a_{2k}+a_{2k+1})=a_0-
\sum_{k=0}^{n-1}(\abs{a_{2k+1}}-\abs{a_{2k+2}})+a_{2n+1}\leq a_0,
\end{equation*}
so partial sums of $\sum (a_{2n}+a_{2n+1})$ are bounded. Therefore
the latter series converges. Now use Lemma \ref{lem:s:brackets}.
\end{proof}
\begin{example}
$\sum(-1)^n/n$ converges.
\end{example}
\begin{theorem}
If $\abs x<1$, then
\begin{equation*}
\sum_{k=0}^{\infty}x^k=\frac 1{1-x}.
\end{equation*}
\end{theorem}
\begin{proof}
$\sum_{k=0}^n x^k=(1-x^{n+1})/(1-x)$. If $\abs x<1$, then the
partial sums converge to $1/(1-x)$.
\end{proof}
\begin{lemma}[Comparison Test]
If $0\leq a_n\leq cb_n$ when $n$ is large enough, for some non-zero
$c$, and $\sum b_n$ converges, then $\sum a_n$ converges.
\end{lemma}
\begin{theorem}[Limit Comparison Test]
If $a_n\geq0$ and $b_n>0$ when $n$ is large enough, and
$\lim_{n\to\infty}a_n/b_n$ is finite and non-zero, then
\begin{equation*}
\sum a_n\text{ converges }\Iff\sum b_n\text{ converges.}
\end{equation*}
\end{theorem}
\begin{theorem}[Integral Test]
If $f$ is monotone on $[0,\infty)$, then
\begin{equation*}
\sum f(n)\text{ converges }\Iff \lim_{x\to\infty}\int_0^xf\text{
is finite.}
\end{equation*}
\end{theorem}
\begin{proof}
Assume $f$ is decreasing. If $f(x)<0$ for some $x$, then each member
of the equivalence fails. Suppose $f(x)\geq0$ for all $x$. Since
$f(k+1)\leq f(x)\leq f(k)$ when $k\leq x\leq k+1$, we have
\begin{equation*}
\sum_{k=1}^{n}f(k)\leq\int_0^n f\leq\sum_{k=0}^{n-1}f(k).
\end{equation*}
If $\int_0^{\infty}f$ exists, then so does $\sum_{k=1}^{\infty}f(k)$; if
not, not.
\end{proof}
\begin{examples}\label{examp:s:int}
We have
\begin{equation*}
\int_1^x\frac{\dee t}{t^s}=
\begin{cases}
\log x,&\text{ if }s=1;\\
\displaystyle\frac{1}{s-1}\left(1-\displaystyle\frac
1{x^{s-1}}\right),&\text{ if }s\neq 1.
\end{cases}
\end{equation*}
Hence $\sum 1/n^s$ converges if and only if $s>1$.
\end{examples}
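With these examples in hand, the comparison tests dispose of many
series at a glance; here are two instances the reader can check.
\begin{example}
Since $0\leq1/(n^2+n)\leq1/n^2$ and $\sum1/n^2$ converges, the series
$\sum1/(n^2+n)$ converges, by the comparison test. By the limit
comparison test with $b_n=1/n^2$, the series $\sum(n+1)/(n^3+5)$ also
converges, since the ratio of the terms tends to $1$.
\end{example}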
A series $\sum a_n$ is \defn{absolutely convergent} if $\sum\abs{a_n}$
converges. Absolute convergence implies convergence, by the Cauchy
criterion.
\begin{theorem}[Ratio Test]
Let
\begin{equation*}
r=\liminf_{n\to\infty}\abs{\frac{a_{n+1}}{a_n}}\quad\text{ and }\quad
R=\limsup_{n\to\infty}\abs{\frac{a_{n+1}}{a_n}}.
\end{equation*}
Then
\begin{itemize}
\item
$R<1\implies \sum a_n$ converges absolutely;
\item
$1<r\implies \sum a_n$ diverges.
\end{itemize}
In case $r\leq1\leq R$, no conclusion is possible.
\end{theorem}
\begin{proof}
Suppose $R<1$, and let $x$ be such that $R<x<1$. Then
$\abs{a_{n+1}}\leq x\abs{a_n}$ when $n$ is large enough, say when
$n\geq N$; so $\abs{a_n}\leq\abs{a_N}x^{n-N}$ when $n\geq N$, and
$\sum a_n$ converges absolutely, by comparison with the geometric
series. Now suppose $1<r$. Then, for all sufficiently large $n$,
\begin{equation*}
\abs{\frac{a_{n+1}}{a_n}}>1,
\end{equation*}
which means $a_n$ cannot converge to $0$.
Finally,
$\lim_{n\to\infty}\displaystyle\frac{1/(n+1)}{1/n}=1
=\lim_{n\to\infty}\displaystyle\frac{1/(n+1)^2}{1/n^2}$, although
$\sum1/n$ diverges and $\sum1/n^2$ converges (Examples
\ref{examp:s:int}).
\end{proof}
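For instance, the ratio test handles terms built from powers and
factorials easily; the reader may verify the following.
\begin{example}
For $\sum n^2/2^n$, the ratio of successive terms is
$\displaystyle\frac{(n+1)^2/2^{n+1}}{n^2/2^n}=
\frac12\left(1+\frac1n\right)^2$, which tends to $\frac12$; so
$r=R=\frac12<1$, and the series converges absolutely.
\end{example}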
\begin{theorem}[Root Test]
Let $\rho=\limsup_{n\to\infty}\sqrt[n]{\abs{a_n}}$. Then
\begin{itemize}
\item
$\rho<1\implies\sum a_n$ converges absolutely;
\item
$1<\rho\implies\sum a_n$ diverges.
\end{itemize}
In case $\rho=1$, no conclusion is possible.
\end{theorem}
\begin{proof}
If $\rho<1$, let $x$ be such that $\rho<x<1$. Then
$\sqrt[n]{\abs{a_n}}<x$ when $n$ is large enough, so
$\abs{a_n}<x^n$, and $\sum a_n$ converges absolutely, by comparison
with the geometric series. If $1<\rho$, then $\abs{a_n}>1$ for
infinitely many $n$, so $a_n$ cannot converge to $0$, and $\sum a_n$
diverges.
\end{proof}
\begin{theorem}
If $\sum a_n$ converges absolutely, and $f:\N\to\N$ is a bijection,
then $\sum a_{f(n)}$ converges, and
\begin{equation*}
\sum_{n=0}^{\infty}a_{f(n)}=\sum_{n=0}^{\infty}a_n.
\end{equation*}
\end{theorem}
\begin{proof}
Say $\epsilon>0$. Let $m$ be large enough that
$\abs{\sum_{k=m+1}^{\infty}{a_k}}\leq
\sum_{k=m+1}^{\infty}\abs{a_k}\leq\epsilon/2$. Then $n$ can be large
enough that
\begin{equation*}
\{0,1,\dots,m\}\included\{f(0),f(1),\dots,f(n)\}.
\end{equation*}
Then $\abs{\sum_{k=0}^na_{f(k)}-\sum_{k=0}^{m}a_k}<\epsilon/2$, so
\begin{math}
\abs{\sum_{k=0}^na_{f(k)}-\sum_{k=0}^{\infty}a_k}<\epsilon
\end{math}.
\end{proof}
\begin{theorem}
If $\sum a_n$ is convergent, but not absolutely convergent, and
$-\infty\leq b\leq c\leq\infty$, then there is a bijection
$f:\N\to\N$ such that
\begin{equation*}
\liminf_{n\to\infty}\sum_{k=0}^na_{f(k)}=b\quad\text{ and }\quad
\limsup_{n\to\infty}\sum_{k=0}^na_{f(k)}=c.
\end{equation*}
\end{theorem}
\begin{proof}
For any $x$ in $\R$, we have
\begin{equation*}
x=\max(0,x)+\min(0,x)
\quad\text{ and }\quad
\abs{x}= \max(0,x)-\min(0,x).
\end{equation*}
Hence (by Theorem \ref{thm:s:linear}), if
$\sum a_n$ and one of $\sum\max(0,a_n)$ and $\sum\min(0,a_n)$
converge, then $\sum a_n$ converges absolutely.
Suppose $\sum a_n$ converges, but not absolutely, and $-\infty<
b\leq c<\infty$. Then
both $\sum\max(0,a_n)$ and $\sum\min(0,a_n)$ diverge, to $\infty$ and
$-\infty$ respectively. So it is possible to find a strictly increasing
sequence $(g(n):n\in\N)$ and a bijection $f:\N\to\N$ such that:
\begin{itemize}
\item
$g(0)=0$;
\item
$f$ is increasing on $\{k\in\N:(\exists n\in\N)\;g(2n)\leq
k<g(2n+1)\}$, where it runs through the indices of the non-negative
terms, and likewise on the complementary blocks, where it runs
through the indices of the negative terms;
\item
the $g(n)$ are chosen so that the partial sums of $\sum a_{f(k)}$
climb just above $c$ at the end of each block of non-negative terms,
and fall just below $b$ at the end of each block of negative terms.
\end{itemize}
Since $\lim_{n\to\infty}a_n=0$, the partial sums then have limit
superior $c$ and limit inferior $b$.
\end{proof}
A \defn{double sequence} is a function $(p,q)\mapsto a_{p\,q}$ on
$\N\times\N$. Such a sequence has the \defn{limit} $b$, and we write
$\lim_{(p,q)\to\infty}a_{p\,q}=b$, if, for every positive $\epsilon$,
there is $M$ such that
\begin{equation*}
p,q>M\implies\abs{a_{p\,q}-b}<\epsilon.
\end{equation*}
\begin{theorem}
Suppose
\begin{equation*}
\lim_{(p,q)\to\infty}a_{p\,q}=b.
\end{equation*}
If
$\lim_{q\to\infty}a_{p\,q}$ exists for each $p$ in
$\N$, then
\begin{equation*}
\lim_{p\to\infty}\lim_{q\to\infty}a_{p\,q}=b.
\end{equation*}
\end{theorem}
\begin{proof}
Say $\epsilon>0$. Let $N$ be such that
\begin{equation*}
p,q>N\implies\abs{a_{p\,q}-b}<\frac{\epsilon}2.
\end{equation*}
Suppose $p>N$ and $\lim_{q\to\infty}a_{p\,q}=c_p$. There is $M$ such
that
\begin{equation*}
q>M\implies\abs{a_{p\,q}-c_p}<\frac{\epsilon}2.
\end{equation*}
Hence, if $q>\max(M,N)$, then $\abs{c_p-b}\leq\abs{c_p-a_{p\,q}}+
\abs{a_{p\,q}-b}<\epsilon$.
\end{proof}
\begin{example}
We have
\begin{equation*}
\lim_{p\to\infty}\lim_{q\to\infty}\frac{pq}{p^2+q^2}=
\lim_{p\to\infty}0=0;
\end{equation*}
but $pq/(p^2+q^2)=1/2$ if $p=q$; so the double
sequence
\begin{equation*}
(p,q)\mapsto \frac {pq}{p^2+q^2}
\end{equation*}
has no limit.
\end{example}
Suppose $(a_n:n\in\N)$ is a sequence of real numbers.
We shall say that $\prod a_n$ \defn{converges} if, for some $p$ in
$\N$, the sequence of products $\prod_{k=p}^na_k$ converges to a
\emph{finite} and \emph{non-zero} limit $b$. In this case, we write
\begin{equation*}
\prod_{k=p}^{\infty}a_k=b\quad\text{ and }\quad
\prod_{k=0}^{\infty}a_k= b\cdot\prod_{k=0}^{p-1}a_k.
\end{equation*}
(If $p=0$, then $\prod_{k=0}^{p-1}a_k=1$.)
The definition has two immediate consequences:
\begin{itemize}
\item
If $\prod a_n$
converges, then $a_n\neq0$ for all but finitely many $n$.
\item
$\prod a_n$ converges if and only if $\prod 1/a_n$ converges.
\end{itemize}
\begin{examples}
$\prod_{k=1}^n(1+ 1/k)=\prod_{k=1}^n((k+ 1)/k)=n+1$, and
\begin{equation*}
\prod_{k=2}^{n+1}\left(1-\frac 1k\right)= \prod_{k=2}^{n+1}\frac{k-1}k=
\frac 1{n+1};
\end{equation*}
so
$\prod(1\pm 1/n)$ diverges in each case.
\end{examples}
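On the other hand, a telescoping computation like the last one can
also show convergence; the reader may check the following.
\begin{example}
$\displaystyle\prod_{k=2}^{n}\left(1-\frac1{k^2}\right)=
\prod_{k=2}^{n}\frac{(k-1)(k+1)}{k^2}=\frac{n+1}{2n}$, which tends to
$\frac12$; so $\prod(1-1/n^2)$ converges.
\end{example}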
\begin{theorem}[Cauchy condition for products]
The product $\prod a_n$ converges if and only if, for every positive
$\epsilon$, there is $M$ such that, for all $n$ and $k$ in $\N$,
\begin{equation*}
n>M\implies\abs{\prod_{\ell=n}^{n+k}a_{\ell}-1}<\epsilon.
\end{equation*}
\end{theorem}
\begin{proof}
Suppose $\prod a_n$ converges. Then for some $p$ there is a
positive $\delta$ such that
\begin{equation*}
\abs{\prod_{\ell=p}^{p+n}a_{\ell}}>\delta
\end{equation*}
for all $n$ in $\N$. Also, there is $M$ such that
\begin{equation*}
n>M\implies\abs{\prod_{\ell=p}^{p+n+k+1}a_{\ell}-
\prod_{\ell=p}^{p+n}a_{\ell}}<\epsilon\delta
\end{equation*}
for all $k$ in $\N$. Division yields
\begin{equation*}
n>M\implies\abs{\prod_{\ell=p+n+1}^{p+n+k+1}a_{\ell}-
1}<\epsilon.
\end{equation*}
Suppose conversely that for all positive $\epsilon$ there is $M$ such
that
\begin{equation*}
n>M\implies\abs{\prod_{\ell=n}^{n+k}a_{\ell}-
1}<\epsilon.
\end{equation*}
for all $k$ in $\N$. Then, in particular, there is $p$ such that
\begin{equation*}
\frac12<\prod_{\ell=p}^{p+k}a_{\ell} <\frac32
\end{equation*}
for all $k$ in $\N$.
Hence if $\lim_{n\to\infty}\prod_{\ell=p}^{p+n}a_{\ell}$ exists, then
it is not zero, so $\prod a_n$ converges. We can show that this limit
exists by the Cauchy criterion. Indeed, we have
\begin{equation*}
\abs{\prod_{\ell=p}^{p+n+k+1}a_{\ell}-\prod_{\ell=p}^{p+n}a_{\ell}}=
\abs{\prod_{\ell=p+n+1}^{p+n+k+1}a_{\ell}- 1} \cdot
\abs{\prod_{\ell=p}^{p+n}a_{\ell}}< \frac 32\epsilon
\end{equation*}
if $n>M$.
\end{proof}
\begin{theorem}
If $(a_n)$ is a sequence of \emph{positive} terms, then
\begin{equation*}
\prod(1+a_n)\text{ converges }\Iff\sum a_n\text{ converges.}
\end{equation*}
\end{theorem}
\begin{proof}
Since the terms $a_n$ are positive, and $1+x\leq\exp x$ when
$x\geq0$, we have
\begin{equation*}
\sum_{k=0}^na_k\leq\prod_{k=0}^n(1+a_k)\leq\exp\Bigl(\sum_{k=0}^na_k\Bigr),
\end{equation*}
and both the partial sums and the partial products increase with $n$.
So the partial products have a finite limit if and only if the
partial sums do; and the limit of the products is at least $1$, so it
is not zero.
\end{proof}
\section{Sequences of functions}
Let $(f_n)$ be a sequence of real-valued functions on an interval
$I$. The sequence \defn{converges uniformly} on $I$ to a function $f$
if, for every positive $\epsilon$, there is $M$ such that, for all
$x$ in $I$,
\begin{equation*}
n>M\implies \abs{f_n(x)-f(x)}<\epsilon.
\end{equation*}
We showed in Math 271 that the uniform limit of continuous functions
is continuous.
Let $B(I)$ be the set of bounded real-valued functions on $I$. This
becomes a metric space when we define
\begin{equation*}
d(f,g)=\sup\{\abs{f(x)-g(x)}:x\in I\}.
\end{equation*}
Then a sequence of functions in $B(I)$ converges uniformly if and
only if it converges in the metric $d$.
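For example (and as a warning), pointwise convergence on $I$ does not
imply convergence in this metric; the standard instance is the
following.
\begin{example}
Let $I=[0,1]$ and $f_n(x)=x^n$. For each $x$ in $I$, the sequence
$(f_n(x))$ converges, to $0$ if $x<1$ and to $1$ if $x=1$; but if $f$
is this pointwise limit, then $d(f_n,f)=\sup\{x^n:0\leq x<1\}=1$ for
each $n$, so the convergence is not uniform. (This also follows from
the result just quoted, since each $f_n$ is continuous, but $f$ is
not.)
\end{example}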
\begin{lemma}[Cauchy condition for uniform convergence]
A sequence $(f_n)$ of functions on $I$ converges uniformly if and
only if for all positive $\epsilon$ there is $M$ such that for all
$x$ in $I$, all $n$ in $\Z$ and all $k$ in $\N$,
\begin{equation*}
n>M\implies\abs{f_{n+k}(x)-f_n(x)}<\epsilon.
\end{equation*}
\end{lemma}
\begin{proof}
Suppose $(f_n)$ converges uniformly to $f$. If $\epsilon>0$, let
$M$ be such that
\begin{equation}\label{eqn:sf:Cauchy}
n>M\implies \abs{f_n(x)-f(x)}<\frac{\epsilon}2.
\end{equation}
If $n>M$, then also $\abs{f_{n+k}(x)-f(x)}<\epsilon/2$, so
$\abs{f_{n+k}(x)-f_n(x)}<\epsilon$.
Conversely, if $(f_n)$ satisfies the Cauchy condition, then each
sequence $(f_n(x))$ of real numbers is Cauchy, so it has a limit, say
$f(x)$. Hence, for all $n$, we have
\begin{equation*}
\lim_{k\to\infty}\abs{f_n(x)-f_{n+k}(x)}=\abs{f_n(x)-f(x)}.
\end{equation*}
If $\epsilon>0$, let $M$ be such that, for all $x$ in $I$ and all $k$
in $\N$, $n>M$ implies $\abs{f_{n+k}(x)-f_n(x)}<\epsilon/2$. Then
\begin{equation*}
n>M\implies\abs{f_n(x)-f(x)}\leq\frac{\epsilon}2<\epsilon.
\end{equation*}
So $(f_n)$ converges uniformly to $f$.
\end{proof}
A \emph{series} $\sum f_n$ of functions converges uniformly if
the sequence of partial sums $\sum_{k=0}^{n}f_k$ converges uniformly.
\begin{lemma}[Cauchy condition for uniform convergence of series]
A series $\sum f_n$ of functions converges uniformly if and only if,
for all positive $\epsilon$, there is $M$ such that for all $x$ in
$I$, for all $n$ in $\Z$ and all $k$ in $\N$,
\begin{equation*}
n>M\implies\abs{\sum_{\ell=n}^{n+k}f_{\ell}(x)}<\epsilon.
\end{equation*}
\end{lemma}
\begin{theorem}[Weierstra\ss{} $M$-test]
Suppose the sequences $(f_n)$ of functions and $(M_n)$ of numbers are
such that, for each $x$ in $I$,
\begin{equation*}
\abs{f_n(x)}\leq M_n.
\end{equation*}
If $\sum M_n$ converges, then $\sum f_n$ converges uniformly.
\end{theorem}
\begin{proof}
If $\sum M_n$ converges, then it satisfies the Cauchy criterion for
series; so $\sum f_n$ satisfies the Cauchy criterion for series of
functions; so $\sum f_n$ converges uniformly.
\end{proof}
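As an application of the $M$-test, the reader may check the
following.
\begin{example}
On $[-1,1]$, the series $\sum_{n=1}^{\infty}x^n/n^2$ converges
uniformly, since $\abs{x^n/n^2}\leq1/n^2$ there, and $\sum1/n^2$
converges (Examples \ref{examp:s:int}).
\end{example}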
\begin{theorem}
Suppose each $f_n$ is continuous, and $\sum f_n$ converges uniformly to
$f$. Then $f$ is continuous; in particular,
\begin{equation*}
\lim_{y\to x}\sum_{n=0}^{\infty}f_n(y)=
\sum_{n=0}^{\infty}\lim_{y\to x}f_n(y).
\end{equation*}
\end{theorem}
\begin{theorem}[A space-filling curve]
There is a continuous surjective function from $[0,1]$ to
$[0,1]\times[0,1]$.
\end{theorem}
\begin{proof}
Let $\phi$ be a continuous function on $\R$ such that, for all $k$
in $\Z$,
\begin{equation*}
\phi(x)=
\begin{cases}
0,&\text{ if }k-1/6\leq x\leq k+1/6;\\
1,&\text{ if }k+1/3\leq x\leq k+2/3.
\end{cases}
\end{equation*}
We may take $\phi$ to be periodic, with period $1$; that is,
$\phi(x)=\phi(x-\floor x)$.
For each $e$ in $\{0,1\}$, let $f_e$ be the function given by
\begin{equation*}
f_e(x)=\sum_{k=0}^{\infty}\frac{\phi(3^{2k+e}x)}{2^{k+1}}.
\end{equation*}
The series converge uniformly, by the $M$-test with $M_k=1/2^{k+1}$.
Since $\phi$ is continuous, so are the $f_e$. Our function will be
\begin{equation*}
x\mapsto (f_0(x),f_1(x)).
\end{equation*}
Any point of $[0,1]\times[0,1]$ can be written, in binary notation, as
\begin{equation*}
\left(\sum_{k=1}^{\infty}\frac{a_k}{2^k},
\sum_{k=1}^{\infty}\frac{b_k}{2^k}\right),
\end{equation*}
where $a_k$ and $b_k$ are in $\{0,1\}$. Now define
\begin{equation*}
c_n=
\begin{cases}
a_k,&\text{ if }2k-1=n;\\
b_k,&\text{ if }2k=n;
\end{cases}
\end{equation*}
and let
\begin{equation*}
c=\sum_{n=1}^{\infty}\frac{c_n}{3^n}.
\end{equation*}
Then
\begin{equation*}
3^kc-\floor{3^kc}=
\frac{c_{k+1}}{3}+\sum_{n=1}^{\infty}\frac{c_{n+k+1}}{3^{n+1}},
\end{equation*}
so $\phi(3^kc)=c_{k+1}$. Also $0\leq c\leq1/2$, so $c\in[0,1]$; and
\begin{equation*}
f_0(c)=\sum_{k=0}^{\infty}\frac{c_{2k+1}}{2^{k+1}}
=\sum_{k=1}^{\infty}\frac{a_k}{2^k},\qquad
f_1(c)=\sum_{k=0}^{\infty}\frac{c_{2k+2}}{2^{k+1}}
=\sum_{k=1}^{\infty}\frac{b_k}{2^k},
\end{equation*}
so the given point is the image of $c$.
\end{proof}
\begin{theorem}
Let $g$ be of bounded variation on $[a,b]$. If $f_n\in R(g)$, and
$(f_n)$ converges uniformly to $f$ on $[a,b]$, then $f\in R(g)$, and
$(\int_a^xf_n\dee g)$ converges uniformly to $\int_a^xf\dee g$.
\end{theorem}
\begin{proof}
We may assume $g$ is increasing and $g(b)\neq g(a)$. Say
$\epsilon>0$. There is $M$
such that, for all $x$ in $[a,b]$,
\begin{equation*}
n\geq M\implies\abs{f_n(x)-f(x)}<\frac{\epsilon}{3(g(b)-g(a))}.
\end{equation*}
We use this first to prove
$f\in R(g)$. For any partition $P$ of $[a,b]$, we have
\begin{equation*}
\abs{\us P{f-f_M}g},\abs{\ls P{f-f_M}g}<\frac{\epsilon}3.
\end{equation*}
Suppose in particular that $P$ is fine enough that
\begin{equation*}
\us P{f_M}g-\ls P{f_M}g<\frac{\epsilon}3.
\end{equation*}
Because $\sup\{f(x):x\in[a,b]\}\leq\sup\{f(x)-f_M(x):x\in[a,b]\}+
\sup\{f_M(x):x\in[a,b]\}$, and likewise for the infima, we have
\begin{multline*}
\us Pfg-\ls Pfg\leq\\ \us P{f-f_M}g+\us P{f_M}g - (\ls P{f-f_M}g +\ls
P{f_M}g) \leq \epsilon.
\end{multline*}
So Riemann's condition is satisfied, and $f\in R(g)$. We have also
\begin{multline*}
\abs{\int_a^xf_n\dee g-\int_a^xf\dee g}=\abs{\int_a^x(f_n-f)\dee g}
\leq \\
\int_a^x\abs{f_n-f}\dee g\leq \int_a^b\abs{f_n-f}\dee
g\leq\frac{\epsilon}3,
\end{multline*}
so the convergence of the sequence of integrals is uniform.
\end{proof}
Under the conditions of the theorem, we have
\begin{equation*}
\lim_{n\to\infty}\int_a^xf_n\dee
g=\int_a^x(\lim_{n\to\infty}f_n)\dee g.
\end{equation*}
If also $\sum f_n$ converges uniformly, then
\begin{equation*}
\sum_{n=0}^{\infty}\int_a^xf_n\dee
g=\int_a^x(\sum_{n=0}^{\infty}f_n)\dee g.
\end{equation*}
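For instance, integrating the geometric series term by term gives a
series for the logarithm; the justification is the $M$-test on a
slightly smaller interval, as the reader can check.
\begin{example}
Suppose $0<t<1$. On $[0,t]$ we have $\abs{x^n}\leq t^n$, and
$\sum t^n$ converges; so $\sum x^n$ converges uniformly on $[0,t]$,
and
\begin{equation*}
\sum_{n=0}^{\infty}\frac{t^{n+1}}{n+1}=
\sum_{n=0}^{\infty}\int_0^tx^n\dee x=
\int_0^t\frac{\dee x}{1-x}=-\log(1-t).
\end{equation*}
\end{example}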
A \defn{power series} is a function
\begin{equation*}
z\mapsto\sum_{n=0}^{\infty}c_n(z-a)^n.
\end{equation*}
At $a$, this function has the value $c_0$, by definition. The
function may, but need not, be well-defined at other points. Nice
results are obtained if the coefficients $c_n$ and the point $a$ (as
well as the argument $z$) are allowed to be \tech{complex numbers}.
\begin{theorem}
The vector-space $\R^2$ becomes a field when equipped with the
multiplication given by
\begin{equation*}
(a,b)(c,d)=(ac-bd,ad+bc).
\end{equation*}
In this field, the multiplicative identity is $(1,0)$, and
\begin{equation*}
(a,b)\inv=\frac1{a^2+b^2}(a,-b).
\end{equation*}
\end{theorem}
The field in the theorem is denoted $\C$; its elements are
\defn{complex numbers}. For the complex numbers $(1,0)$ and $(0,1)$,
we may write
\begin{equation*}
1 \quad\text{ and }\quad i
\end{equation*}
respectively;
then the complex number $(a,b)$---that is, $a(1,0)+b(0,1)$---can be
written $a+bi$. Note that $i^2=-1$.
\begin{theorem}
If $z,w\in\C$, then $\abs{zw}=\abs z\abs w$.
\end{theorem}
If $z\in\C$, then $\abs z$ is called the \defn{absolute value} or
\defn{modulus} of $z$. We have the triangle inequality,
$\abs{z+w}\leq\abs z+\abs w$.
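Here is a small computation to check the theorem against; it uses the
formula $\abs{a+bi}=\sqrt{a^2+b^2}$, which identifies the modulus of
$a+bi$ with the length of $(a,b)$ in $\R^2$.
\begin{example}
$(1+i)^2=1+2i+i^2=2i$; and $\abs{1+i}=\sqrt2$, $\abs{2i}=2$, so indeed
$\abs{(1+i)^2}=\abs{1+i}^2=2$.
\end{example}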
Much of what we have done with sequences of \emph{real} numbers
applies to \emph{complex} numbers as well. In particular,
\begin{itemize}
\item
the \emph{definition} of absolute convergence is meaningful for series
of complex numbers;
\item
the \emph{tests} for absolute convergence work for such series (since
absolute convergence is still convergence of a certain series of real
numbers);
\item
the \emph{proof} that absolute convergence implies convergence is
still valid for such series.
\end{itemize}
Let $\rho=\limsup_{n\to\infty}\sqrt[n]{\abs{c_n}}$. Then
\begin{multline*}
\limsup_{n\to\infty}\sqrt[n]{\abs{c_n(z-a)^n}}=
\limsup_{n\to\infty}\sqrt[n]{\abs{c_n}\abs{(z-a)^n}} = \\
\limsup_{n\to\infty}(\abs{z-a}\sqrt[n]{\abs{c_n}})=\abs{z-a}\rho.
\end{multline*}
If $\rho>0$, then the series $\sum c_n(z-a)^n$:
\begin{itemize}
\item
converges absolutely, if $\abs{z-a}<1/\rho$;
\item
diverges, if $\abs{z-a}>1/\rho$.
\end{itemize}
The number $1/\rho$ (if it exists) is the \defn{radius of convergence}
of the series; the ball $B(a;1/\rho)$ is the \defn{disc of
convergence}. (If $\rho=0$, then the radius of convergence is
$\infty$; if $\rho=\infty$, then the radius of convergence is $0$.)
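Some standard radii of convergence, which the reader should verify:
\begin{examples}
For $\sum z^n$, we have $\rho=1$, so the radius of convergence is
$1$. For $\sum n!\,z^n$, since $n!\geq(n/2)^{n/2}$, we have
$\sqrt[n]{n!}\to\infty$, so $\rho=\infty$ and the radius of
convergence is $0$. For $\sum z^n/n!$, likewise
$\sqrt[n]{1/n!}\to0$, so the radius of convergence is $\infty$.
\end{examples}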
\begin{theorem}
Every power series is continuous on its disc of convergence.
\end{theorem}
\begin{proof}
Let $R$ be the radius of convergence of $\sum c_n(z-a)^n$, and
suppose $w\in B(a;R)$. Let $\delta=(R-\abs{w-a})/2$, and let
$F=\{z\in\C:\abs{z-w}\leq\delta\}$. Then $F$ is a neighborhood of
$w$, so it is enough to show that the series is continuous on $F$.
For \emph{this}, since $z\mapsto c_n(z-a)^n$ is continuous, it is
enough to show that the series converges uniformly on $F$. But $F$
is compact, so some point $b$ of $F$ is furthest from $a$; that is,
if $z\in F$, then
\begin{equation*}
\abs{c_n(z-a)^n}=\abs{c_n}\abs{z-a}^n\leq\abs{c_n}\abs{b-a}^n.
\end{equation*}
Since $\abs{b-a}<R$, the series $\sum\abs{c_n}\abs{b-a}^n$ converges;
so, by the $M$-test, the series converges uniformly on $F$.
\end{proof}
\begin{lemma}\label{lem:sf:neq}
If $z$ and $w$ are complex numbers, then
\begin{equation*}
\abs{z^n-w^n}\leq n\abs{z-w}(\abs w+\abs{z-w})^{n-1}
\end{equation*}
for every integer $n$ such that $n>0$.
\end{lemma}
\begin{proof}
The claim is trivial if $w=0$. Assume $w\neq 0$, and let $t=(z-w)/w$.
Then we have to prove
\begin{equation*}
\abs{(1+t)^n-1}\leq n\abs{t}(1+\abs{t})^{n-1}.
\end{equation*}
If $t$ is a non-negative real number, then the inequality holds, since
then
\begin{equation*}
(1+t)^n-1=n\int_0^t(1+x)^{n-1}\dee x\leq nt(1+t)^{n-1}.
\end{equation*}
For arbitrary $t$ in $\C$, we have
\begin{equation*}
\abs{(1+t)^n-1}=\abs{\sum_{k=1}^n\binom nkt^k}\leq
\sum_{k=1}^n\binom nk\abs t^k=(1+\abs t)^n-1,
\end{equation*}
which, combined with the previous inequality, yields the claim.
\end{proof}
Note that, by interchanging $z$ and $w$, we can write the inequality
of the lemma as
\begin{equation*}
\abs{z^n-w^n}\leq
n\abs{z-w}(\abs{z}+\abs{z-w})^{n-1}.
\end{equation*}
\begin{theorem}\label{thm:sf:diff}
If $\sum_{n=0}^{\infty}c_n(z-a)^n$ has a positive radius of
convergence, then the function is differentiable on its disc of
convergence, and its derivative is $\sum_{n=1}^{\infty}nc_n(z-a)^{n-1}$.
\end{theorem}
\begin{proof}
Write $f(z)$ for $\sum_{n=0}^{\infty}c_n(z-a)^n$. Let $\zeta$ be an
element of the disc of convergence; we shall compute $f'(\zeta)$. For
all elements $w$ of this disc distinct from $\zeta$,
\begin{equation*}
\frac{f(\zeta)-f(w)}{\zeta-w}=
\sum_{n=1}^{\infty}c_n\frac{(\zeta-a)^n-(w-a)^n}{\zeta-w}.
\end{equation*}
We want to make the difference between this and
$\sum_{n=1}^{\infty}nc_n(\zeta-a)^{n-1}$ small. To do this, we break
this difference into two pieces, and make each piece small.
For any positive integer $M$,
\begin{multline*}
\abs{\frac{f(\zeta)-f(w)}{\zeta-w}-
\sum_{n=1}^{\infty}nc_n(\zeta-a)^{n-1}} \leq\\
\sum_{n=1}^M \abs{c_n}\abs{\frac{(\zeta-a)^n-(w-a)^n}{\zeta-w}-
n(\zeta-a)^{n-1}} +\\
\sum_{n=M+1}^{\infty}\abs{c_n}
\left(\abs{\frac{(\zeta-a)^n-(w-a)^n}{\zeta-w}}+
n\abs{\zeta-a}^{n-1}\right)\leq \\
\sum_{n=1}^M \abs{c_n}\abs{\frac{(\zeta-a)^n-
(w-a)^n}{\zeta-w}-n(\zeta-a)^{n-1}} +\\
\sum_{n=M+1}^{\infty}n\abs{c_n}
\left((\abs{\zeta-a}+\abs{\zeta-w})^{n-1}+\abs{\zeta-a}^{n-1}\right)
\end{multline*}
by Lemma \ref{lem:sf:neq}.
Let $R$ be the radius of convergence of $f$, and let $\delta_0=
(R-\abs{\zeta-a})/2$. Suppose $0<\abs{\zeta-w}<\delta_0$. Then
\begin{multline*}
\abs{\frac{f(\zeta)-f(w)}{\zeta-w}-
\sum_{n=1}^{\infty}nc_n(\zeta-a)^{n-1}} \leq\\
\sum_{n=1}^M \abs{c_n}\abs{\frac{(\zeta-a)^n-
(w-a)^n}{\zeta-w}-n(\zeta-a)^{n-1}} +\\
2 \sum_{n=M+1}^{\infty}n\abs{c_n}
(\abs{\zeta-a}+\delta_0)^{n-1}.
\end{multline*}
Say $\epsilon>0$.
By Lemma \ref{lem:sf:diff}, we can choose $M$ large enough that
\begin{equation*}
2\sum_{n=M+1}^{\infty}n\abs{c_n}
(\abs{\zeta-a}+\delta_0)^{n-1}<\epsilon/2.
\end{equation*}
But for each positive $n$, the derivative of $z\mapsto (z-a)^n$ is $z\mapsto
n(z-a)^{n-1}$; hence, for the chosen $M$, we can find $\delta_1$ such
that, if $0<\abs{\zeta-w}<\delta_1$, then
\begin{equation*}
\sum_{n=1}^M
\abs{c_n}\abs{\frac{(\zeta-a)^n-(w-a)^n}{\zeta-w}-n(\zeta-a)^{n-1}}<
\frac{\epsilon}2.
\end{equation*}
Therefore, if $0<\abs{\zeta-w}<\min\{\delta_0,\delta_1\}$, then
\begin{equation*}
\abs{\frac{f(\zeta)-f(w)}{\zeta-w}-\sum_{n=1}^{\infty}nc_n(\zeta-a)^{n-1}}
\leq\epsilon;
\end{equation*}
this proves the claim.
\end{proof}
\begin{corollary}
If $f(z)=\sum_{n=0}^{\infty}c_n(z-a)^n$, and $R$ is the radius of
convergence of the series, then $f$ is infinitely differentiable on
$B(a;R)$, and
\begin{equation*}
c_n=\frac{f^{(n)}(a)}{n!}.
\end{equation*}
\end{corollary}
\begin{proof}
Repeated application of the theorem yields
\begin{equation*}
f^{(k)}(z)=\sum_{n=k}^{\infty}n(n-1)\dots(n-k+1)c_n(z-a)^{n-k}=
\sum_{n=k}^{\infty}\frac{n!}{(n-k)!}c_n(z-a)^{n-k},
\end{equation*}
hence $f^{(k)}(a)=k!c_k$.
\end{proof}
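For example, the corollary and the theorem can be checked on the
geometric series.
\begin{example}
On $B(0;1)$ we have $1/(1-z)=\sum_{n=0}^{\infty}z^n$, so the $n$th
derivative of $z\mapsto1/(1-z)$ at $0$ is $n!$; and, by Theorem
\ref{thm:sf:diff},
\begin{equation*}
\frac1{(1-z)^2}=\sum_{n=1}^{\infty}nz^{n-1}
\end{equation*}
on the same disc.
\end{example}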
For which functions $f$, defined on a neighborhood of $a$, can we conclude
\begin{equation}\label{eqn:sf:Taylor}
f(z)=\sum_{n=0}^{\infty}\frac{f^{(n)}(a)}{n!}(z-a)^n
\end{equation}
on some neighborhood of $a$? If $a\in \C$, then it is enough for $f$
to be once-differentiable (but we won't prove this). If $a\in\R$,
then it is not even enough to
know that $f$ is infinitely differentiable:
\begin{example}
Let $f(x)=
\begin{cases}
\exp(-x^{-2}),&\text{ if }x\neq0;\\
0,&\text{ if }x=0.
\end{cases}$
Then $f^{(n)}(0)=0$ for all $n$; so Equation \pref{eqn:sf:Taylor}
fails when $a=0$.
\end{example}
We can come up with a condition under which a function $f$ satisfies
Equation~\pref{eqn:sf:Taylor}:
\begin{lemma}[Generalized Mean-Value Theorem]
Let $F$ and $G$ be real-valued functions continuous on $[a,b]$ and
differentiable on $(a,b)$. Then
\begin{equation*}
F'(c)[G(b)-G(a)]=G'(c)[F(b)-F(a)]
\end{equation*}
for some $c$ in $(a,b)$.
\end{lemma}
\begin{proof}
Apply Rolle's Theorem to $x\mapsto F(x)[G(b)-G(a)]-G(x)[F(b)-F(a)]$.
\end{proof}
\begin{theorem}
Suppose $f^{(n-1)}$ and $g^{(n-1)}$ are continuous on $[a,b]$ and
differentiable on $(a,b)$. Then for any distinct $c$ and $x$ in
$[a,b]$ there is $y$, strictly between them, such that
\begin{equation*}
f^{(n)}(y)\left[g(x)-\sum_{k=0}^{n-1}\frac{g^{(k)}(c)}{k!}(x-c)^k\right]
= g^{(n)}(y)\left[f(x)-\sum_{k=0}^{n-1}\frac{f^{(k)}(c)}{k!}(x-c)^k\right].
\end{equation*}
\end{theorem}
\begin{proof}
Use the Generalized Mean-Value Theorem, with $[c,x]$ (or $[x,c]$) in
place of $[a,b]$, with $F$ as the function
\begin{equation*}
t\mapsto \sum_{k=0}^{n-1}\frac{f^{(k)}(t)}{k!}(x-t)^k,
\end{equation*}
and with $G$ as a similar function in terms of $g$. In particular,
$G(x)=g(x)$, so
\begin{equation*}
G(x)-G(c)=g(x)-\sum_{k=0}^{n-1}\frac{g^{(k)}(c)}{k!}(x-c)^k;
\end{equation*}
also,
\begin{equation*}
F'(t)=f'(t)+ \sum_{k=1}^{n-1}
\left[\frac{f^{(k+1)}(t)}{k!}(x-t)^k -
\frac{f^{(k)}(t)}{(k-1)!}(x-t)^{k-1}\right],
\end{equation*}
a telescoping sum, so $F'(t)=f^{(n)}(t)(x-t)^{n-1}/(n-1)!$.
\end{proof}
\begin{corollary}[Taylor's Theorem]
Suppose $f^{(n-1)}$ is continuous on $[a,b]$ and
differentiable on $(a,b)$. Then for any distinct $c$ and $x$ in
$[a,b]$ there is $y$, strictly between them, such that
\begin{equation*}
f(x)= \sum_{k=0}^{n-1}\frac{f^{(k)}(c)}{k!}(x-c)^k+
\frac{f^{(n)}(y)}{n!}(x-c)^n.
\end{equation*}
\end{corollary}
\begin{proof}
In the theorem, let $g$ be $t\mapsto (t-c)^n$. We have
\begin{equation*}
g^{(k)}(t)=\frac{n!}{(n-k)!}(t-c)^{n-k},
\end{equation*}
so the theorem gives
\begin{equation*}
f^{(n)}(y)(x-c)^n
= n!\left[f(x)-\sum_{k=0}^{n-1}\frac{f^{(k)}(c)}{k!}(x-c)^k\right],
\end{equation*}
whence the claim.
\end{proof}
\begin{lemma}
Suppose $f$ has derivatives of all orders on some neighborhood of
$c$, and for some $M$, each $f^{(n)}$ is bounded by $M^n$ on that
neighborhood; then on that neighborhood, Equation
\pref{eqn:sf:Taylor} holds (with $c$ in place of $a$).
\end{lemma}
\begin{proof}
By Taylor's Theorem, for each $x$ in the neighborhood there is $y$
between $c$ and $x$ such that
$f(x)-\sum_{k=0}^{n-1}f^{(k)}(c)(x-c)^k/k!$ is
$f^{(n)}(y)(x-c)^n/n!$, whose absolute value is at most
$(M\abs{x-c})^n/n!$. For any positive $a$, we have
$\lim_{n\to\infty}(a^n/n!)=0$, since $\sum(a^n/n!)$ converges by the
ratio test.
\end{proof}
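For example, granting the familiar properties of the sine and cosine
functions (which these notes do not develop), the lemma applies to
them.
\begin{example}
Every derivative of the sine function is $\pm\sin$ or $\pm\cos$, so
all of them are bounded by $1$, and we can take $M=1$ in the lemma.
Hence $\sin$ is the sum of its Taylor series about every point of
$\R$; likewise $\cos$.
\end{example}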
\section{Differentiation in several dimensions}\label{sect:diff}
Suppose in this section that $f$ is a function from $\R^n$ to $\R^m$,
and $\tuple a$ is a point of $\R^n$.
We want to investigate the possibilities for differentiating $f$ at
$\tuple a$.
We can write $f$ as
$(f_0,\dots,f_{m-1})$, where each $f_i$ is a function from $\R^n$ to
$\R$; so if $n=1$, then we can form $(f_0',\dots,f_{m-1}')$.
In the general case, we can take \tech{partial derivatives}: Define a
function $\delta:\N\times\N\to\{0,1\}$ by
\begin{equation*}
\delta_{i\,j}=
\begin{cases}
1,&\text{ if }i=j;\\
0,&\text{ if }i\neq j.
\end{cases}
\end{equation*}
Let $\tuple e_i$ be the element
$(\delta_{i\,0},\dots,\delta_{i\,n-1})$ of $\R^n$. Let $D_if$ be the
function defined by
\begin{equation*}
D_if(\tuple a)=\lim_{h\to 0}\frac{f(\tuple a+h\tuple e_i)-f(\tuple
a)} {h};
\end{equation*}
this function (where it is well-defined) is the \defn{partial derivative of
$f$ with respect to the $i$th coordinate}. Note that $D_if_j(\tuple
a)$ is the ordinary derivative at $0$ of the function
\begin{equation*}
x\mapsto f_j(\tuple a+x\tuple e_i).
\end{equation*}
\begin{example}\label{ex:d:partial}
Let $f:\R^2\to\R$ be given by
\begin{equation*}
f(x,y)=
\begin{cases}
xy/(x^2+y^2),&\text{ if }(x,y)\neq(0,0);\\
0,&\text{ if }(x,y)=(0,0).
\end{cases}
\end{equation*}
Then $D_0f(0,0)=0=D_1f(0,0)$, since $f(x,0)=0=f(0,y)$ in each case;
but $f$ is not continuous at $(0,0)$, since $f(x,x)=1/2$ when
$x\neq0$.
\end{example}
For a stronger property than having partial derivatives, we define the
generalization called the \defn{directional derivative}: If $\tuple
u\in\R^n$, then
\begin{equation*}
f'(\tuple a;\tuple u)=\lim_{h\to0}\frac{f(\tuple a+h\tuple
u)-f(\tuple a)}{h}.
\end{equation*}
Thus $f'(\tuple a;\tuple e_i)=D_if(\tuple a)$. In Example
\ref{ex:d:partial}, not all directional derivatives at $(0,0)$ exist.
\begin{example}
Let $f:\R^2\to\R$ be given by
\begin{equation*}
f(x,y)=
\begin{cases}
xy^2/(x^2+y^4),&\text{ if }x\neq0;\\
0,&\text{ if }x=0.
\end{cases}
\end{equation*}
Then all directional derivatives at $0$ exist, since
\begin{equation*}
\frac{f(hu,hv)-f(0,0)}h=\frac{uv^2}{u^2+h^2v^4},
\end{equation*}
which has a limit as $h$ goes to $0$, whether or not $u=0$. However,
$f(y^2,y)= 1/2$, unless $y=0$; so $f$ is not continuous at $(0,0)$.
\end{example}
If $n=1$, then $f$ is differentiable at $a$ if and only if there is a
number, called $f'(a)$, such that
\begin{equation*}
\lim_{h\to0}\frac{f(a+h)-f(a)-f'(a)\cdot h}h=0.
\end{equation*}
The function $h\mapsto f'(a)\cdot h:\R\to \R$ is \tech{linear}; call it
$\lambda$. (A function $T:\R^n\to \R^m$ is \defn{linear} if
$T(a\tuple u+b\tuple v)=aT(\tuple u)+bT(\tuple v)$ in all cases.)
Then the function $x\mapsto f(a)+\lambda(x-a)$ is an approximation to
$f$ near $a$.
The function $f:\R^n\to\R^m$ is said to be \defn{differentiable at
$\tuple a$} if there is a linear function $\lambda:\R^n\to\R^m$ such
that
\begin{equation*}
\lim_{\tuple h\to\tuple 0}\frac{\abs{f(\tuple a+\tuple h)-f(\tuple
a)-\lambda(\tuple h)}}{\abs{\tuple h}}=0.
\end{equation*}
In this case, $\lambda$ is the \defn{total derivative of $f$ at
$\tuple a$} and is denoted $Df(\tuple a)$. Note well that this is a
\emph{function}; however, we have to check that it is unique:
\begin{theorem}
If $Df(\tuple a)$ exists, then $Df(\tuple a)(\tuple u)=f'(\tuple
a;\tuple u)$ for all $\tuple u$ in $\R^n$; so the total derivative
is uniquely determined by the directional derivatives.
\end{theorem}
\begin{proof}
If $Df(\tuple a)$ exists, and $\tuple u\neq0$, then
\begin{equation*}
\lim_{h\to 0}
\frac{\abs{f(\tuple a+h\tuple u)-f(\tuple
a)-Df(\tuple a)(h\tuple u)}}{\abs{h\tuple u}}=0;
\end{equation*}
but $Df(\tuple a)(h\tuple u)=hDf(\tuple a)(\tuple u)$; hence the claim.
\end{proof}
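Here is a small computation with the definition; the reader should
compare the result with the partial derivatives.
\begin{example}
Let $f:\R^2\to\R^2$ be given by $f(x,y)=(x^2,xy)$, and let
$(a,b)\in\R^2$. If $\lambda$ is the linear function
$(u,v)\mapsto(2au,bu+av)$, then
\begin{equation*}
f(a+u,b+v)-f(a,b)-\lambda(u,v)=(u^2,uv),
\end{equation*}
and $\abs{(u^2,uv)}=\abs u\abs{(u,v)}$, so the quotient in the
definition tends to $0$; thus $Df(a,b)=\lambda$. Note that
$\lambda(\tuple e_0)=(2a,b)=D_0f(a,b)$ and
$\lambda(\tuple e_1)=(0,a)=D_1f(a,b)$.
\end{example}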
\begin{corollary}\label{cor:d:tasp}
$Df(\tuple a)(\tuple u)=\sum_{i<n}u_iD_if(\tuple a)$.
\end{corollary}
\begin{theorem}[Chain Rule]
Suppose $f$ is differentiable at $\tuple a$, and $g$, a function from
$\R^m$ to $\R^p$, is differentiable at $f(\tuple a)$. Then $g\circ f$
is differentiable at $\tuple a$, and
\begin{equation*}
D(g\circ f)(\tuple a)=Dg(f(\tuple a))\circ Df(\tuple a).
\end{equation*}
\end{theorem}
\begin{proof}
Let $\phi$ be the function
$\tuple h\mapsto f(\tuple a+\tuple h)-f(\tuple a)-Df(\tuple a)(\tuple h)$,
let $\psi$ be the function
$\tuple y\mapsto g(\tuple y)-g(f(\tuple a))-Dg(f(\tuple a))(\tuple y-f(\tuple a))$,
and let $N$ be such that
$\abs{Df(\tuple a)(\tuple u)}\leq N\abs{\tuple u}$ for all $\tuple u$.
The main point is that $\abs{\psi(f(\tuple a+\tuple h))}/\abs{\tuple h}$
can be made small. Say $\epsilon>0$. There are positive numbers $\delta_0$,
$\delta_1$ and $\delta_2$ such that
\begin{align*}
0<\abs{\tuple h}<\delta_0& \implies\frac{\abs{\phi(\tuple
h)}}{\abs{\tuple h}}<1;\\
0<\abs{\tuple k}<\delta_1& \implies \frac{\abs{\psi(f(\tuple a)
+\tuple k)}}{\abs{\tuple k}} <\frac{\epsilon}{N+1};\\
0<\abs{\tuple h}<\delta_2& \implies \abs{f(\tuple a+\tuple h)-f(\tuple
a)}< \delta_1.
\end{align*}
Let $\delta=\min\{\delta_0,\delta_2\}$; then
\begin{equation*}
\frac{\abs{\psi(f(\tuple a+\tuple h))}}{\abs{\tuple
h}}<\epsilon
\end{equation*}
whenever $0<\abs{\tuple h}<\delta$.
\end{proof}
\begin{lemma}
If the partial derivatives $D_if$ are defined on a neighborhood of
$\tuple a$ and are continuous at $\tuple a$, then
$f$ is differentiable at $\tuple a$, that is, the total derivative
$Df(\tuple a)$ exists.
\end{lemma}
\begin{proof}
We may assume $m=1$. The $D_if$ must be defined on an open
\emph{interval} $I$ that contains $\tuple a$. (Note then that $I$ is
$I_0\times\dots\times I_{n-1}$ for some open intervals $I_i$ of $\R$
such that $a_i\in I_i$.)
We shall show that $Df(\tuple a)$ exists by showing that
\begin{equation*}
\lim_{\tuple h\to\tuple 0}\frac{\abs{f(\tuple a+\tuple h)-f(\tuple a)-
\sum_{i<n}h_iD_if(\tuple a)}}{\abs{\tuple h}}=0.
\end{equation*}
By the one-variable Mean-Value Theorem, applied in each coordinate in
turn, there are points $\tuple b_i$, each within $\abs{\tuple h}$ of
$\tuple a$, such that
\begin{equation*}
f(\tuple a+\tuple h)-f(\tuple a)=\sum_{i<n}h_iD_if(\tuple b_i)
\end{equation*}
(provided $\tuple a+\tuple h\in I$). If $\epsilon>0$, then, by the
continuity of the $D_if$ at $\tuple a$,
there is a ball $B(\tuple a;\delta)$ around $\tuple a$ such that, if
$\tuple x$ is in this ball, then
\begin{equation*}
\abs{D_if(\tuple x)-D_if(\tuple a)}\leq\frac{\epsilon}n.
\end{equation*}
If $\tuple a+\tuple h$ is in $B(\tuple a;\delta)$, then so are the $\tuple b_i$,
which means
\begin{equation*}
\frac{\abs{f(\tuple a+\tuple h)-f(\tuple
a)-\sum_{i<n}h_iD_if(\tuple a)}}{\abs{\tuple h}}=
\frac{\abs{\sum_{i<n}h_i(D_if(\tuple b_i)-D_if(\tuple
a))}}{\abs{\tuple h}}\leq\epsilon.
\end{equation*}
\end{proof}
\section{Integration in several dimensions}\label{sect:int}
A subset of $\R^n$ has \defn{measure zero} if, for every positive
$\epsilon$, it can be covered by countably many intervals $I_j$ such
that $\sum_j\mu(I_j)\leq\epsilon$, where $\mu(I_j)$ is the volume
(in $\R$, the length) of $I_j$.
\begin{example}
A countable subset $\{x_0,x_1,x_2,\dots\}$ of $\R$ has measure zero:
if $\epsilon>0$, let
\begin{equation*}
I_j=\left[x_j-\frac{\epsilon}{2^{j+2}},
x_j+\frac{\epsilon}{2^{j+2}}\right].
\end{equation*}
Then
\begin{equation*}
\sum_{j=0}^{\infty}\mu(I_j)=
\sum_{j=0}^{\infty}\frac{\epsilon}{2^{j+1}}= \epsilon,
\end{equation*}
and $x_j\in I_j$. Let
$f:\R\to \R$ be given by
\begin{equation*}
f(x)=
\begin{cases}
0,&\text{ if }x\notin\Q;\\
1/\abs n,&\text{ if }x=m/n\text{ in lowest terms.}
\end{cases}
\end{equation*}
Then the set of discontinuities of $f$ is precisely $\Q$, so $f$ is
integrable on every compact interval, and each of the integrals is $0$.
\end{example}
The \defn{boundary} of a subset $S$ of $\R^n$ is the set $\partial
S$ of points $\tuple x$ of $\R^n$ such that every ball with center
$\tuple x$ contains points of both $S$ and $\R^n\setminus S$. If $S$
is bounded, then $S$ is said to be \defn{Jordan-measurable} if
$\partial S$ has measure zero.
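For example (the reader should check these against the definitions):
\begin{example}
The interval $[0,1]\times[0,1]$ in $\R^2$ is Jordan-measurable: its
boundary consists of four line segments, and each of these can be
covered by a single interval of arbitrarily small area. On the other
hand, the bounded set $\Q\cap[0,1]$ in $\R$ is not Jordan-measurable,
since its boundary is all of $[0,1]$, which (as a compactness
argument shows) does not have measure zero.
\end{example}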
\begin{theorem}
Let $S$ be a bounded, Jordan-measurable subset of $\R^n$, and let
$f$ be a real-valued function on $S$. Then $\int_Sf$ exists if and
only if the set of discontinuities of $f$ on $S$ has measure zero.
\end{theorem}
\end{document}