\documentclass[reqno,centertags, 12pt]{amsart}
\usepackage{amsmath,amsthm,amscd,amssymb}
\usepackage{verbatim}
\usepackage{latexsym}
%\usepackage{showkeys}
\sloppy
%%%%%%%%%%%%% fonts/sets %%%%%%%%%%%%%%%%%%%%%%%
\newcommand{\bbN}{{\mathbb{N}}}
\newcommand{\bbR}{{\mathbb{R}}}
\newcommand{\bbD}{{\mathbb{D}}}
\newcommand{\bbP}{{\mathbb{P}}}
\newcommand{\bbZ}{{\mathbb{Z}}}
\newcommand{\bbC}{{\mathbb{C}}}
\newcommand{\bbQ}{{\mathbb{Q}}}
\newcommand{\calH}{{\mathcal H}}
%%%%%%%%%%%%%%%%%% abbreviations %%%%%%%%%%%%%%%%%%%%%%%%
\newcommand{\dott}{\,\cdot\,}
\newcommand{\no}{\nonumber}
\newcommand{\lb}{\label}
\newcommand{\f}{\frac}
\newcommand{\ul}{\underline}
\newcommand{\ol}{\overline}
\newcommand{\ti}{\tilde }
\newcommand{\wti}{\widetilde }
\newcommand{\Oh}{O}
\newcommand{\oh}{o}
\newcommand{\marginlabel}[1]{\mbox{}\marginpar{\raggedleft\hspace{0pt}#1}}
\newcommand{\tr}{\text{\rm{Tr}}}
\newcommand{\dist}{\text{\rm{dist}}}
\newcommand{\loc}{\text{\rm{loc}}}
\newcommand{\spec}{\text{\rm{spec}}}
\newcommand{\rank}{\text{\rm{rank}}}
\newcommand{\ran}{\text{\rm{ran}}}
\newcommand{\dom}{\text{\rm{dom}}}
\newcommand{\ess}{\text{\rm{ess}}}
\newcommand{\ac}{\text{\rm{ac}}}
\newcommand{\s}{\text{\rm{s}}}
\newcommand{\sing}{\text{\rm{sc}}}
\newcommand{\pp}{\text{\rm{pp}}}
\newcommand{\supp}{\text{\rm{supp}}}
\newcommand{\AC}{\text{\rm{AC}}}
\newcommand{\bi}{\bibitem}
\newcommand{\hatt}{\widehat}
\newcommand{\beq}{\begin{equation}}
\newcommand{\eeq}{\end{equation}}
\newcommand{\ba}{\begin{align}}
\newcommand{\ea}{\end{align}}
\newcommand{\veps}{\varepsilon}
%\newcommand{\Ima}{\operatorname{Im}}
%\newcommand{\Real}{\operatorname{Re}}
%\newcommand{\diam}{\operatorname{diam}}
% use \hat in subscripts and upper limits of int.
%%%%%%%%%%%%% marginal warnings %%%%%%%%%%%%%%%%
% ON:
\newcommand{\TK}{{\marginpar{x-ref?}}}
% OFF:
%\newcommand{\TK}{}
%
% Rowan's unspaced list
%
\newcounter{smalllist}
\newenvironment{SL}{\begin{list}{{\rm\roman{smalllist})}}{%
\setlength{\topsep}{0mm}\setlength{\parsep}{0mm}\setlength{\itemsep}{0mm}%
\setlength{\labelwidth}{2em}\setlength{\leftmargin}{2em}\usecounter{smalllist}%
}}{\end{list}}
%
%smaller \bigtimes
%
\newcommand{\bigtimes}{\mathop{\mathchoice%
{\smash{\vcenter{\hbox{\LARGE$\times$}}}\vphantom{\prod}}%
{\smash{\vcenter{\hbox{\Large$\times$}}}\vphantom{\prod}}%
{\times}%
{\times}%
}\displaylimits}
%%%%%%%%%%%%%%%%%%%%%% renewed commands %%%%%%%%%%%%%%%
%\renewcommand{\Re}{\text{\rm Re}}
%\renewcommand{\Im}{\text{\rm Im}}
%%%%%%%%%%%%%%%%%%%%%% operators %%%%%%%%%%%%%%%%%%%%%%
\DeclareMathOperator{\Real}{Re}
\DeclareMathOperator{\Ima}{Im}
\DeclareMathOperator{\sgn}{sgn}
\DeclareMathOperator{\diam}{diam}
\DeclareMathOperator*{\slim}{s-lim}
\DeclareMathOperator*{\wlim}{w-lim}
\DeclareMathOperator*{\simlim}{\sim}
\DeclareMathOperator*{\eqlim}{=}
\DeclareMathOperator*{\arrow}{\rightarrow}
\allowdisplaybreaks
\numberwithin{equation}{section}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%% end of definitions %%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newtheorem{theorem}{Theorem}[section]
%\newtheorem*{t0}{Theorem}
\newtheorem*{t1}{Theorem 1}
\newtheorem*{t2}{Theorem 2}
\newtheorem*{t3}{Theorem 3}
\newtheorem*{t4}{Theorem 4}
\newtheorem*{t5}{Theorem 5}
%\newtheorem*{c4}{Corollary 4}
%\newtheorem*{p2.1}{Proposition 2.1}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
%\newtheorem{hypothesis}[theorem]{Hypothesis}
%\theoremstyle{hypothesis}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{xca}[theorem]{Exercise}
\theoremstyle{remark}
\newtheorem{remark}[theorem]{Remark}
% Absolute value notation
\newcommand{\abs}[1]{\lvert#1\rvert}

\begin{document}

\title[Factorization of Meromorphic Herglotz Functions]
{A Canonical Factorization for Meromorphic Herglotz Functions \\
on the Unit Disk and Sum Rules \\for Jacobi Matrices}
\author{Barry Simon}
\thanks{$^*$ Mathematics 253-37, California Institute of Technology, Pasadena, CA 91125.
E-mail: bsimon@caltech.edu. Supported in part by NSF grant DMS-0140592.}
\date{March 21, 2003}

\begin{abstract}
We prove a general canonical factorization for meromorphic Herglotz functions on the unit disk whose notable elements are that there is no restriction (other than interlacing) on the zeros and poles for their Blaschke product to converge, and that there is no singular inner function. We use this result to provide a significant simplification in the proof of Killip-Simon \cite{KS} of their result characterizing the spectral measures of Jacobi matrices, $J$, with $J-J_0$ Hilbert-Schmidt. We also prove a nonlocal version of the Case and step-by-step sum rules.
\end{abstract}

\maketitle

\section{Introduction} \lb{s1}

The canonical factorization for Nevanlinna functions, $f$, that is, functions analytic on $\bbD =\{z\mid \abs{z}<1\}$ obeying
\begin{equation} \lb{1.1a}
\sup_{0<r<1} \, \int_0^{2\pi} \log_+ \abs{f(re^{i\theta})} \, \f{d\theta}{2\pi} <\infty
\end{equation}
(where $\log_+ x =\max (\log x, 0)$), writes such an $f$ as a product
\begin{equation} \lb{1.1b}
f(z) = B(z)\, O(z)\, S(z)
\end{equation}
of a Blaschke product, $B$, an outer function, $O$, and a singular inner function, $S$ (see Rudin \cite{Rudin}). Throughout, for $p\in (-1,1)$, $b(z,p)$ denotes the elementary Blaschke factor
\begin{equation} \lb{1.1c}
b(z,p) =\f{\abs{p}}{p}\, \f{p-z}{1-pz} \quad (p\neq 0); \qquad b(z,0)=z
\end{equation}
Our first result is an analog of this factorization for real meromorphic Herglotz functions:

\begin{theorem} \lb{T1.1a} Let $f$ be meromorphic and not identically zero on $\bbD$, real on $(-1,1)$ away from its poles, and suppose that $\Ima f(z) >0$ {\rm{(}}resp.~$<0${\rm{)}} if $\Ima z >0$ {\rm{(}}resp.~$<0${\rm{)}}. Then for a.e.~$\theta \in [0,2\pi)$,
\begin{equation} \lb{1.1e}
f(e^{i\theta}) \equiv \lim_{r\uparrow 1} \, f(re^{i\theta})
\end{equation}
exists and is a.e.~nonzero.
Moreover,
\begin{equation} \lb{1.1f}
\int_0^{2\pi} \abs{\log \abs{f(e^{i\theta})}}^p \, \f{d\theta}{2\pi}<\infty
\end{equation}
for all $p<\infty$. If $p_1^+ < p_2^+ < \cdots$ and $z_1^+ < z_2^+ < \cdots$ are the poles and zeros in $(0,1)$, and $p_1^- > p_2^- > \cdots$ and $z_1^- > z_2^- > \cdots$ the poles and zeros in $(-1,0)$, then
\begin{equation} \lb{1.1g}
B(z) =\lim_{n\to\infty} \prod_{j=1}^n b(z,z_j^+) b(z, p_j^+)^{-1} b(z,z_j^-) b(z,p_j^-)^{-1}
\end{equation}
converges uniformly on compact subsets of $\bbD\backslash\{p_j^\pm\}$ and
\begin{equation} \lb{1.1h}
f(z) =\pm B(z) \exp \biggl( \int \f{e^{i\theta}+z}{e^{i\theta}-z}\, \log \abs{f(e^{i\theta})} \, \f{d\theta}{2\pi} \biggr)
\end{equation}
where the $\pm$ sign in front is $\sgn (f(0))$ if $f(0)\neq 0$ and $+$ if $f(0)=0$.
\end{theorem}

Two aspects of this theorem should be emphasized. First, unlike the Nevanlinna function case, where one proves $\sum (1-\abs{z_j})<\infty$ and needs this to assure convergence of the product defining $B$, our $f$'s can have arbitrary interlaced poles and zeros on $(-1,1)$, since the interlacing alone assures convergence. Second, \eqref{1.1h} has no singular inner part. This is connected with \eqref{1.1f}. Also interesting is that the proof of Theorem~\ref{T1.1a} has fewer technicalities; for example, the product defining $B$ actually converges on all of $\bbC_+ =\{z\mid\Ima z>0\}$, so $\abs{B(e^{i\theta})} =1$ is immediate without the separate argument needed in the Nevanlinna function case (see \cite[p.~312]{Rudin}).

While this theorem is interesting for its own sake, we found it in the course of simplifying some recent results of Killip-Simon \cite{KS} on Jacobi matrices. We will consider Jacobi matrix spectral and inverse spectral theory, mainly using the notation of Killip-Simon \cite{KS}. A Jacobi matrix, $J$, is a tridiagonal selfadjoint matrix
\begin{equation} \lb{1.1}
J= \begin{pmatrix}
b_1 & a_1 & 0 & 0 & \cdots \\
a_1 & b_2 & a_2 & 0 & \cdots \\
0 & a_2 & b_3 & a_3 & \cdots \\
\vdots & \vdots & \vdots & \vdots & \ddots
\end{pmatrix}
\end{equation}
with $a_n >0$ and $\sup_n \, (\abs{a_n} + \abs{b_n}) <\infty$. $J_0$ is the special case with $b_n\equiv 0$, $a_n\equiv 1$. $\mu_J$ (often just $\mu$) is the spectral measure for $J$ and the vector $\delta_1$, that is,
\begin{equation} \lb{1.2}
m(E) \equiv \int \f{d\mu(x)}{x-E} =\langle\delta_1, (J-E)^{-1} \delta_1 \rangle
\end{equation}
for $E\in\bbC_+ =\{E\mid\Ima E>0\}$. The $a$'s and $b$'s are then the recursion coefficients for the orthonormal polynomials for $\mu$, that is, if $p_n(x)$ is defined recursively by
\begin{equation} \lb{1.3}
xp_n(x) =a_{n+1} p_{n+1}(x) + b_{n+1} p_n(x) + a_n p_{n-1}(x)
\end{equation}
then $p_n(x) =\gamma_n x^n +$ lower order with $\gamma_n =(a_1 \dots a_n)^{-1} >0$ and
\begin{equation} \lb{1.3a}
\int p_n(x) p_\ell(x)\, d\mu(x) =\delta_{n\ell}
\end{equation}
Note that for $J=J_0$, $d\mu$ is
\begin{equation} \lb{1.3b}
d\mu_0 = (2\pi)^{-1} \chi_{[-2,2]} (E) \sqrt{4-E^2}\, dE
\end{equation}
Write
\begin{equation} \lb{1.4}
d\mu(x) = f(x) \, dx + d\mu_\s (x)
\end{equation}
where $d\mu_\s (x)$ is singular with respect to Lebesgue measure.
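For orientation, one can check all of this explicitly in the free case: for $d\mu_0$ of \eqref{1.3b}, the orthonormal polynomials are the Chebyshev polynomials of the second kind,
\[
p_n(2\cos\theta) =\f{\sin ((n+1)\theta)}{\sin\theta}
\]
and the elementary identity $2\cos\theta \, \sin((n+1)\theta) =\sin ((n+2)\theta) + \sin (n\theta)$ shows that \eqref{1.3} holds with $a_n\equiv 1$, $b_n\equiv 0$, that is, with $J=J_0$.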
Our goal here is to present a simple self-contained proof of the main theorem of Killip-Simon \cite{KS}:

\begin{theorem} \lb{T1.1} $J-J_0$ is Hilbert-Schmidt, that is,
\begin{equation} \lb{1.5}
\sum_{n=1}^\infty \, [2(a_n -1)^2 + b_n^2]<\infty
\end{equation}
if and only if $d\mu$ obeys the following four conditions:
\begin{SL}
\item[{\rm{(i)}}] {\rm{(Blumenthal-Weyl)}}
\begin{equation} \lb{1.6}
\supp [d\mu] = [-2,2] \cup \{E_j^-\}_{j=1}^{N_-} \cup \{E_j^+\}_{j=1}^{N_+}
\end{equation}
where
\begin{equation} \lb{1.7}
E_1^- < E_2^- < \cdots < -2 \quad\text{and}\quad E_1^+ > E_2^+ > \cdots > 2
\end{equation}
are discrete pure points of $d\mu$, and if $N_+=\infty$ {\rm{(}}resp.~$N_- =\infty${\rm{)}}, then $E_j^+ \to 2$ {\rm{(}}resp.~$E_j^- \to -2${\rm{)}}.
\item[{\rm{(ii)}}] {\rm{(Lieb-Thirring)}}
\begin{equation} \lb{1.8}
\sum_{j=1}^{N_+} \abs{E_j^+ -2}^{3/2} + \sum_{j=1}^{N_-} \abs{E_j^- + 2}^{3/2} <\infty
\end{equation}
\item[{\rm{(iii)}}] {\rm{(Quasi-Szeg\H{o})}}
\begin{equation} \lb{1.9}
\int_{-2}^2 \log (f(E)) \sqrt{4-E^2}\, dE >-\infty
\end{equation}
\item[{\rm{(iv)}}] {\rm{(Normalization)}}
\begin{equation} \lb{1.10}
\mu(\bbR) =1
\end{equation}
\end{SL}
\end{theorem}

An important point is that, like Szeg\H{o}'s theorem \cite{Ger,Szego2,OPUC,Szego1}, the only condition on the singular part $\mu_\s$ is the one implied by \eqref{1.10}. As in \cite{KS}, this theorem follows immediately from a sufficiently general form of a sum rule.

Given two measures $\mu$ and $\nu$ on a compact Hausdorff space, define their relative entropy by
\[
S(\mu\mid\nu) = \begin{cases}
-\infty &\text{if $\mu$ is not $\nu$-a.c.} \\
-\int \log (\f{d\mu}{d\nu})\, d\mu &\text{if $\mu$ is $\nu$-a.c.}
\end{cases}
\]
Let $\mu_0$ be the measure given by \eqref{1.3b} and let
\[
Q(\mu) =-S(\mu_0\mid\mu)
\]
We will need the fact (\cite[Corollary~5.3]{KS}) that
\begin{equation} \lb{1.10a}
\mu_n \overset{w}{\longrightarrow}\mu \; \Rightarrow \; Q(\mu) \leq \liminf Q(\mu_n)
\end{equation}
(lower semicontinuity of the entropy). Also define two functions: $G$ on $(0,\infty)$ by
\begin{equation} \lb{1.11}
G(a) =a^2 -1-2 \log(a)
\end{equation}
and $F$ on $(2,\infty)$ by
\begin{align}
F(E) &=\tfrac14\, (\beta^2 - \beta^{-2} -\log \beta^4) \lb{1.12} \\
E& = \beta + \beta^{-1}; \qquad \beta >1 \lb{1.12a}
\end{align}
The $P_2$ sum rule of Killip-Simon \cite{KS} says
\begin{equation} \lb{1.13}
Q(\mu) + \sum_{n,\pm} F(\abs{E_n^\pm}) =\tfrac14 \sum_{n=1}^\infty b_n^2 + \tfrac12 \sum_{n=1}^\infty G(a_n)
\end{equation}
and the point of \cite{KS} is that it always holds in the sense that all terms are nonnegative and either both sides are finite and equal or both are infinite. Since the right side of \eqref{1.13} is finite if and only if $\{b_n\}$ and $\{a_n-1\}$ are $\ell_2$, and the left side is finite if and only if (i)--(iv) of Theorem~\ref{T1.1} hold, \eqref{1.13} immediately implies Theorem~\ref{T1.1}.

As in \cite{KS}, one proves \eqref{1.13} by showing $\text{LHS} \leq\text{RHS}$ and $\text{RHS}\leq \text{LHS}$. The proof in \cite{KS} of the first inequality is conceptually and technically simple --- essentially, one proves \eqref{1.13} when only finitely many $b_n$ and $a_n -1$ are nonzero and uses \eqref{1.10a}. The proof in the other direction in \cite{KS} is involved and opaque; Simon-Zlato\v{s} \cite{SZ} improved this part, but their argument still has several pages of calculations. Instead, our key technical input will be Theorem~\ref{T1.1a}.
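To see concretely why these finiteness statements match up, note that near their zeros, $G$ and $F$ have the elementary expansions (by Taylor's theorem at $a=1$ and $\beta=1$, using $\abs{E}-2 = (\beta-1)^2 \beta^{-1}$ when $\abs{E} =\beta+\beta^{-1}$):
\begin{align*}
G(a) &= 2(a-1)^2 + \Oh ((a-1)^3) \\
F(\abs{E}) &= \tfrac23\, (\abs{E}-2)^{3/2}\, (1+\oh(1)) \qquad \text{as } \abs{E}\downarrow 2
\end{align*}
so finiteness of the right side of \eqref{1.13} is equivalent to \eqref{1.5}, and finiteness of $\sum_{n,\pm} F(\abs{E_n^\pm})$ is equivalent to the Lieb-Thirring condition \eqref{1.8}.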
As in \cite{KS} and related to ideas going back at least as far as Szeg\H{o} \cite{Szego1}, we will map $\bbC\cup\{\infty\}\backslash [-2,2]$ to the open unit disk $\bbD$ by the inverse of the map $z\mapsto (z+z^{-1})$. This allows us to define a natural function $M$ on $\bbD$ associated to the $m$ of \eqref{1.2} by
\begin{equation} \lb{1.14}
M(z) = -m(z+z^{-1})
\end{equation}
$M$ is meromorphic in $\bbD$ and has poles at the points $(\beta_j^\pm)^{-1}$ defined by \eqref{1.12a}. Since $m(E)\sim (-E)^{-1}$ for $E$ near $\infty$,
\begin{equation} \lb{1.13a}
M(z)=z+O(z^2)
\end{equation}
near $z=0$. Since $\Ima M>0$ if $\Ima z>0$, between any two poles of $M$ is a zero, and the zeros (other than $z=0$) are easily seen to be related to the eigenvalues of the Jacobi matrix $J^{(1)}$ obtained by removing the top row and leftmost column in $J$. Let $M_1$ be the $M$-function for $J^{(1)}$. Then our main technical result is the following representation for $M$:

\begin{theorem} \lb{T1.2} For any Jacobi matrix, $\Ima M$ and $\Ima M_1$ have boundary values a.e.~on $\partial\bbD$ and, up to Lebesgue measure zero sets,
\begin{equation} \lb{1.14a}
S\equiv \{\theta\mid\Ima M(\theta)\neq 0\} =\{\theta\mid\Ima M_1 (\theta)\neq 0\}
\end{equation}
Moreover, for any $p<\infty$,
\begin{equation} \lb{1.15}
\int_S \, \biggl|\, \log \biggl(\f{\Ima M}{\Ima M_1}\biggr)\biggr|^p \, \f{d\theta}{2\pi} <\infty
\end{equation}
Most importantly,
\begin{equation} \lb{1.16}
a_1 M =zB^+(z) B^- (z) \exp \biggl[ \f{1}{4\pi} \int \log \biggl( \f{\Ima M(\theta)}{\Ima M_1(\theta)} \biggr) \biggl[ \f{e^{i\theta}+z}{e^{i\theta}-z}\biggr] \, d\theta \biggr]
\end{equation}
where $B^+$ and $B^-$ are suitable convergent products over the zeros and poles.
\end{theorem}

This theorem not only yields Theorem~\ref{T1.1} but has other applications \cite{DaS}. In the case of greatest interest, $\abs{S}=2\pi$, and then \eqref{1.16} is intended literally. If $\abs{S} <2\pi$, we interpret $\Ima M(\theta)/\Ima M_1 (\theta)$ for $\theta\notin S$ by
\begin{equation} \lb{1.17}
\f{\Ima M(\theta)}{\Ima M_1 (\theta)} \equiv \lim_{r\uparrow 1} \, \f{\Ima M(re^{i\theta})}{\Ima M_1 (re^{i\theta}) + a_1^{-2} (r^{-1} -r)\sin\theta}
\end{equation}
Part of the theorem is that, with this extended definition, $\log (\Ima M(\theta)/\Ima M_1 (\theta)) \in L^p (d\theta/2\pi)$ for all $p<\infty$.

The connection between Theorem~\ref{T1.1a} and Theorem~\ref{T1.2} is evident; essentially, the latter is implied by the former and the formula already used in \cite{KS}:
\begin{equation} \lb{1.18}
\f{\Ima M(\theta)}{\Ima M_1 (\theta)} = a_1^2 \abs{M(e^{i\theta})}^2
\end{equation}

In Section~\ref{s2}, we will discuss Blaschke products of zeros and poles, and in Section~\ref{s3}, we will prove Theorems~\ref{T1.1a} and \ref{T1.2}. In Section~\ref{s4}, we will see that the Taylor coefficients of the logs of the two sides of \eqref{1.16} are the step-by-step sum rules of \cite{KS} and \cite{SZ}, and use them, following \cite{KS} and \cite{SZ}, to prove \eqref{1.13}. Section~\ref{s5} has a few closing remarks, including a brief indication of the analog of $M$ for orthogonal polynomials on the unit circle.

I would like to thank Leonid Golinskii for a comment that got me thinking in the right direction, and Rowan Killip for useful comments. Our joint work extending \cite{KS} to the continuum case (in preparation) was an important guide to my thinking.
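We end this introduction by checking \eqref{1.16} in the free case. For $J=J_0$, one has $m_0(E) =\tfrac12\, (-E+\sqrt{E^2-4}\,)$ (with the branch of the square root asymptotic to $E$ at infinity), and since $E=z+z^{-1}$ gives $\sqrt{E^2-4} = z^{-1}-z$ on $\bbD$, \eqref{1.14} yields $M_0(z)=z$, consistent with \eqref{1.13a}. Moreover, $J_0^{(1)} = J_0$, so $M_1 = M_0$; there are no eigenvalues, so $B^\pm \equiv 1$; $a_1 =1$; and $\Ima M/\Ima M_1 \equiv 1$ makes the exponential factor $1$, so both sides of \eqref{1.16} reduce to $z$.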
\section{Alternating Blaschke Products} \lb{s2}

Given real numbers $0<p_1 < z_1 < p_2 < z_2 <\cdots$ with $p_j < z_j < p_{j+1}$ and $p_j \uparrow 1$, set, with $b(z,\dott)$ given by \eqref{1.1c},
\begin{equation} \lb{2.1}
c_{2j-1}(z) = b(z,p_j)^{-1} \qquad c_{2j}(z) = b(z,z_j)
\end{equation}
and
\begin{equation} \lb{2.2}
B_m(z) =\prod_{\ell=1}^m c_\ell (z)
\end{equation}

\begin{theorem} \lb{T2.1} The limit
\begin{equation} \lb{2.3}
B_\infty (z) =\lim_{m\to\infty} \, B_m(z)
\end{equation}
exists uniformly on compact subsets of $(\bbC\cup\{\infty\})\backslash (\{p_j\}\cup\{z_j^{-1}\}\cup\{1\})$. $B_\infty$ is meromorphic on $\bbD$ with zeros exactly at the $z_j$ and poles exactly at the $p_j$, and $\abs{B_\infty (e^{i\theta})}=1$ if $e^{i\theta}\neq 1$. Moreover, if $\Ima z >0$, then
\begin{equation} \lb{2.4}
\abs{\Ima (\log B_\infty (z))}\leq \pi
\end{equation}
\end{theorem}

{\it Remarks.} 1. This should be viewed as a souped-up version of the fact that if $a_n\to 0$, $\abs{a_n}\geq \abs{a_{n+1}}$, and $(-1)^n a_n \geq 0$, then $\sum_{n=1}^\infty a_n$ converges.

\smallskip
2. There is no regular Blaschke condition that $\sum_{j=1}^\infty (1-\abs{z_j})<\infty$ because we can instead use
\begin{equation} \lb{2.5}
\sum_{j=1}^\infty \, \abs{z_j -p_j} <\infty
\end{equation}
which interlacing provides automatically {\rm{(}}the sum telescopes and is at most $1-p_1${\rm{)}}. Killip has remarked that \eqref{2.5} alone is sufficient for all parts of the theorem to be true, save \eqref{2.4}.

\smallskip
3. \eqref{2.4} holds because there is a cancellation of phases between the zeros and poles.

\smallskip
4. This theorem is a close relative of the one in Gesztesy-Simon \cite{GS} that for interlaced sequences $p_1 < z_1 < p_2 < z_2 <\cdots$ going to infinity, suitably renormalized products of $(z-z_j)/(z-p_j)$ converge.

\begin{proof} Fix $x\in (-1,0)$. Since
\[
\f{a-x}{1-ax}\in (0,1) \qquad\text{and}\qquad \f{\partial}{\partial a} \biggl( \f{a-x}{1-ax}\biggr) =\f{1-x^2}{(1-ax)^2} > 0
\]
if $a\in (0,1)$, we have
\begin{alignat*}{2}
&\abs{\log c_j (x)} \geq \abs{\log c_{j+1}(x)} && \qquad j=1,2,\dots \\
&\log c_j(x) \to 0 &&\qquad \text{as } j\to\infty \\
&(-1)^{j+1} \log c_j (x) >0
\end{alignat*}
so $\sum_j \log c_j (x)$ is a convergent alternating series and thus, for $x\in (-1,0)$, $\lim_{m\to\infty} B_m(x)$ exists. Noting that
\[
\biggl| \f{z_j -z}{p_j -z}-1\biggr| = \f{\abs{z_j-p_j}}{\abs{p_j-z}} \qquad \quad \biggl| \f{1-p_j z}{1-z_j z}-1\biggr| =\f{\abs{z_j -p_j}\, \abs{z}}{\abs{1-z_j z}}
\]
one sees that
\[
\abs{B_{2j}(z)} \leq \exp \biggl( 2S(z) \sum_{k=1}^j \, \abs{z_k-p_k}\biggr)
\]
where
\[
S(z) =\max_{\alpha\in (p_1, 1)} \biggl( \f{1}{\abs{\alpha-z}}\, , \f{\abs{z}}{\abs{1-\alpha z}}\biggr)
\]
Since
\[
\abs{c_{2j-1} (z)} \leq \max_{\alpha\in (p_1, 1)} \, \f{\abs{1-\alpha z}}{\abs{z-\alpha}}
\]
we see that on $\bbC\backslash [p_1, p_1^{-1}]$, $\sup_m \abs{B_m(z)}< \infty$, and so, by Vitali's theorem, we get the uniform convergence. A small modification handles the points in $[p_1, p_1^{-1}]$ other than $\{p_j\} \cup \{z_j^{-1}\}$ and their limit point $1$. Since $\abs{c_\ell(z)}=1$ if $\abs{z}=1$ and $B_m \to B_\infty$ uniformly near every point of $\partial\bbD\backslash\{1\}$, $\abs{B_\infty (z)} =1$ on $\partial\bbD\backslash\{1\}$.

That proves everything but \eqref{2.4}. To prove that, note that
\[
\prod_{j=1}^k \f{z_j -z}{p_j -z} \, \f{1}{p_{k+1}-z} = \sum_{\ell=1}^{k+1} \, \f{\alpha_\ell}{p_\ell -z}
\]
where each $\alpha_\ell$ is positive, and so it defines a Herglotz function {\rm{(}}the same is true of the analogous product with all points replaced by their reciprocals, which is the other factor in $B_{2k+1}$ up to a positive constant{\rm{)}}. Thus $B_{2k+1}$ is a ratio of Herglotz functions and so $\abs{\Ima [\log (B_{2k+1})]}<\pi$. Taking limits, we get \eqref{2.4}.
\end{proof}

\section{Factorization of Meromorphic Herglotz Functions} \lb{s3}

Here we will prove Theorems~\ref{T1.1a} and \ref{T1.2}.

\begin{proof}[Proof of Theorem~\ref{T1.1a}] Since an analytic function $g(z)$ has points with $\Ima g <0$ in any neighborhood of a polar singularity or zero, $f$ is analytic and nonvanishing in $\bbD_+\equiv\bbD\cap\bbC_+$ and in $\bbD_-\equiv\bbD\cap\{z\mid\Ima z<0\}$. Similarly, since each half-neighborhood of a real zero or pole of degree larger than $1$ has points with $\Ima g <0$, we conclude that $f$ has only simple poles and zeros on $(-1,1)$. Since $\Ima f$ is $0$ on $(-1,1)$ and $\Ima f \geq 0$ on $\bbD_+$, we have $\f{\partial\Ima f}{\partial y} \geq 0$ there, so, by the Cauchy-Riemann equations, $f' (x+i0) =\f{\partial\Real f}{\partial x}\geq 0$. That implies the zeros and poles interlace, since $f$ is increasing between successive poles. Using slightly different labelling of zeros and poles from Section~\ref{s1}, let $z_0$ be the smallest zero of $f$ in $[0,1)$, and let $p_1^+ < z_1^+ < \cdots$ and $p_1^- > z_1^- >\cdots$ be the poles and zeros in $(z_0,1)$ and $(-1, z_0)$, respectively.
Define
\begin{equation} \lb{3.1}
B(z) =b(z,z_0) B_+ (z) B_-(z)
\end{equation}
where $B_+$ {\rm{(}}resp.~$B_-${\rm{)}} is the product given by Theorem~\ref{T2.1} for the $z_j^+, p_j^+$ {\rm{(}}resp.~the similar product for the $z_j^-, p_j^-${\rm{)}}. It is easy to see that this agrees with the slightly reordered product in \eqref{1.1g}. Define
\begin{equation} \lb{3.2}
G(z) =f(z) B(z)^{-1}
\end{equation}
Then $G(z)$:
\begin{SL}
\item[(i)] is analytic and nonvanishing on $\bbD$ since we have explicitly removed all the zeros and poles;
\item[(ii)] has
\begin{equation} \lb{3.3}
\lim_{r\uparrow 1} \biggl| \f{G(re^{i\theta})}{f(re^{i\theta})}\biggr| =1
\end{equation}
for $\theta\neq 0,\pi$ since $\abs{B(e^{i\theta})}=1$ there;
\item[(iii)] obeys, on all of $\bbD$,
\begin{equation} \lb{3.4}
\abs{\Ima \log (G(z))}\leq 4\pi
\end{equation}
This holds in $\bbD\cap\bbC_+$ since $f$ and $\pm b$ are Herglotz functions {\rm{(}}for which $\Ima \log g(z)\in (0,\pi)${\rm{)}} and $B_\pm$ obey \eqref{2.4}; it then holds on all of $\bbD$ by the reflection symmetry $G(\bar z) =\ol{G(z)}$.
\end{SL}

\smallskip
As a nonvanishing analytic function, $G(z)$ has an analytic logarithm. By \eqref{3.4} and the boundedness of the conjugate harmonic function map on $L^p$ (see Rudin \cite{Rudin}), we have that
\begin{equation} \lb{3.5}
\log G(z)\in H^p \qquad \text{all }p<\infty
\end{equation}
so that
\begin{equation} \lb{3.6}
\log G(z) = \f{1}{2\pi} \int \log \abs{G(e^{i\theta})}\, \f{e^{i\theta}+z}{e^{i\theta}-z}\, d\theta
\end{equation}
Since \eqref{3.3} holds, we have that for all $p<\infty$,
\[
\f{1}{2\pi} \int_0^{2\pi} \abs{\log [\abs{f(e^{i\theta})}]}^p\, d\theta <\infty
\]
which is \eqref{1.1f}, and, by \eqref{3.1} and \eqref{3.6}, \eqref{1.1h} holds {\rm{(}}the overall sign $\pm$ arises since $G(0)$ is real{\rm{)}}.
\end{proof}

\begin{proof}[Proof of Theorem~\ref{T1.2}] We use the continued fraction expansion for $M$ (see \cite{KS}):
\begin{equation} \lb{3.8}
M(z) = (z+z^{-1} -b_1 - a_1^2 M_1 (z))^{-1}
\end{equation}
Since $\Ima (-M(z)^{-1}) =\Ima M(z)/\abs{M(z)}^2$, we see for $z\in\bbD$,
\begin{equation} \lb{3.9}
\f{\Ima M(z)}{\abs{M(z)}^2} = a_1^2 \Ima M_1 (z) - \Ima (z+z^{-1})
\end{equation}
so, since $\Ima M(z)$ and $\Ima M_1(z)$ are nonzero on $\bbD\cap\bbC_+$ and $\Ima (z+z^{-1}) =-(r^{-1}-r)\sin\theta$ at $z=re^{i\theta}$, we have there that
\begin{equation} \lb{3.10}
a_1^2 \abs{M(re^{i\theta})}^2 = \f{\Ima M (re^{i\theta})} {[\Ima M_1 (re^{i\theta}) + a_1^{-2} (r^{-1} -r) \sin\theta]}
\end{equation}
Taking boundary values and using the fact that $M$ is a meromorphic Herglotz function on $\bbD$, so that $\lim_{r\uparrow 1} \abs{M(re^{i\theta})}$ is finite and nonzero for a.e.~$\theta$ (by Theorem~\ref{T1.1a}), we see that \eqref{1.14a} holds and that on $S$,
\begin{equation} \lb{3.11}
a_1^2 \abs{M(e^{i\theta})}^2 = \f{\Ima M(e^{i\theta})}{\Ima M_1 (e^{i\theta})}
\end{equation}
Off $S$, this formula holds if we interpret the right side by \eqref{1.17}. Combining \eqref{1.1h} (applied to $f=a_1 M$) and \eqref{3.11} proves \eqref{1.16}.
\end{proof}

We will see in the next section that the Taylor coefficients of \eqref{1.16} are step-by-step sum rules. We can thus think of \eqref{1.16} as a kind of global (i.e., not just Taylor coefficients at $z=0$) step-by-step sum rule. It will be used in \cite{DaS}.

\section{The $P_2$ Sum Rule} \lb{s4}

Our goal in this section is to prove \eqref{1.13} in the strong sense indicated in Section~\ref{s1}, which then, as explained there, implies Theorem~\ref{T1.1}. The arguments follow those in \cite{KS} and \cite{SZ}; they are presented here because our subsidiary results are stronger and because, by specializing to $P_2$, some details are easier.
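It is worth recording the change of variables that links the weights below to Section~\ref{s1}: under $E=2\cos\theta$, $\theta\in (0,\pi)$, one has $\sqrt{4-E^2} =2\sin\theta$ and $dE =-2\sin\theta \, d\theta$, so
\[
\int_0^\pi g(2\cos\theta) \sin^2\theta \, d\theta = \f14 \int_{-2}^2 g(E) \sqrt{4-E^2}\, dE
\]
which is how the $\sin^2\theta$ integrals in the sum rules below correspond to the quasi-Szeg\H{o} condition \eqref{1.9} and the entropy $Q(\mu)$.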
The initial step is a general step-by-step sum rule:

\begin{theorem}\lb{T4.1} Let $J$ be an arbitrary Jacobi matrix which obeys condition {\rm{(i)}} of Theorem~\ref{T1.1} {\rm{(}}e.g., $J-J_0$ is compact{\rm{)}}. Let $J^{(1)}$ be the matrix with the top row and leftmost column removed. Then
\begin{SL}
\item[{\rm{(i)}}] If $E_n^\pm$ and $E_n^{(1)\pm}$ are the eigenvalues for $J$ and $J^{(1)}$, then with $F$ given by \eqref{1.12}/\eqref{1.12a},
\begin{equation} \lb{4.1}
\sum \, \abs{F(E_n^\pm)-F(E_n^{(1)\pm})}<\infty
\end{equation}
where $F(E_n^\pm)$ is interpreted as $0$ if $n > N_\pm$, and similarly for $F(E_n^{(1)\pm})$.
\item[{\rm{(ii)}}] $\log(\Ima M_1 (\theta)/\Ima M(\theta))$ {\rm{(}}interpreted via \eqref{1.17} if $\Ima M(\theta) =0${\rm{)}} lies in all $L^p$ for $p<\infty$.
\item[{\rm{(iii)}}]
\begin{equation} \lb{4.2}
\f{1}{2\pi} \int \log \biggl( \f{\Ima M_1 (\theta)}{\Ima M(\theta)}\biggr) \sin^2\theta \, d\theta + \sum_{n,\pm}\, [F(E_n^\pm)-F(E_n^{(1)\pm})] =\tfrac14\, b_1^2 + \tfrac12\, G(a_1)
\end{equation}
with $G$ given by \eqref{1.11}.
\end{SL}
\end{theorem}

{\it Remark.} The proof is essentially the same as that of Theorem~4.2 of \cite{KS}, given our Theorem~\ref{T1.2}.

\begin{proof} By \eqref{1.13a} and \eqref{3.8}, one finds
\begin{equation} \lb{4.3}
\log \biggl( \f{M(z)}{z}\biggr) = b_1 z + (\tfrac 12\, b_1^2 + a_1^2 - 1) z^2 + O(z^3)
\end{equation}
Computing the Taylor series for $B_\infty (z)$ up to order $2$, one finds by taking logs of \eqref{1.16} and looking at the zeroth and second Taylor coefficients that
\begin{equation} \lb{4.4}
\f{1}{4\pi} \int_0^{2\pi} \log \biggl( \f{\Ima M_1 (e^{i\theta})}{\Ima M (e^{i\theta})}\biggr) d\theta - \sum_{n,\pm}\, [\log \abs{\beta_n^\pm (J)}-\log \abs{\beta_n^\pm (J^{(1)})}] = -\log (a_1)
\end{equation}
and
\begin{equation} \lb{4.5}
\begin{split}
-& \f{1}{2\pi} \int_0^{2\pi} \log \biggl( \f{\Ima M_1 (e^{i\theta})}{\Ima M(e^{i\theta})}\biggr) \cos 2\theta \, d\theta \\
& + \tfrac12 \sum_{n,\pm}\, \{\beta_n^\pm (J)^2 - \beta_n^\pm (J^{(1)})^2 - \beta_n^\pm (J)^{-2} + \beta_n^\pm (J^{(1)})^{-2}\} = \tfrac12\, b_1^2 + (a_1^2 -1)
\end{split}
\end{equation}
In these formulae, the $\sum_{n,\pm}$ converge because $\abs{E_n^\pm} \leq \abs{E_n^{(1)\pm}} \leq \abs{E_{n+1}^\pm}$, so the sums are alternating sums, $\log \abs{\beta}$ and $\beta^2 - \beta^{-2}$ being monotone. \eqref{4.2} is obtained by adding $\f12 \times$ \eqref{4.5} to \eqref{4.4}.
\end{proof}

{\it Remark.} For cases where $M$ has a meromorphic continuation past $\partial\bbD$, \eqref{4.2} was proven by Killip-Simon \cite{KS}, and if $\int -\log [\Ima M(e^{i\theta})] \sin^2 \theta \, \f{d\theta}{4\pi} <\infty$, it was proven by Simon-Zlato\v{s} \cite{SZ}. In this generality, it is new.

\begin{proof}[Proof of \eqref{1.13}] We can iterate \eqref{4.2} to get
\begin{equation} \lb{4.6}
\begin{split}
\f{1}{2\pi} \int &\log \biggl( \f{\Ima M(e^{i\theta}, J^{(\ell)})}{\Ima M(e^{i\theta})}\biggr) \sin^2\theta \, d\theta \\
& + \sum_{n,\pm}\, [F(E_n^\pm)- F(E_n^{(\ell)\pm})] = \sum_{j=1}^\ell\, [\tfrac14\, b_j^2 + \tfrac12\, G(a_j)]
\end{split}
\end{equation}
with $J^{(\ell)}$ the matrix with the top $\ell$ rows and leftmost $\ell$ columns removed. In particular, if $J^{(\ell)}=J_0$ for some $\ell$, we have \eqref{1.13} in that case. Given a general $J$, let $J_\ell$ be defined by having $a$ values $(a_1, a_2, \dots, a_{\ell-1}, 1,1, \dots)$ and $b$ values $(b_1, \dots, b_\ell, 0,0,\dots)$, so that $(J_\ell)^{(\ell)} =J_0$. Thus \eqref{1.13} holds for $J_\ell$.
As $\ell\to\infty$, the right side of \eqref{1.13} for $J_\ell$ converges to the right side for $J$. By \eqref{1.10a} (and the corresponding semicontinuity of the eigenvalue sums), the left sides obey an inequality in the limit. The result is
\begin{equation} \lb{4.7}
\text{LHS of \eqref{1.13}} \leq \text{RHS of \eqref{1.13}}
\end{equation}
Suppose next that $J$ is such that $\text{LHS of \eqref{1.13}} <\infty$. Then each term is finite, so we can separate the logs and infinite sums, and \eqref{4.6} becomes
\begin{align*}
\text{LHS of \eqref{1.13} for $J$} &= \biggl[ \, \sum_{j=1}^\ell \tfrac14\, b_j^2 + \tfrac12\, G(a_j)\biggr] + \text{LHS of \eqref{1.13} for $J^{(\ell)}$} \\
&\geq \sum_{j=1}^\ell \tfrac14\, b_j^2 + \tfrac12\, G(a_j)
\end{align*}
since the left-hand side of \eqref{1.13} is always nonnegative. Taking $\ell\to\infty$, we find
\[
\text{LHS of \eqref{1.13}} \geq \text{RHS of \eqref{1.13}}
\]
\end{proof}

\section{Remarks} \lb{s5}

The ideas of this paper also provide a proof of Theorem~9.14 of \cite{KS} that when $J-J_0$ is trace class, the Jost function $u(z;J)$ has no singular inner component. This proof avoids Lemma~9.13 of \cite{KS} and its several pages of argument (plus the need to prove (2.50) of \cite{KS}). For, as in \cite{KS}, $u(z;J)$ is a Nevanlinna function, so it has a factorization \eqref{1.1b}
\begin{equation} \lb{5.1}
u=B_0 O_0 S_0
\end{equation}
Similarly, $u_1 \equiv u(z;J^{(1)})$ has a factorization
\begin{equation} \lb{5.2}
u_1 = B_1 O_1 S_1
\end{equation}
But the $m$-function obeys
\[
a_1 M(z) = \f{z\, u_1 (z)}{u(z)}
\]
Recognizing that $zB_1/B_0$ is exactly the factor $zB^+ B^-$ in the canonical factorization \eqref{1.16} of $M$, which has no singular part, we see that
\begin{equation} \lb{5.3}
S_0 = S_1
\end{equation}
Let
\begin{equation} \lb{5.4}
u_n = B_n O_n S_n
\end{equation}
be the Nevanlinna factorization for $u(z; J^{(n)})$. Since $J-J_0$ is trace class, $B_n\to 1$, and, using estimates from Section~2 of \cite{KS}, $u_n\to 1$, $O_n\to 1$. It follows that $S_n\to 1$. But, iterating \eqref{5.3}, $S_0 = S_1 =\cdots = S_n$, so $S_0\equiv 1$. This theme will be further pursued in \cite{DaS}.

The second topic concerns analogs of Theorem~\ref{T1.2} in the case of orthogonal polynomials on the unit circle \cite{Ger,Szego2,OPUC,Szego1}. If $d\mu =\f{w(\theta)}{2\pi}\, d\theta +d\mu_\s$ is a probability measure on $\partial\bbD$, $w(\theta)$ is the analog of $\Ima M(e^{i\theta})$. Just as $d\mu$ in the real line case is associated to a set of Jacobi parameters, $a_n >0$ and $b_n\in\bbR$, $d\mu$ in the circle case is associated to a sequence $\{\alpha_0, \alpha_1, \alpha_2, \dots \}$ of complex Verblunsky coefficients (also called Schur parameters or reflection coefficients). There is a measure, $d\mu_1$, associated with the Verblunsky coefficients $\{\alpha_1, \alpha_2, \dots\}$, analogous to $\mu_{J^{(1)}}$ in the real line case, and a weight $w_1$. There is a natural analog of the $M$-function, viz.,~the Carath\'eodory function
\[
F(z) = \int \f{e^{i\theta}+z}{e^{i\theta}-z}\, d\mu (e^{i\theta})
\]
and $w(\theta) =\lim_{r\uparrow 1} \Real F(re^{i\theta})$ for a.e.~$\theta$. Alas, there is nothing as simple as \eqref{3.10}, and no sense in which boundary values of $\abs{F(re^{i\theta})}$ are related to $w(\theta)/w_1 (\theta)$. $F$ is simply not the proper analog of $M$ for single-step sum rules.
Instead, the function that replaces $a_1M$ in the step-by-step sum rules for this case is
\begin{equation} \lb{5.5}
(\delta_0 D)(z) = \f{1-\ol{\alpha_0} f}{\rho_0} \, \f{1-zf_1}{1-zf}
\end{equation}
where $f$ is the Schur function for $d\mu$, that is, $F=(1+zf)/(1-zf)$, $f_1$ is the Schur function of $d\mu_1$, and $\rho_0=(1-\abs{\alpha_0}^2)^{1/2}$. This $\delta_0 D$ is an outer function, and that fact produces suitable step-by-step sum rules. The details will appear in \cite{OPUC}.

\medskip
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{thebibliography}{100}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\smallskip
%
\bi{DaS} D.~Damanik and B.~Simon, \textit{Jost functions and Jost solutions for Jacobi matrices}, in preparation.
%
\bi{Ger} Ya.~L. Geronimus, \textit{Polynomials Orthogonal on a Circle and Their Applications}, Amer. Math. Soc. Translation \textbf{1954} (1954), no.~104, 79 pp.
%
\bi{GS} F.~Gesztesy and B.~Simon, \textit{On the determination of a potential from three spectra}, Amer. Math. Soc. Transl. (2) \textbf{189} (1999), 85--92.
%
\bi{Szego2} U.~Grenander and G.~Szeg\H{o}, \textit{Toeplitz Forms and Their Applications}, 2nd edition, Chelsea, New York, 1984; 1st edition, University of California Press, Berkeley-Los Angeles, 1958.
%
\bi{KS} R.~Killip and B.~Simon, \textit{Sum rules for Jacobi matrices and their applications to spectral theory}, to appear in Ann. of Math.
%
\bi{Levin} B.~Ja. Levin, \textit{Distribution of Zeros of Entire Functions}, revised edition, American Mathematical Society, Providence, RI, 1980.
%
\bi{Rudin} W.~Rudin, \textit{Real and Complex Analysis}, 3rd edition, McGraw-Hill, New York, 1987.
%
\bi{OPUC} B.~Simon, \textit{Orthogonal Polynomials on the Unit Circle}, AMS Book Series, expected 2004.
%
\bi{SZ} B.~Simon and A.~Zlato\v{s}, \textit{Sum rules and the Szeg\H{o} condition for orthogonal polynomials on the real line}, preprint.
%
\bi{Szego1} G.~Szeg\H{o}, \textit{Orthogonal Polynomials}, Amer. Math. Soc. Colloq. Publ., Vol.~23, American Mathematical Society, Providence, RI, 1939; 3rd edition, 1967.
%
\end{thebibliography}

\end{document}