question
stringclasses 1
value | answer
stringlengths 0
6.48M
|
|---|---|
/**
* ScriptDev2 is an extension for mangos providing enhanced features for
* area triggers, creatures, game objects, instances, items, and spells beyond
* the default database scripting in mangos.
*
* Copyright (C) 2006-2013 ScriptDev2 <http://www.scriptdev2.com/>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* World of Warcraft, and all World of Warcraft or Warcraft art, images,
* and lore are copyrighted by Blizzard Entertainment, Inc.
*/
/**
* ScriptData
* SDName: bug_trio
* SD%Complete: 75
* SDComment: Summon Player spell NYI; Poison Cloud damage spell NYI; Timers need adjustments
* SDCategory: Temple of Ahn'Qiraj
* EndScriptData
*/
#include "precompiled.h"
#include "temple_of_ahnqiraj.h"
enum
{
    // Kri
    SPELL_CLEAVE       = 26350,                 // cast on the current victim
    SPELL_TOXIC_VOLLEY = 25812,                 // self-cast volley
    SPELL_SUMMON_CLOUD = 26590,                 // summons 15933 (poison cloud), triggered on Kri's death

    // Vem
    SPELL_CHARGE       = 26561,                 // cast on a random attacker
    SPELL_VENGEANCE    = 25790,                 // enrage for the surviving bugs, triggered on Vem's death
    SPELL_KNOCKBACK    = 26027,                 // self-cast; followed by a threat reduction on the victim

    // Yauj
    SPELL_HEAL         = 25807,                 // cast on the lowest-HP friendly unit in range
    SPELL_FEAR         = 26580,                 // self-cast; threat is reset after a successful cast
    NPC_YAUJ_BROOD     = 15621                  // broodling summoned (x10) when Yauj dies
};
/// Kri: melee boss using Cleave and Toxic Volley; leaves a poison cloud on death.
struct MANGOS_DLL_DECL boss_kriAI : public ScriptedAI
{
    boss_kriAI(Creature* pCreature) : ScriptedAI(pCreature),
        m_pInstance((ScriptedInstance*)pCreature->GetInstanceData())
    {
        Reset();
    }

    ScriptedInstance* m_pInstance;

    uint32 m_uiNextCleave;                      // countdown (ms) until the next Cleave attempt
    uint32 m_uiNextToxicVolley;                 // countdown (ms) until the next Toxic Volley attempt

    void Reset() override
    {
        m_uiNextCleave      = urand(4000, 8000);
        m_uiNextToxicVolley = urand(6000, 12000);
    }

    void JustDied(Unit* /*pKiller*/) override
    {
        // Leave a poison cloud behind at the corpse
        DoCastSpellIfCan(m_creature, SPELL_SUMMON_CLOUD, CAST_TRIGGERED);

        if (!m_pInstance)
        {
            return;
        }

        // While the other two bugs are alive, keep this corpse unlootable
        if (m_pInstance->GetData(TYPE_BUG_TRIO) != DONE)
        {
            m_creature->RemoveFlag(UNIT_DYNAMIC_FLAGS, UNIT_DYNFLAG_LOOTABLE);
            m_pInstance->SetData(TYPE_BUG_TRIO, SPECIAL);
        }
    }

    void JustReachedHome() override
    {
        // Reaching home after a wipe fails the whole trio encounter
        if (m_pInstance)
        {
            m_pInstance->SetData(TYPE_BUG_TRIO, FAIL);
        }
    }

    void UpdateAI(const uint32 uiDiff) override
    {
        // Do nothing unless we have a valid combat target
        if (!m_creature->SelectHostileTarget() || !m_creature->getVictim())
        {
            return;
        }

        // Cleave the current victim
        if (uiDiff > m_uiNextCleave)
        {
            if (DoCastSpellIfCan(m_creature->getVictim(), SPELL_CLEAVE) == CAST_OK)
            {
                m_uiNextCleave = urand(5000, 12000);
            }
        }
        else
        {
            m_uiNextCleave -= uiDiff;
        }

        // Toxic Volley (self-cast)
        if (uiDiff > m_uiNextToxicVolley)
        {
            if (DoCastSpellIfCan(m_creature, SPELL_TOXIC_VOLLEY) == CAST_OK)
            {
                m_uiNextToxicVolley = urand(10000, 15000);
            }
        }
        else
        {
            m_uiNextToxicVolley -= uiDiff;
        }

        DoMeleeAttackIfReady();
    }
};
/// Vem: charges random attackers and knocks back its victim; enrages the others on death.
struct MANGOS_DLL_DECL boss_vemAI : public ScriptedAI
{
    boss_vemAI(Creature* pCreature) : ScriptedAI(pCreature),
        m_pInstance((ScriptedInstance*)pCreature->GetInstanceData())
    {
        Reset();
    }

    ScriptedInstance* m_pInstance;

    uint32 m_uiNextCharge;                      // countdown (ms) until the next Charge attempt
    uint32 m_uiNextKnockBack;                   // countdown (ms) until the next Knockback attempt

    void Reset() override
    {
        m_uiNextCharge    = urand(15000, 27000);
        m_uiNextKnockBack = urand(8000, 20000);
    }

    void JustDied(Unit* /*pKiller*/) override
    {
        // Enrage the other bugs
        DoCastSpellIfCan(m_creature, SPELL_VENGEANCE, CAST_TRIGGERED);

        if (!m_pInstance)
        {
            return;
        }

        // While the other two bugs are alive, keep this corpse unlootable
        if (m_pInstance->GetData(TYPE_BUG_TRIO) != DONE)
        {
            m_creature->RemoveFlag(UNIT_DYNAMIC_FLAGS, UNIT_DYNFLAG_LOOTABLE);
            m_pInstance->SetData(TYPE_BUG_TRIO, SPECIAL);
        }
    }

    void JustReachedHome() override
    {
        // Reaching home after a wipe fails the whole trio encounter
        if (m_pInstance)
        {
            m_pInstance->SetData(TYPE_BUG_TRIO, FAIL);
        }
    }

    void UpdateAI(const uint32 uiDiff) override
    {
        // Do nothing unless we have a valid combat target
        if (!m_creature->SelectHostileTarget() || !m_creature->getVictim())
        {
            return;
        }

        // Charge a random attacker
        if (uiDiff > m_uiNextCharge)
        {
            if (Unit* pChargeTarget = m_creature->SelectAttackingTarget(ATTACKING_TARGET_RANDOM, 0))
            {
                if (DoCastSpellIfCan(pChargeTarget, SPELL_CHARGE) == CAST_OK)
                {
                    m_uiNextCharge = urand(8000, 16000);
                }
            }
        }
        else
        {
            m_uiNextCharge -= uiDiff;
        }

        // Knockback, then shed most of the victim's threat
        if (uiDiff > m_uiNextKnockBack)
        {
            if (DoCastSpellIfCan(m_creature, SPELL_KNOCKBACK) == CAST_OK)
            {
                if (m_creature->GetThreatManager().getThreat(m_creature->getVictim()))
                {
                    m_creature->GetThreatManager().modifyThreatPercent(m_creature->getVictim(), -80);
                }
                m_uiNextKnockBack = urand(15000, 25000);
            }
        }
        else
        {
            m_uiNextKnockBack -= uiDiff;
        }

        DoMeleeAttackIfReady();
    }
};
/// Yauj: caster bug using Fear and Heal; spawns a brood of adds on death.
struct MANGOS_DLL_DECL boss_yaujAI : public ScriptedAI
{
    boss_yaujAI(Creature* pCreature) : ScriptedAI(pCreature),
        m_pInstance((ScriptedInstance*)pCreature->GetInstanceData())
    {
        Reset();
    }

    ScriptedInstance* m_pInstance;

    uint32 m_uiNextHeal;                        // countdown (ms) until the next Heal attempt
    uint32 m_uiNextFear;                        // countdown (ms) until the next Fear attempt

    void Reset() override
    {
        m_uiNextHeal = urand(25000, 40000);
        m_uiNextFear = urand(12000, 24000);
    }

    void JustDied(Unit* /*pKiller*/) override
    {
        // Scatter 10 broodlings within 10 yd of the corpse
        float fSpawnX, fSpawnY, fSpawnZ;
        for (int iBrood = 0; iBrood < 10; ++iBrood)
        {
            m_creature->GetRandomPoint(m_creature->GetPositionX(), m_creature->GetPositionY(), m_creature->GetPositionZ(), 10.0f, fSpawnX, fSpawnY, fSpawnZ);
            m_creature->SummonCreature(NPC_YAUJ_BROOD, fSpawnX, fSpawnY, fSpawnZ, 0.0f, TEMPSUMMON_TIMED_OOC_DESPAWN, 30000);
        }

        if (!m_pInstance)
        {
            return;
        }

        // While the other two bugs are alive, keep this corpse unlootable
        if (m_pInstance->GetData(TYPE_BUG_TRIO) != DONE)
        {
            m_creature->RemoveFlag(UNIT_DYNAMIC_FLAGS, UNIT_DYNFLAG_LOOTABLE);
            m_pInstance->SetData(TYPE_BUG_TRIO, SPECIAL);
        }
    }

    void JustReachedHome() override
    {
        // Reaching home after a wipe fails the whole trio encounter
        if (m_pInstance)
        {
            m_pInstance->SetData(TYPE_BUG_TRIO, FAIL);
        }
    }

    void UpdateAI(const uint32 uiDiff) override
    {
        // Do nothing unless we have a valid combat target
        if (!m_creature->SelectHostileTarget() || !m_creature->getVictim())
        {
            return;
        }

        // Fear (self-cast), then wipe the threat table
        if (uiDiff > m_uiNextFear)
        {
            if (DoCastSpellIfCan(m_creature, SPELL_FEAR) == CAST_OK)
            {
                DoResetThreat();
                m_uiNextFear = 20000;
            }
        }
        else
        {
            m_uiNextFear -= uiDiff;
        }

        // Heal the most injured nearby friend
        if (uiDiff > m_uiNextHeal)
        {
            if (Unit* pHealTarget = DoSelectLowestHpFriendly(100.0f))
            {
                if (DoCastSpellIfCan(pHealTarget, SPELL_HEAL) == CAST_OK)
                {
                    m_uiNextHeal = urand(15000, 30000);
                }
            }
        }
        else
        {
            m_uiNextHeal -= uiDiff;
        }

        DoMeleeAttackIfReady();
    }
};
/// Factory for the Yauj AI; ownership passes to the core.
CreatureAI* GetAI_boss_yauj(Creature* pCreature)
{
    boss_yaujAI* pAI = new boss_yaujAI(pCreature);
    return pAI;
}
/// Factory for the Vem AI; ownership passes to the core.
CreatureAI* GetAI_boss_vem(Creature* pCreature)
{
    boss_vemAI* pAI = new boss_vemAI(pCreature);
    return pAI;
}
/// Factory for the Kri AI; ownership passes to the core.
CreatureAI* GetAI_boss_kri(Creature* pCreature)
{
    boss_kriAI* pAI = new boss_kriAI(pCreature);
    return pAI;
}
void AddSC_bug_trio()
{
Script* pNewScript;
pNewScript = new Script;
pNewScript->Name = "boss_kri";
pNewScript->GetAI = &GetAI_boss_kri;
pNewScript->RegisterSelf();
pNewScript = new Script;
pNewScript->Name = "boss_vem";
pNewScript->GetAI = &GetAI_boss_vem;
pNewScript->RegisterSelf();
pNewScript = new Script;
pNewScript->Name = "boss_yauj";
pNewScript->GetAI = &GetAI_boss_yauj;
pNewScript->RegisterSelf();
}
|
|
---
abstract: 'The aim of this paper is to establish a global asymptotic equivalence between the experiments generated by the discrete (high frequency) or continuous observation of a path of a Lévy process and a Gaussian white noise experiment observed up to a time $T$, with $T$ tending to $\infty$. These approximations are given in the sense of the Le Cam distance, under some smoothness conditions on the unknown Lévy density. All the asymptotic equivalences are established by constructing explicit Markov kernels that can be used to reproduce one experiment from the other.'
address:
- '*Laboratoire LJK, Université Joseph Fourier UMR 5224 51, Rue des Mathématiques, Saint Martin d’Hères BP 53 38041 Grenoble Cedex 09*'
- 'Corresponding Author, [email protected]'
author:
- Ester Mariucci
bibliography:
- 'refs.bib'
title: Asymptotic equivalence for pure jump Lévy processes with unknown Lévy density and Gaussian white noise
---
Keywords: Nonparametric experiments, Le Cam distance, asymptotic equivalence, Lévy processes. MSC 2010 subject classifications: 62B15 (62G20, 60G51).
Introduction
============
Lévy processes are a fundamental tool in modelling situations, like the dynamics of asset prices and weather measurements, where sudden changes in values may happen. For that reason they are widely employed, among many other fields, in mathematical finance. To name a simple example, the price of a commodity at time $t$ is commonly given as an exponential function of a Lévy process. In general, exponential Lévy models are proposed for their ability to take into account several empirical features observed in the returns of assets such as heavy tails, high-kurtosis and asymmetry (see [@tankov] for an introduction to financial applications).
From a mathematical point of view, Lévy processes are a natural extension of the Brownian motion which preserves the tractable statistical properties of its increments, while relaxing the continuity of paths. The jump dynamics of a Lévy process is dictated by its Lévy density, say $f$. If $f$ is continuous, its value at a point $x_0$ determines how frequent jumps of size close to $x_0$ are to occur per unit time. Concretely, if $X$ is a pure jump Lévy process with Lévy density $f$, then the function $f$ is such that $$\int_Af(x)dx=\frac{1}{t}{\ensuremath {\mathbb{E}}}\bigg[\sum_{s\leq t}{\ensuremath {\mathbb{I}}}_A(\Delta X_s)\bigg],$$ for any Borel set $A$ and $t>0$. Here, $\Delta X_s\equiv X_s-X_{s^-}$ denotes the magnitude of the jump of $X$ at time $s$ and ${\ensuremath {\mathbb{I}}}_A$ is the characteristic function. Thus, the Lévy measure $$\nu(A):=\int_A f(x)dx,$$ is the average number of jumps (per unit time) whose magnitudes fall in the set $A$. Understanding the jumps behavior, therefore requires to estimate the Lévy measure. Several recent works have treated this problem, see e.g. [@bel15] for an overview.
When the available data consists of the whole trajectory of the process during a time interval $[0,T]$, the problem of estimating $f$ may be reduced to estimating the intensity function of an inhomogeneous Poisson process (see, e.g. [@fig06; @rey03]). However, a continuous-time sampling is never available in practice and thus the relevant problem is that of estimating $f$ based on discrete sample data $X_{t_0},\dots,X_{t_n}$ during a time interval $[0,T_n]$. In that case, the jumps are latent (unobservable) variables and that clearly adds to the difficulty of the problem. From now on we will place ourselves in a high-frequency setting, that is we assume that the sampling interval $\Delta_n=t_i-t_{i-1}$ tends to zero as $n$ goes to infinity. Such a high-frequency based statistical approach has played a central role in the recent literature on nonparametric estimation for Lévy processes (see e.g. [@fig09; @comte10; @comte11; @bec12; @duval12]). Moreover, in order to make consistent estimation possible, we will also ask the observation time $T_n$ to tend to infinity in order to allow the identification of the jump part in the limit.
Our aim is to prove that, under suitable hypotheses, estimating the Lévy density $f$ is equivalent to estimating the drift of an adequate Gaussian white noise model. In general, asymptotic equivalence results for statistical experiments provide a deeper understanding of statistical problems and allow to single out their main features. The idea is to pass via asymptotic equivalence to another experiment which is easier to analyze. By definition, two sequences of experiments ${\ensuremath {\mathscr{P}}}_{1,n}$ and ${\ensuremath {\mathscr{P}}}_{2,n}$, defined on possibly different sample spaces, but with the same parameter set, are asymptotically equivalent if the Le Cam distance $\Delta({\ensuremath {\mathscr{P}}}_{1,n},{\ensuremath {\mathscr{P}}}_{2,n})$ tends to zero. For ${\ensuremath {\mathscr{P}}}_{i}=({\ensuremath {\mathscr{X}}}_i,{\ensuremath {\mathscr{A}}}_i, \big(P_{i,\theta}:\theta\in\Theta)\big)$, $i=1,2$, $\Delta({\ensuremath {\mathscr{P}}}_1,{\ensuremath {\mathscr{P}}}_2)$ is the symmetrization of the deficiency $\delta({\ensuremath {\mathscr{P}}}_1,{\ensuremath {\mathscr{P}}}_2)$ where $$\delta({\ensuremath {\mathscr{P}}}_{1},{\ensuremath {\mathscr{P}}}_{2})=\inf_K\sup_{\theta\in\Theta}\big\|KP_{1,\theta}-P_{2,\theta}\big\|_{TV}.$$ Here the infimum is taken over all randomizations from $({\ensuremath {\mathscr{X}}}_1,{\ensuremath {\mathscr{A}}}_1)$ to $({\ensuremath {\mathscr{X}}}_2,{\ensuremath {\mathscr{A}}}_2)$ and $\| \cdot \|_{TV}$ denotes the total variation distance. Roughly speaking, the Le Cam distance quantifies how much one fails to reconstruct (with the help of a randomization) a model from the other one and vice versa. 
Therefore, we say that $\Delta({\ensuremath {\mathscr{P}}}_1,{\ensuremath {\mathscr{P}}}_2)=0$ can be interpreted as “the models ${\ensuremath {\mathscr{P}}}_1$ and ${\ensuremath {\mathscr{P}}}_2$ contain the same amount of information about the parameter $\theta$.” The general definition of randomization is quite involved but, in the most frequent examples (namely when the sample spaces are Polish and the experiments dominated), it reduces to that of a Markov kernel. One of the most important feature of the Le Cam distance is that it can be also interpreted in terms of statistical decision theory (see [@lecam; @LC2000]; a short review is presented in the Appendix). As a consequence, saying that two statistical models are equivalent means that any statistical inference procedure can be transferred from one model to the other in such a way that the asymptotic risk remains the same, at least for bounded loss functions. Also, as soon as two models, ${\ensuremath {\mathscr{P}}}_{1,n}$ and ${\ensuremath {\mathscr{P}}}_{2,n}$, that share the same parameter space $\Theta$ are proved to be asymptotically equivalent, the same result automatically holds for the restrictions of both ${\ensuremath {\mathscr{P}}}_{1,n}$ and ${\ensuremath {\mathscr{P}}}_{2,n}$ to a smaller subclass of $\Theta$.
Historically, the first results of asymptotic equivalence in a nonparametric context date from 1996 and are due to [@BL] and [@N96]. The first two authors have shown the asymptotic equivalence of nonparametric regression and a Gaussian white noise model while the third one those of density estimation and white noise. Over the years many generalizations of these results have been proposed such as [@regression02; @GN2002; @ro04; @C2007; @cregression; @R2008; @C2009; @R2013; @schmidt14] for nonparametric regression or [@cmultinomial; @j03; @BC04] for nonparametric density estimation models. Another very active field of study is that of diffusion experiments. The first result of equivalence between diffusion models and Euler scheme was established in 1998, see [@NM]. In later papers generalizations of this result have been considered (see [@C14; @esterdiffusion]). Among others we can also cite equivalence results for generalized linear models [@GN], time series [@GN2006; @NM], diffusion models [@D; @CLN; @R2006; @rmultidimensionale], GARCH model [@B], functional linear regression [@M2011], spectral density estimation [@GN2010] and volatility estimation [@R11]. Negative results are somewhat harder to come by; the most notable among them are [@sam96; @B98; @wang02]. There is however a lack of equivalence results concerning processes with jumps. A first result in this sense is [@esterESAIM] in which global asymptotic equivalences between the experiments generated by the discrete or continuous observation of a path of a Lévy process and a Gaussian white noise experiment are established. More precisely, in that paper, we have shown that estimating the drift function $h$ from a continuously or discretely (high frequency) time inhomogeneous jump-diffusion process: $$\label{ch4X}
X_t=\int_0^th(s)ds+\int_0^t\sigma(s)dW_s +\sum_{i=1}^{N_t}Y_i,\quad t\in[0,T_n],$$ is asymptotically equivalent to estimating $h$ in the Gaussian model: $$ dy_t=h(t)dt+\sigma(t)dW_t, \quad t\in[0,T_n].$$
Here we try to push the analysis further and we focus on the case in which the considered parameter is the Lévy density and $X=(X_t)$ is a pure jump Lévy process (see [@carr02] for the interest of such a class of processes when modelling asset returns). More in details, we consider the problem of estimating the Lévy density (with respect to a fixed, possibly infinite, Lévy measure $\nu_0$ concentrated on $I\subseteq {\ensuremath {\mathbb{R}}}$) $f:=\frac{d\nu}{d\nu_0}:I\to {\ensuremath {\mathbb{R}}}$ from a continuously or discretely observed pure jump Lévy process $X$ with possibly infinite Lévy measure. Here $I\subseteq {\ensuremath {\mathbb{R}}}$ denotes a possibly infinite interval and $\nu_0$ is supposed to be absolutely continuous with respect to Lebesgue with a strictly positive density $g:=\frac{d\nu_0}{d{\ensuremath{\textnormal{Leb}}}}$. In the case where $\nu$ is of finite variation one may write: $$\label{eqn:ch4Levy}
X_t=\sum_{0<s\leq t}\Delta X_s$$ or, equivalently, $X$ has a characteristic function given by: $${\ensuremath {\mathbb{E}}}\big[e^{iuX_t}\big]=\exp\bigg(-t\bigg(\int_{I}(1-e^{iuy})\nu(dy)\bigg)\bigg).$$
We suppose that the function $f$ belongs to some a priori set ${\ensuremath {\mathscr{F}}}$, nonparametric in general. The discrete observations are of the form $X_{t_i}$, where $t_i=T_n\frac{i}{n}$, $i=0,\dots,n$ with $T_n=n\Delta_n\to \infty$ and $\Delta_n\to 0$ as $n$ goes to infinity. We will denote by ${\ensuremath {\mathscr{P}}}_n^{\nu_0}$ the statistical model associated with the continuous observation of a trajectory of $X$ until time $T_n$ (which is supposed to go to infinity as $n$ goes to infinity) and by ${\ensuremath {\mathscr{Q}}}_n^{\nu_0}$ the one associated with the observation of the discrete data $(X_{t_i})_{i=0}^n$. The aim of this paper is to prove that, under adequate hypotheses on ${\ensuremath {\mathscr{F}}}$ (for example, $f$ must be bounded away from zero and infinity; see Section \[subsec:ch4parameter\] for a complete definition), the models ${\ensuremath {\mathscr{P}}}_n^{\nu_0}$ and ${\ensuremath {\mathscr{Q}}}_n^{\nu_0}$ are both asymptotically equivalent to a sequence of Gaussian white noise models of the form: $$dy_t=\sqrt{f(t)}dt+\frac{1}{2\sqrt{T_n}}\frac{dW_t}{\sqrt{g(t)}},\quad t\in I.$$ As a corollary, we then get the asymptotic equivalence between ${\ensuremath {\mathscr{P}}}_n^{\nu_0}$ and ${\ensuremath {\mathscr{Q}}}_n^{\nu_0}$. The main results are precisely stated as Theorems \[ch4teo1\] and \[ch4teo2\]. A particular case of special interest arises when $X$ is a compound Poisson process, $\nu_0\equiv {\ensuremath{\textnormal{Leb}}}([0,1])$ and ${\ensuremath {\mathscr{F}}}\subseteq {\ensuremath {\mathscr{F}}}_{(\gamma,K,\kappa,M)}^I$ where, for fixed $\gamma\in (0,1]$ and $K,\kappa, M$ strictly positive constants, ${\ensuremath {\mathscr{F}}}_{(\gamma,K,\kappa,M)}^I$ is a class of continuously differentiable functions on $I$ defined as follows: $$\label{ch4:fholder}
{\ensuremath {\mathscr{F}}}_{(\gamma,K,\kappa,M)}^I=\Big\{f: \kappa\leq f(x)\leq M, \ |f'(x)-f'(y)|\leq K|x-y|^{\gamma},\ \forall x,y\in I\Big\}.$$ In this case, the statistical models ${\ensuremath {\mathscr{P}}}_n^{\nu_0}$ and ${\ensuremath {\mathscr{Q}}}_n^{\nu_0}$ are both equivalent to the Gaussian white noise model: $$dy_t=\sqrt{f(t)}dt+\frac{1}{2\sqrt{T_n}}dW_t,\quad t\in [0,1].$$ See Example \[ex:ch4CPP\] for more details. By a theorem of Brown and Low in [@BL], we obtain, a posteriori, an asymptotic equivalence with the regression model $$Y_i=\sqrt{f\Big(\frac{i}{T_n}\Big)}+\frac{1}{2\sqrt{T_n}}\xi_i, \quad \xi_i\sim{\ensuremath {\mathscr{Nn}}}(0,1), \quad i=1,\dots, [T_n].$$ Note that a similar form of a Gaussian shift was found to be asymptotically equivalent to a nonparametric density estimation experiment, see [@N96]. Let us mention that we also treat some explicit examples where $\nu_0$ is neither finite nor compactly-supported (see Examples \[ch4ex2\] and \[ex3\]). Without entering into any detail, we remark here that the methods are very different from those in [@esterESAIM]. In particular, since $f$ belongs to the discontinuous part of a Lévy process, rather then its continuous part, the Girsanov-type changes of measure are irrelevant here. We thus need new instruments, like the Esscher changes of measure.
Our proof is based on the construction, for any given Lévy measure $\nu$, of two adequate approximations $\hat \nu_m$ and $\bar \nu_m$ of $\nu$: the idea of discretizing the Lévy density already appeared in an earlier work with P. Étoré and S. Louhichi, [@etore13]. The present work is also inspired by the papers [@cmultinomial] (for a multinomial approximation), [@BC04] (for passing from independent Poisson variables to independent normal random variables) and [@esterESAIM] (for a Bernoulli approximation). This method allows us to construct explicit Markov kernels that lead from one model to the other; these may be applied in practice to transfer minimax estimators.
The paper is organized as follows: Sections \[subsec:ch4parameter\] and \[subsec:ch4experiments\] are devoted to make the parameter space and the considered statistical experiments precise. The main results are given in Section \[subsec:ch4mainresults\], followed by Section \[sec:ch4experiments\] in which some examples can be found. The proofs are postponed to Section \[sec:ch4proofs\]. The paper includes an Appendix recalling the definition and some useful properties of the Le Cam distance as well as of Lévy processes.
Assumptions and main results
============================
The parameter space {#subsec:ch4parameter}
-------------------
Consider a (possibly infinite) Lévy measure $\nu_0$ concentrated on a possibly infinite interval $I\subseteq{\ensuremath {\mathbb{R}}}$, admitting a density $g>0$ with respect to Lebesgue. The parameter space of the experiments we are concerned with is a class of functions ${\ensuremath {\mathscr{F}}}={\ensuremath {\mathscr{F}}}^{\nu_0,I}$ defined on $I$ that form a class of Lévy densities with respect to $\nu_0$: For each $f\in{\ensuremath {\mathscr{F}}}$, let $\nu$ (resp. $\hat \nu_m$) be the Lévy measure having $f$ (resp. $\hat f_m$) as a density with respect to $\nu_0$ where, for every $f\in{\ensuremath {\mathscr{F}}}$, $\hat f_m(x)$ is defined as follows.
Suppose first $x>0$. Given a positive integer depending on $n$, $m=m_n$, let $J_j:=(v_{j-1},v_j]$ where $v_1=\varepsilon_m\geq 0$ and $v_j$ are chosen in such a way that $$\label{eq:ch4Jj}
\mu_m:=\nu_0(J_j)=\frac{\nu_0\big((I\setminus[0,\varepsilon_m])\cap {\ensuremath {\mathbb{R}}}_+\big)}{m-1},\quad \forall j=2,\dots,m.$$ In the sequel, for the sake of brevity, we will only write $m$ without making explicit the dependence on $n$. Define $x_j^*:=\frac{\int_{J_j}x\nu_0(dx)}{\mu_m}$ and introduce a sequence of functions $0\leq V_j\leq \frac{1}{\mu_m}$, $j=2,\dots,m$ supported on $[x_{j-1}^*, x_{j+1}^*]$ if $j=3,\dots,m-1$, on $[\varepsilon_m, x_3^*]$ if $j=2$ and on $(I\setminus [0,x_{m-1}^*])\cap {\ensuremath {\mathbb{R}}}_+$ if $j=m$. The $V_j$’s are defined recursively in the following way.
- $V_2$ is equal to $\frac{1}{\mu_m}$ on the interval $(\varepsilon_m, x_2^*]$ and on the interval $(x_2^*,x_3^*]$ it is chosen so that it is continuous (in particular, $V_2(x_2^*)=\frac{1}{\mu_m}$), $\int_{x_2^*}^{x_3^*}V_2(y)\nu_0(dy)=\frac{\nu_0((x_2^*, v_2])}{\mu_m}$ and $V_2(x_3^*)=0$.
- For $j=3,\dots,m-1$ define $V_j$ as the function $\frac{1}{\mu_m}-V_{j-1}$ on the interval $[x_{j-1}^*,x_j^*]$. On $[x_j^*,x_{j+1}^*]$ choose $V_j$ continuous and such that $\int_{x_j^*}^{x_{j+1}^*}V_j(y)\nu_0(dy)=\frac{\nu_0((x_j^*,v_j])}{\mu_m}$ and $V_j(x_{j+1}^*)=0$.
- Finally, let $V_m$ be the function supported on $(I\setminus [0,x_{m-1}^*]) \cap {\ensuremath {\mathbb{R}}}_+$ such that $$\begin{aligned}
V_m(x)&=\frac{1}{\mu_m}-V_{m-1}(x), \quad\text{for } x \in [x_{m-1}^*,x_m^*],\\
V_m(x)&=\frac{1}{\mu_m}, \quad\text{for } x \in (I\setminus [0,x_m^*])\cap {\ensuremath {\mathbb{R}}}_+.\end{aligned}$$
(It is immediate to check that such a choice is always possible). Observe that, by construction, $$\sum_{j=2}^m V_j(x)\mu_m=1, \quad \forall x\in (I\setminus[0,\varepsilon_m])\cap {\ensuremath {\mathbb{R}}}_+ \quad \textnormal{and} \quad \int_{(I\setminus[0,\varepsilon_m])\cap {\ensuremath {\mathbb{R}}}_+}V_j(y)\nu_0(dy)=1.$$
Analogously, define $\mu_m^-=\frac{\nu_0\big((I\setminus[-\varepsilon_m,0])\cap {\ensuremath {\mathbb{R}}}_-\big)}{m-1}$ and $J_{-m},\dots,J_{-2}$ such that $\nu_0(J_{-j})=\mu_m^-$ for all $j$. Then, for $x<0$, $x_{-j}^*$ is defined as $x_j^*$ by using $J_{-j}$ and $\mu_m^-$ instead of $J_j$ and $\mu_m$ and the $V_{-j}$’s are defined with the same procedure as the $V_j$’s, starting from $V_{-2}$ and proceeding by induction.
Define $$\label{eq:ch4hatf}
\hat f_m(x)={\ensuremath {\mathbb{I}}}_{[-\varepsilon_m,\varepsilon_m]}(x)+\sum_{j=2}^m \bigg(V_j(x)\int_{J_j} f(y)\nu_0(dy)+V_{-j}(x)\int_{J_{-j}} f(y)\nu_0(dy)\bigg).$$ The definitions of the $V_j$’s above are modeled on the following example:
\[ex:Vj\] Let $\nu_0$ be the Lebesgue measure on $[0,1]$ and $\varepsilon_m=0$. Then $v_j=\frac{j-1}{m-1}$ and $x_j^*=\frac{2j-3}{2m-2}$, $j=2,\dots,m$. The standard choice for $V_j$ (based on the construction by [@cmultinomial]) is given by the piecewise linear functions interpolating the values in the points $x_j^*$ specified above:
The function $\hat f_m$ has been defined in such a way that the rate of convergence of the $L_2$ norm between the restriction of $f$ and $\hat f_m$ on $I\setminus[-\varepsilon_m,\varepsilon_m]$ is compatible with the rate of convergence of the other quantities appearing in the statements of Theorems \[ch4teo1\] and \[ch4teo2\]. For that reason, as in [@cmultinomial], we have not chosen a piecewise constant approximation of $f$ but an approximation that is, at least in the simplest cases, a piecewise linear approximation of $f$. Such a choice allows us to gain an order of magnitude on the convergence rate of $\|f-\hat f_m\|_{L_2(\nu_0|{I\setminus{[-\varepsilon_m,\varepsilon_m]}})}$ at least when ${\ensuremath {\mathscr{F}}}$ is a class of sufficiently smooth functions.
We now explain the assumptions we will need to make on the parameter $f \in {\ensuremath {\mathscr{F}}}= {\ensuremath {\mathscr{F}}}^{\nu_0, I}$. The superscripts $\nu_0$ and $I$ will be suppressed whenever this can lead to no confusion. We require that:
1. There exist constants $\kappa, M >0$ such that $\kappa\leq f(y)\leq M$, for all $y\in I$ and $f\in {\ensuremath {\mathscr{F}}}$.
For every integer $m=m_n$, we can consider $\widehat{\sqrt{f}}_m$, the approximation of $\sqrt{f}$ constructed as $\hat f_m$ above, i.e. $\widehat{\sqrt{f}}_m(x)=\displaystyle{{\ensuremath {\mathbb{I}}}_{[-\varepsilon_m,\varepsilon_m]}(x)+\sum_{\substack{j=-m\dots,m\\ j\neq -1,0,1.}}V_j(x)\int_{J_j} \sqrt{f(y)}\nu_0(dy)}$, and introduce the quantities: $$\begin{aligned}
A_m^2(f)&:= \int_{I\setminus \big[-\varepsilon_m,\varepsilon_m\big]}\Big(\widehat{\sqrt {f}}_m(y)-\sqrt{f(y)}\Big)^2\nu_0(dy),\\
B_m^2(f)&:= \sum_{\substack{j=-m\dots,m\\ j\neq -1,0,1.}}\bigg(\int_{J_j}\frac{\sqrt{f(y)}}{\sqrt{\nu_0(J_j)}}\nu_0(dy)-\sqrt{\nu(J_j)}\bigg)^2,\\
C_m^2(f)&:= \int_{-\varepsilon_m}^{\varepsilon_m}\big(\sqrt{f(t)}-1\big)^2\nu_0(dt).
\end{aligned}$$ The conditions defining the parameter space ${\ensuremath {\mathscr{F}}}$ are expressed by asking that the quantities introduced above converge quickly enough to zero. To state the assumptions of Theorem \[ch4teo1\] precisely, we will assume the existence of sequences of discretizations $m = m_n\to\infty$, of positive numbers $\varepsilon_m=\varepsilon_{m_n}\to 0$ and of functions $V_j$, $j = \pm 2, \dots, \pm m$, such that:
1. \[cond:ch4hellinger\] $\lim\limits_{n \to \infty}n\Delta_n\sup\limits_{f \in{\ensuremath {\mathscr{F}}}}\displaystyle{\int_{I\setminus(-\varepsilon_m,\varepsilon_m)}}\Big(f(x)-\hat f_m(x)\Big)^2 \nu_0(dx) = 0$.
2. \[cond:ch4ABC\]$\lim\limits_{n \to \infty}n\Delta_n\sup\limits_{f \in{\ensuremath {\mathscr{F}}}} \big(A_m^2(f)+B_m^2(f)+C_m^2(f)\big)=0$.
Remark in particular that Condition (C\[cond:ch4ABC\]) implies the following:
1. $\displaystyle \sup_{f\in{\ensuremath {\mathscr{F}}}}\int_I (\sqrt{f(y)}-1)^2 \nu_0(dy) \leq L,$
where $L = \sup_{f \in {\ensuremath {\mathscr{F}}}} \int_{-\varepsilon_m}^{\varepsilon_m} (\sqrt{f(x)}-1)^2\nu_0(dx) + (\sqrt{M}+1)^2\nu_0\big(I\setminus (-\varepsilon_m, \varepsilon_m)\big)$, for any choice of $m$ such that the quantity in the limit appearing in Condition (C\[cond:ch4ABC\]) is finite.
Theorem \[ch4teo2\] has slightly stronger hypotheses, defining possibly smaller parameter spaces: We will assume the existence of sequences $m_n$, $\varepsilon_m$ and $V_j$, $j = \pm 2, \dots, \pm m$ (possibly different from the ones above) such that Condition (C1) is verified and the following stronger version of Condition (C2) holds:
1. $\lim\limits_{n \to \infty}n\Delta_n\sup\limits_{f \in{\ensuremath {\mathscr{F}}}} \big(A_m^2(f)+B_m^2(f)+nC_m^2(f)\big)=0$.
Finally, some of our results have a more explicit statement under the hypothesis of finite variation which we state as:
- $\int_I (|x|\wedge 1)\nu_0(dx)<\infty$.
Condition (C1) and the conditions involving the quantities $A_m(f)$ and $B_m(f)$ all concern similar but slightly different approximations of $f$. In concrete examples, they may all be expected to have the same rate of convergence, but to keep the greatest generality we have preferred to state them separately. On the other hand, the conditions on the quantity $C_m(f)$ are purely local around zero, requiring the parameters $f$ to converge quickly enough to 1.
\[ex:ch4esempi\] To get a grasp on Conditions (C1), (C2) we analyze here three different examples according to the different behavior of $\nu_0$ near $0\in I$. In all of these cases the parameter space ${\ensuremath {\mathscr{F}}}^{\nu_0, I}$ will be a subclass of ${\ensuremath {\mathscr{F}}}_{(\gamma,K,\kappa,M)}^I$ defined as in . Recall that the conditions (C1), (C2) and (C2’) depend on the choice of sequences $m_n$, $\varepsilon_m$ and functions $V_j$. For the first two of the three examples, where $I = [0,1]$, we will make the standard choice for $V_j$ of triangular and trapezoidal functions, similarly to those in Example \[ex:Vj\]. Namely, for $j = 3, \dots, m-1$ we have $$\label{eq:ch4vj}
V_j(x) = {\ensuremath {\mathbb{I}}}_{(x_{j-1}^*, x_j^*]}(x) \frac{x-x_{j-1}^*}{x_j^*-x_{j-1}^*} \frac{1}{\mu_m} + {\ensuremath {\mathbb{I}}}_{(x_{j}^*, x_{j+1}^*]}(x) \frac{x_{j+1}^*-x}{x_{j+1}^*-x_{j}^*} \frac{1}{\mu_m};$$ the two extremal functions $V_2$ and $V_m$ are chosen so that $V_2 \equiv \frac{1}{\mu_m}$ on $(\varepsilon_m, x_2^*]$ and $V_m \equiv \frac{1}{\mu_m}$ on $(x_m^*, 1]$. In the second example, where $\nu_0$ is infinite, one is forced to take $\varepsilon_m > 0$ and to keep in mind that the $x_j^*$ are not uniformly distributed on $[\varepsilon_m,1]$. Proofs of all the statements here can be found in Section \[subsec:esempi\].
**1. The finite case:** $\nu_0\equiv {\ensuremath{\textnormal{Leb}}}([0,1])$.
In this case we are free to choose ${\ensuremath {\mathscr{F}}}^{{\ensuremath{\textnormal{Leb}}}, [0,1]} = {\ensuremath {\mathscr{F}}}_{(\gamma, K, \kappa, M)}^{[0,1]}$. Indeed, as $\nu_0$ is finite, there is no need to single out the first interval $J_1=[0,\varepsilon_m]$, so that $C_m(f)$ does not enter in the proofs and the definitions of $A_m(f)$ and $B_m(f)$ involve integrals on the whole of $[0,1]$. Also, the choice of the $V_j$’s as in guarantees that $\int_0^1 V_j(x) dx = 1$. Then, the quantities $\|f-\hat f_m\|_{L_2([0,1])}$, $A_m(f)$ and $B_m(f)$ all have the same rate of convergence, which is given by: $$\sqrt{\int_0^1\Big(f(x)-\hat f_m(x)\Big)^2 \nu_0(dx)}+A_m(f)+B_m(f)=O\Big(m^{-\gamma-1}+m^{-\frac{3}{2}}\Big),$$ uniformly on $f$. See Section \[subsec:esempi\] for a proof.
**2. The finite variation case:** $\frac{d\nu_0}{d{\ensuremath{\textnormal{Leb}}}}(x)=x^{-1}{\ensuremath {\mathbb{I}}}_{[0,1]}(x)$.
In this case, the parameter space ${\ensuremath {\mathscr{F}}}^{\nu_0, [0,1]}$ is a proper subset of ${\ensuremath {\mathscr{F}}}_{(\gamma, K, \kappa, M)}^{[0,1]}$. Indeed, as we are obliged to choose $\varepsilon_m > 0$, we also need to impose that $C_m(f) = o\big(\frac{1}{n\sqrt{\Delta_n}}\big)$, with uniform constants with respect to $f$, that is, that all $f \in {\ensuremath {\mathscr{F}}}$ converge to 1 quickly enough as $x \to 0$. Choosing $\varepsilon_m = m^{-1-\alpha}$, $\alpha> 0$ we have that $\mu_m=\frac{\ln (\varepsilon_m^{-1})}{m-1}$, $v_j =\varepsilon_m^{\frac{m-j}{m-1}}$ and $x_j^* =\frac{(v_{j}-v_{j-1})}{\mu_m}$. In particular, $\max_j|v_{j-1}-v_j|=|v_m-v_{m-1}|=O\Big(\frac{\ln m}{m}\Big)$. Also in this case one can prove that the standard choice of $V_j$ described above leads to $\int_{\varepsilon_m}^1 V_j(x) \frac{dx}{x} = 1$. Again, the quantities $\|f-\hat f_m\|_{L_2(\nu_0|{I\setminus{[0,\varepsilon_m]}})}$, $A_m(f)$ and $B_m(f)$ have the same rate of convergence given by: $$\label{eq:ch4ex2}
\sqrt{\int_{\varepsilon_m}^1\Big(f(x)-\hat f_m(x)\Big)^2 \nu_0(dx)} +A_m(f)+B_m(f)=O\bigg(\bigg(\frac{\ln m}{m}\bigg)^{\gamma+1} \sqrt{\ln (\varepsilon_m^{-1})}\bigg),$$ uniformly on $f$. The condition on $C_m(f)$ depends on the behavior of $f$ near $0$. For example, it is ensured if one considers a parametric family of the form $f(x)=e^{-\lambda x}$ with a bounded $\lambda > 0$. See Section \[subsec:esempi\] for a proof.
**3. The infinite variation, non-compactly supported case:** $\frac{d\nu_0}{d{\ensuremath{\textnormal{Leb}}}}(x)=x^{-2}{\ensuremath {\mathbb{I}}}_{{\ensuremath {\mathbb{R}}}_+}(x)$.
This example involves significantly more computations than the preceding ones, since the classical triangular choice for the functions $V_j$ would not have integral equal to 1 (with respect to $\nu_0$), and the support is not compact. The parameter space ${\ensuremath {\mathscr{F}}}^{\nu_0, [0, \infty)}$ can still be chosen as a proper subclass of ${\ensuremath {\mathscr{F}}}_{(\gamma, K, \kappa, M)}^{[0,\infty)}$, again by imposing that $C_m(f)$ converges to zero quickly enough (more details about this condition are discussed in Example \[ex3\]). We divide the interval $[0, \infty)$ in $m$ intervals $J_j = [v_{j-1}, v_j)$ with: $$v_0 = 0; \quad v_1 = \varepsilon_m; \quad v_j = \frac{\varepsilon_m(m-1)}{m-j};\quad v_m = \infty; \quad \mu_m = \frac{1}{\varepsilon_m(m-1)}.$$ To deal with the non-compactness problem, we choose some “horizon” $H(m)$ that goes to infinity slowly enough as $m$ goes to infinity and we bound the $L_2$ distance between $f$ and $\hat f_m$ for $x > H(m)$ by $2\sup\limits_{x\geq H(m)}\frac{f(x)^2}{H(m)}$. We have: $$\|f-\hat f_m\|_{L_2(\nu_0|{I\setminus{[0,\varepsilon_m]}})}^2+A_m^2(f)+B_m^2(f)=O\bigg(\frac{H(m)^{3+4\gamma}}{(\varepsilon_m m)^{2+2\gamma}}+\sup_{x\geq H(m)}\frac{f(x)^2}{H(m)}\bigg).$$ In the general case where the best estimate for $\displaystyle{\sup_{x\geq H(m)}f(x)^2}$ is simply given by $M^2$, an optimal choice for $H(m)$ is $\sqrt{\varepsilon_m m}$, that gives a rate of convergence: $$\|f-\hat f_m\|_{L_2(\nu_0|{I\setminus{[0,\varepsilon_m]}})}^2+A_m^2(f)+B_m^2(f) =O\bigg( \frac{1}{\sqrt{\varepsilon_m m}}\bigg),$$ independently of $\gamma$. See Section \[subsec:esempi\] for a proof.
Definition of the experiments {#subsec:ch4experiments}
-----------------------------
Let $(x_t)_{t\geq 0}$ be the canonical process on the Skorokhod space $(D,{\ensuremath {\mathscr{D}}})$ and denote by $P^{(b,0,\nu)}$ the law induced on $(D,{\ensuremath {\mathscr{D}}})$ by a Lévy process with characteristic triplet $(b,0,\nu)$. We will write $P_t^{(b,0,\nu)}$ for the restriction of $P^{(b,0,\nu)}$ to the $\sigma$-algebra ${\ensuremath {\mathscr{D}}}_t$ generated by $\{x_s:0\leq s\leq t\}$ (see \[sec:ch4levy\] for the precise definitions). Let $Q_t^{(b,0,\nu)}$ be the marginal law at time $t$ of a Lévy process with characteristic triplet ${(b,0,\nu)}$. In the case where $\int_{|y|\leq 1}|y|\nu(dy)<\infty$ we introduce the notation $\gamma^{\nu}:=\int_{|y|\leq 1}y\nu(dy)$; then, Condition (H2) guarantees the finiteness of $\gamma^{\nu-\nu_0}$ (see Remark 33.3 in [@sato] for more details).
Recall that we introduced the discretization $t_i=T_n\frac{i}{n}$ of $[0,T_n]$ and denote by $\textbf Q_n^{(\gamma^{\nu-\nu_0},0,\nu)}$ the laws of the $n+1$ marginals of $(x_t)_{t\geq 0}$ at times $t_i$, $i=0,\dots,n$. We will consider the following statistical models, depending on a fixed, possibly infinite, Lévy measure $\nu_0$ concentrated on $I$ (clearly, the models with the subscript $FV$ are meaningful only under the assumption (FV)): $$\begin{aligned}
{\ensuremath {\mathscr{P}}}_{n,FV}^{\nu_0}&=\bigg(D,{\ensuremath {\mathscr{D}}}_{T_n},\Big\{P_{T_n}^{(\gamma^{\nu},0,\nu)}:f:=\frac{d\nu}{d\nu_0}\in{\ensuremath {\mathscr{F}}}^{\nu_0,I}\Big\}\bigg),\\
{\ensuremath {\mathscr{Q}}}_{n,FV}^{\nu_0}&=\bigg({\ensuremath {\mathbb{R}}}^{n+1},{\ensuremath {\mathscr{B}}}({\ensuremath {\mathbb{R}}}^{n+1}),\Big\{ \textbf Q_{n}^{(\gamma^{\nu},0,\nu)}:f:=\frac{d\nu}{d\nu_0}\in{\ensuremath {\mathscr{F}}}^{\nu_0,I}\Big\}\bigg),\\
{\ensuremath {\mathscr{P}}}_{n}^{\nu_0}&=\bigg(D,{\ensuremath {\mathscr{D}}}_{T_n},\Big\{P_{T_n}^{(\gamma^{\nu-\nu_0},0,\nu)}:f:=\frac{d\nu}{d\nu_0}\in{\ensuremath {\mathscr{F}}}^{\nu_0,I}\Big\}\bigg),\\
{\ensuremath {\mathscr{Q}}}_{n}^{\nu_0}&=\bigg({\ensuremath {\mathbb{R}}}^{n+1},{\ensuremath {\mathscr{B}}}({\ensuremath {\mathbb{R}}}^{n+1}),\Big\{\textbf Q_{n}^{(\gamma^{\nu-\nu_0},0,\nu)}:f:=\frac{d\nu}{d\nu_0}\in{\ensuremath {\mathscr{F}}}^{\nu_0,I}\Big\}\bigg).
\end{aligned}$$ Finally, let us introduce the Gaussian white noise model that will appear in the statement of our main results. For that, let us denote by $(C(I),{\ensuremath {\mathscr{C}}})$ the space of continuous mappings from $I$ into ${\ensuremath {\mathbb{R}}}$ endowed with its standard filtration, by $g$ the density of $\nu_0$ with respect to the Lebesgue measure. We will require $g>0$ and let $\mathbb W_n^f$ be the law induced on $(C(I),{\ensuremath {\mathscr{C}}})$ by the stochastic process satisfying: $$\begin{aligned}
\label{eqn:ch4Wf}
dy_t=\sqrt{f(t)}dt+\frac{dW_t}{2\sqrt{T_n}\sqrt{g(t)}}, \quad t\in I,\end{aligned}$$ where $(W_t)_{t\in{\ensuremath {\mathbb{R}}}}$ denotes a Brownian motion on ${\ensuremath {\mathbb{R}}}$ with $W_0=0$. Then we set: $${\ensuremath {\mathscr{W}}}_n^{\nu_0}=\Big(C(I),{\ensuremath {\mathscr{C}}},\{\mathbb W_n^{f}:f\in{\ensuremath {\mathscr{F}}}^{\nu_0,I}\}\Big).$$ Observe that when $\nu_0$ is a finite Lévy measure, then ${\ensuremath {\mathscr{W}}}_n^{\nu_0}$ is equivalent to the statistical model associated with the continuous observation of a process $(\tilde y_t)_{t\in I}$ defined by: $$\begin{aligned}
d\tilde y_t=\sqrt{f(t)g(t)}dt+\frac{d W_t}{2\sqrt{T_n}}, \quad t\in I.\end{aligned}$$
Main results {#subsec:ch4mainresults}
------------
Using the notation introduced in Section \[subsec:ch4parameter\], we now state our main results. For brevity of notation, we will denote by $H(f,\hat f_m)$ (resp. $L_2(f,\hat f_m)$) the Hellinger distance (resp. the $L_2$ distance) between the Lévy measures $\nu$ and $\hat\nu_m$ restricted to $I\setminus{[-\varepsilon_m,\varepsilon_m]}$, i.e.: $$\begin{aligned}
H^2(f,\hat f_m)&:=\int_{I\setminus{[-\varepsilon_m,\varepsilon_m]}}\Big(\sqrt{f(x)}-\sqrt{\hat f_m(x)}\Big)^2 \nu_0(dx),\\
L_2(f,\hat f_m)^2&:=\int_{I\setminus{[-\varepsilon_m,\varepsilon_m]}}\big(f(y)-\hat f_m(y)\big)^2\nu_0(dy).\end{aligned}$$ Observe that Condition (H1) implies (see Lemma \[lemma:ch4hellinger\]) $$\frac{1}{4M}L_2(f,\hat f_m)^2\leq H^2(f,\hat f_m)\leq \frac{1}{4\kappa}L_2(f,\hat f_m)^2.$$
\[ch4teo1\] Let $\nu_0$ be a known Lévy measure concentrated on a (possibly infinite) interval $I\subseteq {\ensuremath {\mathbb{R}}}$ and having strictly positive density with respect to the Lebesgue measure. Let us choose a parameter space ${\ensuremath {\mathscr{F}}}^{\nu_0, I}$ such that there exist a sequence $m = m_n$ of integers, functions $V_j$, $j = \pm 2, \dots, \pm m$ and a sequence $\varepsilon_m \to 0$ as $m \to \infty$ such that Conditions [(H1), (C1), (C2)]{.nodecor} are satisfied for ${\ensuremath {\mathscr{F}}}= {\ensuremath {\mathscr{F}}}^{\nu_0, I}$. Then, for $n$ big enough we have: $$\begin{aligned}
\Delta({\ensuremath {\mathscr{P}}}_n^{\nu_0}, {\ensuremath {\mathscr{W}}}_n^{\nu_0}) &= O\bigg(\sqrt{n\Delta_n}\sup_{f\in {\ensuremath {\mathscr{F}}}}\Big(A_m(f)+B_m(f)+C_m(f)\Big)\bigg) \nonumber \\
& +O\bigg(\sqrt{n\Delta_n}\sup_{f\in{\ensuremath {\mathscr{F}}}}L_2(f, \hat f_m)+\sqrt{\frac{m}{n\Delta_n}\Big(\frac{1}{\mu_m}+\frac{1}{\mu_m^-}\Big)}\bigg). \label{eq:teo1}\end{aligned}$$
\[ch4teo2\] Let $\nu_0$ be a known Lévy measure concentrated on a (possibly infinite) interval $I\subseteq {\ensuremath {\mathbb{R}}}$ and having strictly positive density with respect to the Lebesgue measure. Let us choose a parameter space ${\ensuremath {\mathscr{F}}}^{\nu_0, I}$ such that there exist a sequence $m = m_n$ of integers, functions $V_j$, $j = \pm 2, \dots, \pm m$ and a sequence $\varepsilon_m \to 0$ as $m \to \infty$ such that Conditions [(H1), (C1), (C2’)]{.nodecor} are satisfied for ${\ensuremath {\mathscr{F}}}= {\ensuremath {\mathscr{F}}}^{\nu_0, I}$. Then, for $n$ big enough we have: $$\begin{aligned}
\Delta({\ensuremath {\mathscr{Q}}}_n^{\nu_0}, {\ensuremath {\mathscr{W}}}_n^{\nu_0})& = O\bigg( \nu_0\Big(I\setminus[-\varepsilon_m,\varepsilon_m]\Big)\sqrt{n\Delta_n^2}+\frac{m\ln m}{\sqrt{n}}+\sqrt{n\sqrt{\Delta_n}\sup_{f\in{\ensuremath {\mathscr{F}}}}C_m(f)}\bigg) \nonumber \\
&+O\bigg(\sqrt{n\Delta_n}\sup_{f\in{\ensuremath {\mathscr{F}}}}\Big(A_m(f)+B_m(f)+H(f,\hat f_m)\Big)\bigg).\label{eq:teo2}\end{aligned}$$
\[cor:ch4generale\] Let $\nu_0$ be as above and let us choose a parameter space ${\ensuremath {\mathscr{F}}}^{\nu_0, I}$ so that there exist sequences $m_n'$, $\varepsilon_m'$, $V_j'$ and $m_n''$, $\varepsilon_m''$, $V_j''$ such that:
- Conditions (H1), (C1) and (C2) hold for $m_n'$, $\varepsilon_m'$, $V_j'$, and $\frac{m'}{n\Delta_n}\Big(\frac{1}{\mu_{m'}}+\frac{1}{\mu_{m'}^-}\Big)$ tends to zero.
- Conditions (H1), (C1) and (C2’) hold for $m_n''$, $\varepsilon_m''$, $V_j''$, and $\nu_0\Big(I\setminus[-\varepsilon_{m''},\varepsilon_{m''}]\Big)\sqrt{n\Delta_n^2}+\frac{m''\ln m''}{\sqrt{n}}$ tends to zero.
Then the statistical models ${\ensuremath {\mathscr{P}}}_{n}^{\nu_0}$ and ${\ensuremath {\mathscr{Q}}}_{n}^{\nu_0}$ are asymptotically equivalent: $$\lim_{n\to\infty}\Delta({\ensuremath {\mathscr{P}}}_{n}^{\nu_0},{\ensuremath {\mathscr{Q}}}_{n}^{\nu_0})=0.$$
If, in addition, the Lévy measures have finite variation, i.e. if we assume (FV), then the same results hold replacing ${\ensuremath {\mathscr{P}}}_{n}^{\nu_0}$ and ${\ensuremath {\mathscr{Q}}}_{n}^{\nu_0}$ by ${\ensuremath {\mathscr{P}}}_{n,FV}^{\nu_0}$ and ${\ensuremath {\mathscr{Q}}}_{n,FV}^{\nu_0}$, respectively (see Lemma \[ch4LC\]).
Examples {#sec:ch4experiments}
========
We will now analyze three different examples, underlining the different behaviors of the Lévy measure $\nu_0$ (respectively, finite, infinite with finite variation and infinite with infinite variation). The three chosen Lévy measures are ${\ensuremath {\mathbb{I}}}_{[0,1]}(x) dx$, ${\ensuremath {\mathbb{I}}}_{[0,1]}(x) \frac{dx}{x}$ and ${\ensuremath {\mathbb{I}}}_{{\ensuremath {\mathbb{R}}}_+}(x)\frac{dx}{x^2}$. In all three cases we assume the parameter $f$ to be uniformly bounded and with uniformly $\gamma$-Hölder derivatives: We will describe adequate subclasses ${\ensuremath {\mathscr{F}}}^{\nu_0, I} \subseteq {\ensuremath {\mathscr{F}}}_{(\gamma, K, \kappa, M)}^I$ defined as in . It seems very likely that the same results that are highlighted in these examples hold true for more general Lévy measures; however, we limit ourselves to these examples in order to be able to explicitly compute the quantities involved ($v_j$, $x_j^*$, etc.) and hence estimate the distance between $f$ and $\hat f_m$ as in Examples \[ex:ch4esempi\].
In the first of the three examples, where $\nu_0$ is the Lebesgue measure on $I=[0,1]$, we are considering the statistical models associated with the discrete and continuous observation of a compound Poisson process with Lévy density $f$. Observe that ${\ensuremath {\mathscr{W}}}_n^{{\ensuremath{\textnormal{Leb}}}}$ reduces to the statistical model associated with the continuous observation of a trajectory from: $$dy_t=\sqrt{f(t)}dt+\frac{1}{2\sqrt{T_n}}dW_t,\quad t\in [0,1].$$ In this case we have:
\[ex:ch4CPP\](Finite Lévy measure). Let $\nu_0$ be the Lebesgue measure on $I=[0,1]$ and let ${\ensuremath {\mathscr{F}}}= {\ensuremath {\mathscr{F}}}^{{\ensuremath{\textnormal{Leb}}}, [0,1]}$ be any subclass of ${\ensuremath {\mathscr{F}}}_{(\gamma, K, \kappa, M)}^{[0,1]}$ for some strictly positive constants $K$, $\kappa$, $M$ and $\gamma\in(0,1]$. Then: $$\lim_{n\to\infty}\Delta({\ensuremath {\mathscr{P}}}_{n,FV}^{{\ensuremath{\textnormal{Leb}}}},{\ensuremath {\mathscr{W}}}_n^{{\ensuremath{\textnormal{Leb}}}})=0 \ \textnormal{ and } \ \lim_{n\to\infty}\Delta({\ensuremath {\mathscr{Q}}}_{n,FV}^{{\ensuremath{\textnormal{Leb}}}},{\ensuremath {\mathscr{W}}}_n^{{\ensuremath{\textnormal{Leb}}}})=0.$$ More precisely, $$\Delta({\ensuremath {\mathscr{P}}}_{n,FV}^{{\ensuremath{\textnormal{Leb}}}},{\ensuremath {\mathscr{W}}}_n^{{\ensuremath{\textnormal{Leb}}}})=\begin{cases}O\Big((n\Delta_n)^{-\frac{\gamma}{4+2\gamma}}\Big)\quad \textnormal{if } \ \gamma\in\big(0,\frac{1}{2}\big],\\
O\Big((n \Delta_n)^{-\frac{1}{10}}\Big)\quad \textnormal{if } \ \gamma\in\big(\frac{1}{2},1\big].
\end{cases}$$ In the case where $\Delta_n = n^{-\beta}$, $\frac{1}{2} < \beta < 1$, an upper bound for the rate of convergence of $\Delta({\ensuremath {\mathscr{Q}}}_{n,FV}^{{\ensuremath{\textnormal{Leb}}}}, {\ensuremath {\mathscr{W}}}_n^{{\ensuremath{\textnormal{Leb}}}})$ is $$\Delta({\ensuremath {\mathscr{Q}}}_{n,FV}^{{\ensuremath{\textnormal{Leb}}}}, {\ensuremath {\mathscr{W}}}_n^{{\ensuremath{\textnormal{Leb}}}})=\begin{cases}
O\Big(n^{-\frac{\gamma+\beta}{4+2\gamma}}\ln n\Big)\quad \textnormal{if } \ \gamma\in\big(0,\frac{1}{2}\big) \text{ and }\frac{2+2\gamma}{3+2\gamma} \leq \beta < 1,\\
O\Big(n^{\frac{1}{2}-\beta}\ln n\Big)\quad \textnormal{if } \ \gamma\in\big(0,\frac{1}{2}\big) \text{ and } \frac{1}{2} < \beta < \frac{2+2\gamma}{3+2\gamma},\\
O\Big(n^{-\frac{2\beta+1}{10}}\ln n\Big)\quad \textnormal{if } \ \gamma\in\big[\frac{1}{2},1\big] \text{ and } \frac{3}{4} \leq \beta < 1,\\
O\Big(n^{\frac{1}{2}-\beta}\ln n\Big)\quad \textnormal{if } \ \gamma\in\big[\frac{1}{2},1\big] \text{ and } \frac{1}{2} < \beta < \frac{3}{4}.
\end{cases}$$ See Section \[subsec:ch4ex1\] for a proof.
\[ch4ex2\](Infinite Lévy measure with finite variation). Let $X$ be a truncated Gamma process with (infinite) Lévy measure of the form: $$\nu(A)=\int_A \frac{e^{-\lambda x}}{x}dx,\quad A\in{\ensuremath {\mathscr{B}}}([0,1]).$$ Here ${\ensuremath {\mathscr{F}}}^{\nu_0, I}$ is a 1-dimensional parametric family in $\lambda$, assuming that there exists a known constant $\lambda_0$ such that $0<\lambda\leq \lambda_0<\infty$, $f(t) = e^{-\lambda t}$ and $d\nu_0(x)=\frac{1}{x}dx$. In particular, the $f$ are Lipschitz, i.e. ${\ensuremath {\mathscr{F}}}^{\nu_0, [0,1]} \subset {\ensuremath {\mathscr{F}}}_{(\gamma = 1, K, \kappa, M)}^{[0,1]}$. The discrete or continuous observation (up to time $T_n$) of $X$ are asymptotically equivalent to ${\ensuremath {\mathscr{W}}}_n^{\nu_0}$, the statistical model associated with the observation of a trajectory of the process $(y_t)$: $$dy_t=\sqrt{f(t)}dt+\frac{\sqrt tdW_t}{2\sqrt{T_n}},\quad t\in[0,1].$$ More precisely, in the case where $\Delta_n = n^{-\beta}$, $\frac{1}{2} < \beta < 1$, an upper bound for the rate of convergence of $\Delta({\ensuremath {\mathscr{Q}}}_{n,FV}^{\nu_0}, {\ensuremath {\mathscr{W}}}_n^{\nu_0})$ is $$\Delta({\ensuremath {\mathscr{Q}}}_{n,FV}^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0}) = \begin{cases}
O\big(n^{\frac{1}{2}-\beta} \ln n\big) & \text{if } \frac{1}{2} < \beta \leq \frac{9}{10}\\
O\big(n^{-\frac{1+2\beta}{7}} \ln n\big) & \text{if } \frac{9}{10} < \beta < 1.
\end{cases}$$ Concerning the continuous setting we have: $$\Delta({\ensuremath {\mathscr{P}}}_{n,FV}^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0})=O\Big(n^{\frac{\beta-1}{6}} \big(\ln n\big)^{\frac{5}{2}}\Big) = O\Big(T_n^{-\frac{1}{6}} \big(\ln T_n\big)^\frac{5}{2}\Big).$$ See Section \[subsec:ch4ex2\] for a proof.
\[ex3\](Infinite Lévy measure, infinite variation). Let $X$ be a pure jump Lévy process with infinite Lévy measure of the form: $$\nu(A)=\int_A \frac{2-e^{-\lambda x^3}}{x^2}dx,\quad A\in{\ensuremath {\mathscr{B}}}({\ensuremath {\mathbb{R}}}^+).$$ Again, we are considering a parametric family in $\lambda > 0$, assuming that the parameter stays bounded below a known constant $\lambda_0$. Here, $f(t) =2- e^{-\lambda t^3}$, hence $1\leq f(t)\leq 2$, for all $t\geq 0$, and $f$ is Lipschitz, i.e. ${\ensuremath {\mathscr{F}}}^{\nu_0, {\ensuremath {\mathbb{R}}}_+} \subset {\ensuremath {\mathscr{F}}}_{(\gamma = 1, K, \kappa, M)}^{{\ensuremath {\mathbb{R}}}_+}$. The discrete or continuous observations (up to time $T_n$) of $X$ are asymptotically equivalent to the statistical model associated with the observation of a trajectory of the process $(y_t)$: $$dy_t=\sqrt{f(t)}dt+\frac{tdW_t}{2\sqrt{T_n}},\quad t\geq 0.$$ More precisely, in the case where $\Delta_n = n^{-\beta}$, $0 < \beta < 1$, an upper bound for the rate of convergence of $\Delta({\ensuremath {\mathscr{Q}}}_{n}^{\nu_0}, {\ensuremath {\mathscr{W}}}_n^{\nu_0})$ is $$\Delta({\ensuremath {\mathscr{Q}}}_{n}^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0}) = \begin{cases}
O\big(n^{\frac{1}{2} - \frac{2}{3}\beta}\big)& \text{if } \frac{3}{4} < \beta < \frac{12}{13}\\
O\big(n^{-\frac{1}{6}+\frac{\beta}{18}} (\ln n)^{\frac{7}{6}}\big) &\text{if } \frac{12}{13}\leq \beta<1.
\end{cases}$$
In the continuous setting, we have $$\Delta({\ensuremath {\mathscr{P}}}_{n}^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0})=O\big(n^{\frac{3\beta-3}{34}}(\ln n)^{\frac{7}{6}}\big) = O\big(T_n^{-\frac{3}{34}} (\ln T_n)^{\frac{7}{6}}\big).$$ See Section \[subsec:ch4ex3\] for a proof.
Proofs of the main results {#sec:ch4proofs}
==========================
In order to simplify notations, the proofs will be presented in the case $I\subseteq {\ensuremath {\mathbb{R}}}^+$. Nevertheless, this allows us to present all the main difficulties, since they can only appear near 0. To prove Theorems \[ch4teo1\] and \[ch4teo2\] we need to introduce several intermediate statistical models. In that regard, let us denote by $Q_j^f$ the law of a Poisson random variable with mean $T_n\nu(J_j)$ (see for the definition of $J_{j}$). We will denote by $\mathscr{L}_m$ the statistical model associated with the family of probabilities $\big\{\bigotimes_{j=2}^m Q_j^f:f\in{\ensuremath {\mathscr{F}}}\big\}$: $$\label{eq:ch4l}
\mathscr{L}_m=\bigg(\bar{{\ensuremath {\mathbb{N}}}}^{m-1},\mathcal P(\bar{{\ensuremath {\mathbb{N}}}}^{m-1}), \bigg\{\bigotimes_{j=2}^m Q_j^f:f\in{\ensuremath {\mathscr{F}}}\bigg\}\bigg).$$
By $N_{j}^f$ we mean the law of a Gaussian random variable ${\ensuremath {\mathscr{Nn}}}(2\sqrt{T_n\nu(J_j)},1)$ and by $\mathscr{N}_m$ the statistical model associated with the family of probabilities $\big\{\bigotimes_{j=2}^m N_j^f:f\in{\ensuremath {\mathscr{F}}}\big\}$: $$\label{eq:ch4n}
\mathscr{N}_m=\bigg({\ensuremath {\mathbb{R}}}^{m-1},\mathscr B({\ensuremath {\mathbb{R}}}^{m-1}), \bigg\{\bigotimes_{j=2}^m N_j^f:f\in{\ensuremath {\mathscr{F}}}\bigg\}\bigg).$$
For each $f\in{\ensuremath {\mathscr{F}}}$, let $\bar \nu_m$ be the measure having $\bar f_m$ as a density with respect to $\nu_0$ where, for every $f\in{\ensuremath {\mathscr{F}}}$, $\bar f_m$ is defined as follows. $$\label{eq:ch4barf}
\bar f_m(x):=
\begin{cases}
\quad 1 & \textnormal{if } x\in J_1,\\
\frac{\nu(J_j)}{{\nu_0}(J_{j})} & \textnormal{if } x\in J_{j}, \quad j = 2,\dots,m.
\end{cases}$$ Furthermore, define $$\label{eq:ch4modellobar}
\bar{\ensuremath {\mathscr{P}}}_{n}^{\nu_0}=\bigg(D,{\ensuremath {\mathscr{D}}}_{T_n},\Big\{P_{T_n}^{(\gamma^{\bar \nu_m-\nu_0},0,\bar\nu_m)}:\frac{d\bar\nu_m}{d\nu_0}\in{\ensuremath {\mathscr{F}}}\Big\}\bigg).$$
Proof of Theorem \[ch4teo1\]
----------------------------
We begin by a series of lemmas that will be needed in the proof. Before doing so, let us underline the scheme of the proof. We recall that the goal is to prove that estimating $f=\frac{d\nu}{d\nu_0}$ from the continuous observation of a Lévy process $(X_t)_{t\in[0,T_n]}$ without Gaussian part and having Lévy measure $\nu$ is asymptotically equivalent to estimating $f$ from the Gaussian white noise model:
$$dy_t=\sqrt{f(t)}dt+\frac{1}{2\sqrt{T_n g(t)}}dW_t,\quad g=\frac{d\nu_0}{d{\ensuremath{\textnormal{Leb}}}},\quad t\in I.$$
Also, recall the definition of $\hat \nu_m$ given in and read ${\ensuremath {\mathscr{P}}}_1 \overset{\Delta} \Longleftrightarrow {\ensuremath {\mathscr{P}}}_2$ as ${\ensuremath {\mathscr{P}}}_1$ is asymptotically equivalent to ${\ensuremath {\mathscr{P}}}_2$. Then, we can outline the proof in the following way.
- Step 1: $P_{T_n}^{(\gamma^{\nu-\nu_0},0,\nu)} \overset{\Delta} \Longleftrightarrow P_{T_n}^{(\gamma^{\hat\nu_m-\nu_0},0,\hat\nu_m)}$;
- Step 2: $P_{T_n}^{(\gamma^{\hat\nu_m-\nu_0},0,\hat\nu_m)} \overset{\Delta} \Longleftrightarrow \bigotimes_{j=2}^m {\ensuremath {\mathscr{P}}}(T_n\nu(J_j))$ (Poisson approximation).
Here $\bigotimes_{j=2}^m {\ensuremath {\mathscr{P}}}(T_n\nu(J_j))$ represents a statistical model associated with the observation of $m-1$ independent Poisson r.v. of parameters $T_n\nu(J_j)$;
- Step 3: $\bigotimes_{j=2}^m {\ensuremath {\mathscr{P}}}(T_n \nu(J_j)) \overset{\Delta} \Longleftrightarrow \bigotimes_{j=2}^m {\ensuremath {\mathscr{Nn}}}(2\sqrt{T_n\nu(J_j)},1)$ (Gaussian approximation);
- Step 4: $\bigotimes_{j=2}^m {\ensuremath {\mathscr{Nn}}}(2\sqrt{T_n\nu(J_j)},1)\overset{\Delta} \Longleftrightarrow (y_t)_{t\in I}$.
Lemmas \[lemma:ch4poisson\]–\[lemma:ch4kernel\], below, are the key ingredients of Step 2.
\[lemma:ch4poisson\] Let $\bar{\ensuremath {\mathscr{P}}}_{n}^{\nu_0}$ and $\mathscr{L}_m$ be the statistical models defined in and , respectively. Under the Assumption (H2) we have: $$\Delta(\bar{\ensuremath {\mathscr{P}}}_{n}^{\nu_0}, \mathscr{L}_m)=0, \textnormal{ for all } m.$$
Set $\bar {\ensuremath {\mathbb{N}}}={\ensuremath {\mathbb{N}}}\cup \{\infty\}$ and consider the statistic $S:(D,{\ensuremath {\mathscr{D}}}_{T_n})\to \big(\bar{\ensuremath {\mathbb{N}}}^{m-1},\mathcal{P}(\bar{\ensuremath {\mathbb{N}}}^{m-1})\big)$ defined by $$\label{eq:ch4S}
S(x)=\Big(N_{T_n}^{x;\,2},\dots,N_{T_n}^{x;\,m}\Big)\quad \textnormal{with} \quad
N_{T_n}^{x;\,j}=\sum_{r\leq T_n}{\ensuremath {\mathbb{I}}}_{J_{j}}(\Delta x_r).$$ An application of Theorem \[ch4teosato\] to $P_{T_n}^{(\gamma^{\bar \nu_m-\nu_0},0,\bar \nu_m)}$ and $P_{T_n}^{(0,0,\nu_0)}$, yields $$\frac{d P_{T_n}^{(\gamma^{\bar \nu_m-\nu_0},0,\bar \nu_m)}}{dP_{T_n}^{(0,0,\nu_0)}}(x)=\exp\bigg(\sum_{j=2}^m \bigg(\ln\Big(\frac{\nu(J_j)}{\nu_0(J_j)}\Big)\bigg) N_{T_n}^{x;j}-T_n\int_I(\bar f_m(y)-1)\nu_0(dy)\bigg).$$ Hence, by means of the Fisher factorization theorem, we conclude that $S$ is a sufficient statistics for $\bar{\ensuremath {\mathscr{P}}}_{n}^{\nu_0}$. Furthermore, under $P_{T_n}^{(\gamma^{\bar \nu_m-\nu_0},0,\bar \nu_m)}$, the random variables $N_{T_n}^{x;j}$ have Poisson distributions $Q_{j}^f$ with means $T_n\nu(J_j)$. Then, by means of Property \[ch4fatto3\], we get $\Delta(\bar{\ensuremath {\mathscr{P}}}_{n}^{\nu_0}, \mathscr{L}_m)=0, \textnormal{ for all } m.$
Let us denote by $\hat Q_j^f$ the law of a Poisson random variable with mean $T_n\int_{J_j}\hat f_m(y)\nu_0(dy)$ and let $\hat{\mathscr{L}}_m$ be the statistical model associated with the family of probabilities $\{\bigotimes_{j=2}^m \hat Q_j^f:f\in {\ensuremath {\mathscr{F}}}\}$.
\[lemma:ch4poissonhatf\] $$\Delta(\mathscr L_m,\hat{\mathscr{L}}_m)\leq \sup_{f\in {\ensuremath {\mathscr{F}}}}\sqrt{\frac{T_n}{\kappa}\int_{I\setminus[0,\varepsilon_m]}\big(f(y)-\hat f_m(y)\big)^2\nu_0(dy)}.$$
By means of Facts \[ch4h\]–\[fact:ch4hellingerpoisson\], we get: $$\begin{aligned}
\Delta(\mathscr L_m,\hat{\mathscr{L}}_m)&\leq \sup_{f\in{\ensuremath {\mathscr{F}}}}H\bigg(\bigotimes_{j=2}^m Q_j^f,\bigotimes_{j=2}^m \hat Q_j^f\bigg)\\
&\leq \sup_{f\in{\ensuremath {\mathscr{F}}}}\sqrt{\sum_{j=2}^m 2 H^2(Q_j^f,\hat Q_j^f)}\\
& =\sup_{f\in{\ensuremath {\mathscr{F}}}}\sqrt 2\sqrt{\sum_{j=2}^m\bigg(1-\exp\bigg(-\frac{T_n}{2}\bigg[\sqrt{\int_{J_j}\hat f_m(y)\nu_0(dy)}-\sqrt{\int_{J_j} f(y)\nu_0(dy)}\bigg]^2\bigg)\bigg)}.\end{aligned}$$ By making use of the fact that $1-e^{-x}\leq x$ for all $x\geq 0$ and the equality $\sqrt a-\sqrt b= \frac{a-b}{\sqrt a+\sqrt b}$ combined with the lower bound $f\geq \kappa$ (that also implies $\hat f_m\geq \kappa$) and finally the Cauchy-Schwarz inequality, we obtain: $$\begin{aligned}
&1-\exp\bigg(-\frac{T_n}{2}\bigg[\sqrt{\int_{J_j}\hat f_m(y)\nu_0(dy)}-\sqrt{\int_{J_j} f(y)\nu_0(dy)}\bigg]^2\bigg)\\
&\leq \frac{T_n}{2}\bigg[\sqrt{\int_{J_j}\hat f_m(y)\nu_0(dy)}-\sqrt{\int_{J_j} f(y)\nu_0(dy)}\bigg]^2\\
& \leq \frac{T_n}{2} \frac{\bigg(\int_{J_j}(f(y)-\hat f_m(y))\nu_0(dy)\bigg)^2}{\kappa \nu_0(J_j)}\\
&\leq \frac{T_n}{2\kappa} \int_{J_j}\big(f(y)-\hat f_m(y)\big)^2\nu_0(dy).
\end{aligned}$$ Hence, $$H\bigg(\bigotimes_{j=2}^m Q_j^f,\bigotimes_{j=2}^m \hat Q_j^f\bigg)\leq \sqrt{\frac{T_n}{\kappa}\int_{I\setminus[0,\varepsilon_m]}\big(f(y)-\hat f_m(y)\big)^2\nu_0(dy)}.$$
\[lemma:ch4kernel\] Let $\hat\nu_m$ and $\bar \nu_m$ be the Lévy measures defined as in and , respectively. For every $f\in {\ensuremath {\mathscr{F}}}$, there exists a Markov kernel $K$ such that $$KP_{T_n}^{(\gamma^{\bar\nu_m-\nu_0},0,\bar\nu_m)}=P_{T_n}^{(\gamma^{\hat \nu_m-\nu_0},0,\hat \nu_m)}.$$
By construction, $\bar\nu_m$ and $\hat\nu_m$ coincide on $[0,\varepsilon_m]$. Let us denote by $\bar \nu_m^{\textnormal{res}}$ and $\hat\nu_m^{\textnormal{res}}$ the restriction on $I\setminus[0,\varepsilon_m]$ of $\bar\nu_m$ and $\hat\nu_m$ respectively, then it is enough to prove: $KP_{T_n}^{(\gamma^{\bar\nu_m^{\textnormal{res}}-\nu_0},0,\bar\nu_m^{\textnormal{res}})}=P_{T_n}^{(\gamma^{\hat \nu_m^{\textnormal{res}}-\nu_0},0,\hat \nu_m^{\textnormal{res}})}.$ First of all, let us observe that the kernel $M$: $$M(x,A)=\sum_{j=2}^m{\ensuremath {\mathbb{I}}}_{J_j}(x)\int_A V_j(y)\nu_0(dy),\quad x\in I\setminus[0,\varepsilon_m],\quad A\in{\ensuremath {\mathscr{B}}}(I\setminus[0,\varepsilon_m])$$ is defined in such a way that $M \bar\nu_m^{\textnormal{res}} = \hat \nu_m^{\textnormal{res}}$. Indeed, for all $A\in{\ensuremath {\mathscr{B}}}(I\setminus[0,\varepsilon_m])$, $$\begin{aligned}
M\bar\nu_m^{\textnormal{res}}(A)&=\sum_{j=2}^m\int_{J_j}M(x,A)\bar\nu_m^{\textnormal{res}}(dx)=\sum_{j=2}^m \int_{J_j}\bigg(\int_A V_j(y)\nu_0(dy)\bigg)\bar\nu_m^{\textnormal{res}}(dx)\nonumber\\
&=\sum_{j=2}^m \bigg(\int_A V_j(y)\nu_0(dy)\bigg)\nu(J_j)=\int_A \hat f_m(y)\nu_0(dy)=\hat \nu_m^{\textnormal{res}}(A). \label{eqn:M}
\end{aligned}$$
Observe that $(\gamma^{\bar\nu_m^{\textnormal{res}}-\nu_0},0,\bar\nu_m^{\textnormal{res}})$ and $(\gamma^{\hat \nu_m^{\textnormal{res}}-\nu_0},0,\hat \nu_m^{\textnormal{res}})$ are Lévy triplets associated with compound Poisson processes since $\bar\nu_m^{\textnormal{res}}$ and $\hat \nu_m^{\textnormal{res}}$ are finite Lévy measures. The Markov kernel $K$ interchanging the laws of the Lévy processes is constructed explicitly in the case of compound Poisson processes. Indeed if $\bar X$ is the compound Poisson process having Lévy measure $\bar\nu_m^{\textnormal{res}}$, then $\bar X_{t} = \sum_{i=1}^{N_t} \bar Y_{i}$, where $N_t$ is a Poisson process of intensity $\iota_m:=\bar\nu_m^{\textnormal{res}}(I\setminus [0,\varepsilon_m])$ and the $\bar Y_{i}$ are i.i.d. random variables with probability law $\frac{1}{\iota_m}\bar\nu_m^{\textnormal{res}}$. Moreover, given a trajectory of $\bar X$, both the trajectory $(n_t)_{t\in[0,T_n]}$ of the Poisson process $(N_t)_{t\in[0,T_n]}$ and the realizations $\bar y_i$ of $\bar Y_i$, $i=1,\dots,n_{T_n}$ are uniquely determined. This allows us to construct $n_{T_n}$ i.i.d. random variables $\hat Y_i$ as follows: For every realization $\bar y_i$ of $\bar Y_i$, we define the realization $\hat y_i$ of $\hat Y_i$ by throwing it according to the probability law $M(\bar y_i,\cdot)$. Hence, thanks to , $(\hat Y_i)_i$ are i.i.d. random variables with probability law $\frac{1}{\iota_m} \hat \nu_m^{\text{res}}$. The desired Markov kernel $K$ (defined on the Skorokhod space) is then given by: $$K : (\bar X_{t})_{t\in[0,T_n]} \longmapsto \bigg(\hat X_{t} := \sum_{i=1}^{N_t} \hat Y_{i}\bigg)_{t\in[0,T_n]}.$$ Finally, observe that, since $$\begin{aligned}
\iota_m=\int_{I\setminus[0,\varepsilon_m]}\bar f_m(y)\nu_0(dy)&=\int_{I\setminus[0,\varepsilon_m]} f(y)\nu_0(dy)=\int_{I\setminus[0,\varepsilon_m]}\hat f_m(y)\nu_0(dy),
\end{aligned}$$ $(\hat X_t)_{t\in[0,T_n]}$ is a compound Poisson process with Lévy measure $\hat\nu_m^{\textnormal{res}}.$
Let us now state two lemmas needed to understand Step 4.
\[lemma:ch4wn\] Denote by ${\ensuremath {\mathscr{W}}}_m^\#$ the statistical model associated with the continuous observation of a trajectory from the Gaussian white noise: $$dy_t=\sqrt{f(t)}dt+\frac{1}{2\sqrt{T_n}\sqrt{g(t)}}dW_t,\quad t\in I\setminus [0,\varepsilon_m].$$ Then, in accordance with the notation introduced in Section \[subsec:ch4parameter\] and at the beginning of Section \[sec:ch4proofs\], we have $$\Delta(\mathscr{N}_m,{\ensuremath {\mathscr{W}}}_m^\#)\leq 2\sqrt{T_n}\sup_{f\in {\ensuremath {\mathscr{F}}}} \big(A_m(f)+B_m(f)\big).$$
As a preliminary remark observe that ${\ensuremath {\mathscr{W}}}_m^\#$ is equivalent to the model that observes a trajectory from: $$d\bar y_t=\sqrt{f(t)}g(t)dt+\frac{\sqrt{g(t)}}{2\sqrt{T_n}}dW_t,\quad t\in I\setminus [0,\varepsilon_m].$$ Let us denote by $\bar Y_j$ the increments of the process $(\bar y_t)$ over the intervals $J_j$, $j=2,\dots,m$, i.e. $$\bar Y_j:=\bar y_{v_j}-\bar y_{v_{j-1}}\sim{\ensuremath {\mathscr{Nn}}}\bigg(\int_{J_j}\sqrt{f(y)}\nu_0(dy),\frac{\nu_0(J_j)}{4T_n}\bigg)$$ and denote by $\bar{\mathscr{N}}_m$ the statistical model associated with the distributions of these increments. As an intermediate result, we will prove that $$\label{eq:ch4normali}
\Delta(\mathscr{N}_m,\bar{\mathscr{N}}_m)\leq 2\sqrt{T_n} \sup_{f\in {\ensuremath {\mathscr{F}}}} B_m(f), \ \textnormal{ for all } m.$$ To that aim, remark that the experiment $\bar{\mathscr{N}}_m$ is equivalent to observing $m-1$ independent Gaussian random variables of means $\frac{2\sqrt{T_n}}{\sqrt{\nu_0(J_j)}}\int_{J_j}\sqrt{f(y)}\nu_0(dy)$, $j=2,\dots,m$ and variances identically $1$, name this last experiment $\mathscr{N}^{\#}_m$. Hence, using also Property \[ch4delta0\], Facts \[ch4h\] and \[fact:ch4gaussiane\] we get: $$\begin{aligned}
\Delta(\mathscr{N}_m, \bar{\mathscr{N}}_m)\leq\Delta(\mathscr{N}_m, \mathscr{N}^{\#}_m)&\leq \sqrt{\sum_{j=2}^m\bigg(\frac{2\sqrt{T_n}}{\sqrt{\nu_0(J_j)}}\int_{J_j}\sqrt{f(y)}\nu_0(dy)-2\sqrt{T_n\nu(J_j)}\bigg)^2}.\end{aligned}$$ Since it is clear that $\delta({\ensuremath {\mathscr{W}}}_m^\#,\bar{\mathscr{N}}_m)=0$, in order to bound $\Delta(\mathscr{N}_m,{\ensuremath {\mathscr{W}}}_m^\#)$ it is enough to bound $\delta(\bar{\mathscr{N}}_m,{\ensuremath {\mathscr{W}}}_m^\#)$. Using similar ideas as in [@cmultinomial] Section 8.2, we define a new stochastic process as: $$Y_t^*=\sum_{j=2}^m\bar Y_j\int_{\varepsilon_m}^t V_j(y)\nu_0(dy)+\frac{1}{2\sqrt{T_n}}\sum_{j=2}^m\sqrt{\nu_0(J_j)}B_j(t),\quad t\in I\setminus [0,\varepsilon_m],$$ where the $(B_j(t))$ are independent centered Gaussian processes independent of $(W_t)$ and with variances $$\textnormal{Var}(B_j(t))=\int_{\varepsilon_m}^tV_j(y)\nu_0(dy)-\bigg(\int_{\varepsilon_m}^tV_j(y)\nu_0(dy)\bigg)^2.$$ These processes can be constructed from a standard Brownian bridge $\{B(s), s\in[0,1]\}$, independent of $(W_t)$, via $$B_i(t)=B\bigg(\int_{\varepsilon_m}^t V_i(y)\nu_0(dy)\bigg).$$ By construction, $(Y_t^*)$ is a Gaussian process with mean and variance given by, respectively: $$\begin{aligned}
{\ensuremath {\mathbb{E}}}[Y_t^*]&=\sum_{j=2}^m{\ensuremath {\mathbb{E}}}[\bar Y_j]\int_{\varepsilon_m}^t V_j(y)\nu_0(dy)=\sum_{j=2}^m\bigg(\int_{J_j}\sqrt{f(y)}\nu_0(dy)\bigg)\int_{\varepsilon_m}^t V_j(y)\nu_0(dy),\\
\textnormal{Var}[Y_t^*]&=\sum_{j=2}^m\textnormal{Var}[\bar Y_j]\bigg(\int_{\varepsilon_m}^t V_j(y)\nu_0(dy)\bigg)^2+\frac{1}{4T_n}\sum_{j=2}^m \nu_0(J_j)\textnormal{Var}(B_j(t))\\
&= \frac{1}{4T_n}\int_{\varepsilon_m}^t \sum_{j=2}^m \nu_0(J_j) V_j(y)\nu_0(dy)= \frac{1}{4T_n}\int_{\varepsilon_m}^t \nu_0(dy)=\frac{\nu_0([\varepsilon_m,t])}{4T_n}.\end{aligned}$$ One can compute in the same way the covariance of $(Y_t^*)$ finding that $$\textnormal{Cov}(Y_s^*,Y_t^*)=\frac{\nu_0([\varepsilon_m,s])}{4 T_n}, \ \forall s\leq t.$$ We can then deduce that $$Y^*_t=\int_{\varepsilon_m}^t \widehat{\sqrt {f}}_m(y)\nu_0(dy)+\int_{\varepsilon_m}^t\frac{\sqrt{g(s)}}{2\sqrt{T_n}}dW^*_s,\quad t\in I\setminus [0,\varepsilon_m],$$ where $(W_t^*)$ is a standard Brownian motion and $$\widehat{\sqrt {f}}_m(x):=\sum_{j=2}^m\bigg(\int_{J_j}\sqrt{f(y)}\nu_0(dy)\bigg)V_j(x).$$
Applying Fact \[fact:ch4processigaussiani\], we get that the total variation distance between the process $(Y_t^*)_{t\in I\setminus [0,\varepsilon_m]}$ constructed from the random variables $\bar Y_j$, $j=2,\dots,m$ and the Gaussian process $(\bar y_t)_{t\in I\setminus [0,\varepsilon_m]}$ is bounded by $$\sqrt{4 T_n\int_{I\setminus [0,\varepsilon_m]}\big(\widehat{\sqrt {f}}_m-\sqrt{f(y)}\big)^2\nu_0(dy)},$$ which gives the term in $A_m(f)$.
\[lemma:ch4limitewn\] In accordance with the notation of Lemma \[lemma:ch4wn\], we have: $$\label{eq:ch4wn}
\Delta({\ensuremath {\mathscr{W}}}_m^\#,{\ensuremath {\mathscr{W}}}_n^{\nu_0})=O\bigg(\sup_{f\in{\ensuremath {\mathscr{F}}}}\sqrt{T_n\int_0^{\varepsilon_m}\big(\sqrt{f(t)}-1\big)^2\nu_0(dt)}\bigg).$$
Clearly $\delta({\ensuremath {\mathscr{W}}}_n^{\nu_0},{\ensuremath {\mathscr{W}}}_m^\#)=0$. To show that $\delta({\ensuremath {\mathscr{W}}}_m^\#,{\ensuremath {\mathscr{W}}}_n^{\nu_0})\to 0$, let us consider a Markov kernel $K^\#$ from $C(I\setminus [0,\varepsilon_m])$ to $C(I)$ defined as follows: Introduce a Gaussian process, $(B_t^m)_{t\in[0,\varepsilon_m]}$ with mean equal to $t$ and covariance $$\textnormal{Cov}(B_s^m,B_t^m)=\int_0^{\varepsilon_m}\frac{1}{4 T_n g(s)}{\ensuremath {\mathbb{I}}}_{[0,s]\cap [0,t]}(z)dz.$$ In particular, $$\textnormal{Var}(B_t^m)=\int_0^t\frac{1}{4 T_n g(s)}ds.$$ Consider it as a process on the whole of $I$ by defining $B_t^m=B_{\varepsilon_m}^m$ $\forall t>\varepsilon_m$. Let $\omega_t$ be a trajectory in $C(I\setminus [0,\varepsilon_m])$, which again we constantly extend to a trajectory on the whole of $I$. Then, we define $K^\#$ by sending the trajectory $\omega_t$ to the trajectory $\omega_t + B_t^m$. If we define $\mathbb{\tilde W}_n$ as the law induced on $C(I)$ by $$d\tilde{y}_t = h(t) dt + \frac{dW_t}{2\sqrt{T_n g(t)}}, \quad t \in I,\quad h(t) = \begin{cases}
1 & t \in [0, \varepsilon_m]\\
\sqrt{f(t)} & t \in I\setminus [0,\varepsilon_m],
\end{cases}$$ then $K^\# \mathbb{W}_n^f|_{I\setminus [0,\varepsilon_m]} = \mathbb{\tilde W}_n$, where $\mathbb{W}_n^f$ is defined as in . By means of Fact \[fact:ch4processigaussiani\] we deduce .
The proof of the theorem follows by combining the previous lemmas together:
- Step 1: Let us denote by $\hat{\ensuremath {\mathscr{P}}}_{n,m}^{\nu_0}$ the statistical model associated with the family of probabilities $(P_{T_n}^{(\gamma^{\hat\nu_m-\nu_0},0,\hat\nu_m)}:\frac{d\nu}{d\nu_0}\in{\ensuremath {\mathscr{F}}})$. Thanks to Property \[ch4delta0\], Fact \[ch4h\] and Theorem \[teo:ch4bound\] we have that $$\Delta({\ensuremath {\mathscr{P}}}_n^{\nu_0},\hat{\ensuremath {\mathscr{P}}}_{n,m}^{\nu_0})\leq \sqrt{\frac{T_n}{2}}\sup_{f\in {\ensuremath {\mathscr{F}}}}H(f,\hat f_m).$$
- Step 2: On the one hand, thanks to Lemma \[lemma:ch4poisson\], one has that the statistical model associated with the family of probability $(P_{T_n}^{(\gamma^{\bar \nu_m-\nu_0},0,\bar\nu_m)}:\frac{d\nu}{d\nu_0}\in{\ensuremath {\mathscr{F}}})$ is equivalent to $\mathscr{L}_m$. By means of Lemma \[lemma:ch4poissonhatf\] we can bound $\Delta(\mathscr{L}_m,\hat{\mathscr{L}}_m)$. On the other hand it is easy to see that $\delta(\hat{\ensuremath {\mathscr{P}}}_{n,m}^{\nu_0}, \hat{\mathscr{L}}_m)=0$. Indeed, it is enough to consider the statistics $$S: x \mapsto \bigg(\sum_{r\leq T_n}{\ensuremath {\mathbb{I}}}_{J_2}(\Delta x_r),\dots,\sum_{r\leq T_n}{\ensuremath {\mathbb{I}}}_{J_m}(\Delta x_r)\bigg)$$ since the law of the random variable $\sum_{r\leq T_n}{\ensuremath {\mathbb{I}}}_{J_j}(\Delta x_r)$ under $P_{T_n}^{(\gamma^{\hat\nu_m-\nu_0},0,\hat\nu_m)}$ is Poisson of parameter $T_n\int_{J_j}\hat f_m(y)\nu_0(dy)$ for all $j=2,\dots,m$. Finally, Lemmas \[lemma:ch4poisson\] and \[lemma:ch4kernel\] allows us to conclude that $\delta(\mathscr{L}_m,\hat{\ensuremath {\mathscr{P}}}_{n,m}^{\nu_0})=0$. Collecting all the pieces together, we get $$\Delta(\hat{\ensuremath {\mathscr{P}}}_{n,m}^{\nu_0},\mathscr{L}_m)\leq \sup_{f\in {\ensuremath {\mathscr{F}}}}\sqrt{\frac{T_n}{\kappa}\int_{I\setminus[0,\varepsilon_m]}\big(f(y)-\hat f_m(y)\big)^2\nu_0(dy)}.$$
- Step 3: Applying Theorem \[ch4teomisto\] and Fact \[ch4hp\] we can pass from the Poisson approximation given by $\mathscr{L}_m$ to a Gaussian one obtaining $$\Delta(\mathscr{L}_m,\mathscr{N}_m)\leq C\sup_{f\in {\ensuremath {\mathscr{F}}}}\sqrt{\sum_{j=2}^m\frac{2}{T_n\nu(J_j)}}\leq C\sqrt{\sum_{j=2}^m\frac{2}{\kappa T_n\nu_0(J_j)}}=C\sqrt{\frac{2(m-1)}{\kappa T_n\mu_m}}.$$
- Step 4: Finally, Lemmas \[lemma:ch4wn\] and \[lemma:ch4limitewn\] allow us to conclude that: $$\begin{aligned}
\Delta({\ensuremath {\mathscr{P}}}_n^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0})&=O\bigg(\sqrt{T_n}\sup_{f\in {\ensuremath {\mathscr{F}}}}\big(A_m(f)+B_m(f)+C_m\big)\bigg)\\
& \quad + O\bigg(\sqrt{T_n}\sup_{f\in {\ensuremath {\mathscr{F}}}}\sqrt{\int_{I\setminus{[0,\varepsilon_m]}}\big(f(y)-\hat f_m(y)\big)^2\nu_0(dy)}+\sqrt{\frac{m}{T_n\mu_m}}\bigg).\end{aligned}$$
Proof of Theorem \[ch4teo2\]
----------------------------
Again, before stating some technical lemmas, let us highlight the main ideas of the proof. We recall that the goal is to prove that estimating $f=\frac{d\nu}{d\nu_0}$ from the discrete observations $(X_{t_i})_{i=0}^n$ of a Lévy process without Gaussian component and having Lévy measure $\nu$ is asymptotically equivalent to estimating $f$ from the Gaussian white noise model
$$dy_t=\sqrt{f(t)}dt+\frac{1}{2\sqrt{T_n g(t)}}dW_t,\quad g=\frac{d\nu_0}{d{\ensuremath{\textnormal{Leb}}}},\quad t\in I.$$ Reading ${\ensuremath {\mathscr{P}}}_1 \overset{\Delta} \Longleftrightarrow {\ensuremath {\mathscr{P}}}_2$ as ${\ensuremath {\mathscr{P}}}_1$ is asymptotically equivalent to ${\ensuremath {\mathscr{P}}}_2$, we have:
- Step 1. Clearly $(X_{t_i})_{i=0}^n \overset{\Delta} \Longleftrightarrow (X_{t_i}-X_{t_{i-1}})_{i=1}^n$. Moreover, $(X_{t_i}-X_{t_{i-1}})_i\overset{\Delta} \Longleftrightarrow (\epsilon_iY_i)$ where $(\epsilon_i)$ are i.i.d Bernoulli r.v. with parameter $\alpha=\iota_m \Delta_n e^{-\iota_m\Delta_n}$, $\iota_m:=\int_{I\setminus [0,\varepsilon_m]} f(y)\nu_0(dy)$ and $(Y_i)_i$ are i.i.d. r.v. independent of $(\epsilon_i)_{i=1}^n$ and of density $\frac{ f}{\iota_m}$ with respect to ${\nu_0}_{|_{I\setminus [0,\varepsilon_m]}}$;
- Step 2. $(\epsilon_iY_i)_i \overset{\Delta} \Longleftrightarrow \mathcal M(n;(\gamma_j)_{j=1}^m)$, where $\mathcal M(n;(\gamma_j)_{j=1}^m)$ is a multinomial distribution with $\gamma_1=1-\alpha$ and $\gamma_i:=\alpha\nu(J_i)$ $i=2,\dots,m$;
- Step 3. Gaussian approximation: $\mathcal M(n;(\gamma_1,\dots,\gamma_m)) \overset{\Delta} \Longleftrightarrow \bigotimes_{j=2}^m {\ensuremath {\mathscr{Nn}}}(2\sqrt{T_n\nu(J_j)},1)$;
- Step 4. $\bigotimes_{j=2}^m {\ensuremath {\mathscr{Nn}}}(2\sqrt{T_n\nu(J_j)},1)\overset{\Delta} \Longleftrightarrow (y_t)_{t\in I}$.
\[lemma:ch4discreto\] Let $\nu_i$, $i=1,2$, be Lévy measures such that $\nu_1\ll\nu_2$ and $b_1-b_2=\int_{|y|\leq 1}y(\nu_1-\nu_2)(dy)<\infty$. Then, for all $0<t<\infty$, we have: $$\Big\|Q_t^{(b_1,0,\mu_1)}-Q_t^{(b_2,0,\mu_2)}\Big\|_{TV}\leq \sqrt \frac{t}{2} H(\nu_1,\nu_2).$$
For all given $t$, let $K_t$ be the Markov kernel defined as $K_t(\omega,A):={\ensuremath {\mathbb{I}}}_A(\omega_t)$, $\forall \ A\in{\ensuremath {\mathscr{B}}}({\ensuremath {\mathbb{R}}})$, $\forall \ \omega\in D$. Then we have: $$\begin{aligned}
\big\|Q_t^{(b_1,0,\nu_1)}-Q_t^{(b_2,0,\nu_2)}\big\|_{TV}&=\big\|K_tP_t^{(b_1,0,\nu_1)}-K_tP_t^{(b_2,0,\nu_2)}\big\|_{TV}\\
&\leq \big\|P_t^{(b_1,0,\nu_1)}-P_t^{(b_2,0,\nu_2)}\big\|_{TV}\\
&\leq \sqrt \frac{t}{2} H(\nu_1,\nu_2),
\end{aligned}$$ where we have used that Markov kernels reduce the total variation distance and Theorem \[teo:ch4bound\].
\[lemma:ch4bernoulli\] Let $(P_i)_{i=1}^n$, $(Y_i)_{i=1}^n$ and $(\epsilon_i)_{i=1}^n$ be samples of, respectively, Poisson random variables ${\ensuremath {\mathscr{P}}}(\lambda_i)$, random variables with common distribution and Bernoulli random variables of parameters $\lambda_i e^{-\lambda_i}$, which are all independent. Let us denote by $Q_{(Y_i,P_i)}$ (resp. $Q_{(Y_i,\epsilon_i)}$) the law of $\sum_{j=1}^{P_i} Y_j$ (resp., $\epsilon_i Y_i$). Then: $$\label{eq:ch4lambda}
\Big\|\bigotimes_{i=1}^n Q_{(Y_i,P_i)}-\bigotimes_{i=1}^n Q_{(Y_i,\epsilon_i)}\Big\|_{TV}\leq 2\sqrt{\sum_{i=1}^n\lambda_i^2}.$$
The proof of this Lemma can be found in [@esterESAIM], Section 2.1.
\[lemma:ch4troncatura\] Let $f_m^{\textnormal{tr}}$ be the truncated function defined as follows: $$f_m^{\textnormal{tr}}(x)=\begin{cases}
1 &\mbox{ if } x\in[0,\varepsilon_m]\\
f(x) &\mbox{ otherwise}
\end{cases}$$ and let $\nu_m^{\textnormal{tr}}$ (resp. $\nu_m^{\textnormal{res}}$) be the Lévy measure having $f_m^{\textnormal{tr}}$ (resp. ${f|_{I\setminus [0,\varepsilon_m]}}$) as a density with respect to $\nu_0$. Denote by ${\ensuremath {\mathscr{Q}}}_{n}^{\textnormal{tr},\nu_0}$ the statistical model associated with the family of probabilities $\Big(\bigotimes_{i=1}^nQ_{t_i-t_{i-1}}^{(\gamma^{\nu_m^{\textnormal{tr}}-\nu_0},0,\nu_m^{\textnormal{tr}})}:\frac{d\nu_m^{\textnormal{tr}}}{d\nu_0}\in{\ensuremath {\mathscr{F}}}\Big)$ and by ${\ensuremath {\mathscr{Q}}}_{n}^{\textnormal{res},\nu_0}$ the model associated with the family of probabilities $\Big(\bigotimes_{i=1}^nQ_{t_i-t_{i-1}}^{(\gamma^{\nu_m^{\textnormal{res}}-\nu_0},0,\nu_m^{\textnormal{res}})}:\frac{d\nu_m^{\textnormal{res}}}{d\nu_0}\in{\ensuremath {\mathscr{F}}}\Big)$. Then: $$\Delta({\ensuremath {\mathscr{Q}}}_{n}^{\textnormal{tr},\nu_0},{\ensuremath {\mathscr{Q}}}_{n}^{\textnormal{res},\nu_0})=0.$$
Let us start by proving that $\delta({\ensuremath {\mathscr{Q}}}_{n}^{\textnormal{tr},\nu_0},{\ensuremath {\mathscr{Q}}}_{n}^{\textnormal{res},\nu_0})=0.$ For that, let us consider two independent Lévy processes, $X^{\textnormal{tr}}$ and $X^0$, of Lévy triplets given by $\big(\gamma^{\nu_m^{\textnormal{tr}}-\nu_0},0,\nu_m^{\textnormal{tr}}\big)$ and $\big(0,0,\nu_0|_{[0,\varepsilon_m]}\big)$, respectively. Then it is clear (using the *Lévy-Khintchine formula*) that the random variable $X_t^{\textnormal{tr}}- X_t^0$ is a randomization of $X_t^{\textnormal{tr}}$ (since the law of $X_t^0$ does not depend on $\nu$) having law $Q_t^{(\gamma^{\nu_m^{\textnormal{res}}-\nu_0},0,\nu_m^{\textnormal{res}})}$, for all $t\geq 0$. Similarly, one can prove that $\delta({\ensuremath {\mathscr{Q}}}_{n}^{\textnormal{res},\nu_0},{\ensuremath {\mathscr{Q}}}_{n}^{\textnormal{tr},\nu_0})=0.$
As a preliminary remark, observe that the model ${\ensuremath {\mathscr{Q}}}_n^{\nu_0}$ is equivalent to the one that observes the increments of $\big((x_t),P_{T_n}^{(\gamma^{\nu-\nu_0},0,\nu)}\big)$, that is, the model $\tilde{\ensuremath {\mathscr{Q}}}_n^{\nu_0}$ associated with the family of probabilities $\Big(\bigotimes_{i=1}^nQ_{t_i-t_{i-1}}^{(\gamma^{\nu-\nu_0},0,\nu)}:\frac{d\nu}{d\nu_0}\in{\ensuremath {\mathscr{F}}}\Big)$.
- Step 1: Facts \[ch4h\]–\[ch4hp\] and Lemma \[lemma:ch4discreto\] allow us to write $$\begin{aligned}
&\Big\|\bigotimes_{i=1}^nQ_{\Delta_n}^{(\gamma^{\nu-\nu_0},0,\nu)}-\bigotimes_{i=1}^nQ_{\Delta_n}^{(\gamma^{\nu_m^{\textnormal{tr}}-\nu_0},0, \nu_m^{\textnormal{tr}})}\Big\|_{TV}\leq \sqrt{n\sqrt\frac{\Delta_n}{2}H(\nu,\nu_m^{\textnormal{tr}})}\\&=\sqrt{n\sqrt\frac{\Delta_n}{2}\sqrt{\int_0^{\varepsilon_m}\big(\sqrt{f(y)}-1\big)^2\nu_0(dy)}}.\end{aligned}$$ Using this bound together with Lemma \[lemma:ch4troncatura\] and the notation therein, we get $\Delta({\ensuremath {\mathscr{Q}}}_n^{\nu_0}, {\ensuremath {\mathscr{Q}}}_{n}^{\textnormal{res},\nu_0})\leq \sqrt{n\sqrt\frac{\Delta_n}{2}\sup_{f\in {\ensuremath {\mathscr{F}}}}H(f, f_m^{\textnormal{tr}})}$. Observe that $\nu_m^{\textnormal{res}}$ is a finite Lévy measure, hence $\Big((x_t),P_{T_n}^{(\gamma^{\nu_m^{\textnormal{res}}},0,\nu_m^{\textnormal{res}})}\Big)$ is a compound Poisson process with intensity equal to $\iota_m:=\int_{I\setminus [0,\varepsilon_m]} f(y)\nu_0(dy)$ and jumps size density $\frac{ f(x)g(x)}{\iota_m}$, for all $x\in I\setminus [0,\varepsilon_m]$ (recall that we are assuming that $\nu_0$ has a density $g$ with respect to Lebesgue). In particular, this means that $Q_{\Delta_n}^{(\gamma^{\nu_m^{\textnormal{res}}},0,\nu_m^{\textnormal{res}})}$ can be seen as the law of the random variable $\sum_{j=1}^{P_i}Y_j$ where $P_i$ is a Poisson variable of mean $\iota_m \Delta_n$, independent from $(Y_i)_{i\geq 0}$, a sequence of i.i.d. random variables with density $\frac{ fg}{\iota_m}{\ensuremath {\mathbb{I}}}_{I\setminus[0,\varepsilon_m]}$ with respect to Lebesgue. Remark also that $\iota_m$ is confined between $\kappa \nu_0\big(I\setminus [0,\varepsilon_m]\big)$ and $M\nu_0\big(I\setminus [0,\varepsilon_m]
\big)$.
Let $(\epsilon_i)_{i\geq 0}$ be a sequence of i.i.d. Bernoulli variables, independent of $(Y_i)_{i\geq 0}$, with mean $\iota_m \Delta_n e^{-\iota_m\Delta_n}$. For $i=1,\dots,n$, denote by $Q_i^{\epsilon,f}$ the law of the variable $\epsilon_iY_i$ and by ${\ensuremath {\mathscr{Q}}}_n^{\epsilon}$ the statistical model associated with the observations of the vector $(\epsilon_1Y_1,\dots,\epsilon_nY_n)$, i.e. $${\ensuremath {\mathscr{Q}}}_n^{\epsilon}=\bigg(I^n,{\ensuremath {\mathscr{B}}}(I^n),\bigg\{\bigotimes_{i=1}^n Q_i^{\epsilon,f}:f\in{\ensuremath {\mathscr{F}}}\bigg\}\bigg).$$ Furthermore, denote by $\tilde Q_i^f$ the law of $\sum_{j=1}^{P_i}Y_j$. Then an application of Lemma \[lemma:ch4bernoulli\] yields: $$\begin{aligned}
\Big\|\bigotimes_{i=1}^n\tilde Q_i^f&-\bigotimes_{i=1}^nQ_i^{\epsilon,f}\Big\|_{TV} \leq 2\iota_m\sqrt{n\Delta_n^2}\leq 2M\nu_0\big(I\setminus [0,\varepsilon_m]\big)\sqrt{n\Delta_n^2}.\end{aligned}$$ Hence, we get: $$\label{eq:ch4bernoulli}
\Delta({\ensuremath {\mathscr{Q}}}_{n}^{\textnormal{res},\nu_0},{\ensuremath {\mathscr{Q}}}_n^{\epsilon})=O\bigg(\nu_0\big(I\setminus [0,\varepsilon_m]\big)\sqrt{n\Delta_n^2}\bigg).$$ Here the O depends only on $M$.
- Step 2: Let us introduce the following random variables: $$Z_1=\sum_{j=1}^n{\ensuremath {\mathbb{I}}}_{\{0\}}(\epsilon_jY_j); \quad Z_i=\sum_{j=1}^n{\ensuremath {\mathbb{I}}}_{J_i}(\epsilon_jY_j),\ i=2,\dots,m.$$ Observe that the law of the vector $(Z_1,\dots,Z_m)$ is multinomial $\mathcal M(n;\gamma_1,\dots,\gamma_m)$ where $$\gamma_1=1-\iota_m \Delta_n e^{-\iota_m \Delta_n},\quad \gamma_i=\Delta_n e^{-\iota_m \Delta_n}\nu(J_i),\quad i=2,\dots,m.$$ Let us denote by $\mathcal M_n$ the statistical model associated with the observation of $(Z_1,\dots,Z_m)$. Clearly $\delta({\ensuremath {\mathscr{Q}}}_n^{\epsilon},\mathcal M_n)=0$. Indeed, $\mathcal M_n$ is the image experiment by the random variable $S:I^n\to\{1,\dots,n\}^{m}$ defined as $$S(x_1,\dots,x_n)=\Big(\#\{j: x_j=0\}; \#\big\{j: x_j\in J_2\big\};\dots;\#\big\{j: x_j\in J_m\big\}\Big),$$ where $\# A$ denotes the cardinal of the set $A$.
We shall now prove that $\delta(\mathcal M_n,{\ensuremath {\mathscr{Q}}}_n^{\epsilon}) \leq \sup_{f\in{\ensuremath {\mathscr{F}}}}\sqrt{n\Delta_n H^2(f,\hat f_m)}$. We start by defining a discrete random variable $X^*$ concentrated at the points $0$, $x_i^*$, $i=2,\dots,m$: $${\ensuremath {\mathbb{P}}}(X^*=y)=\begin{cases}
\gamma_i &\mbox{ if } y=x_i^*,\quad i=1,\dots,m,\\
0 &\mbox{ otherwise},
\end{cases}$$ with the convention $x_1^*=0$. It is easy to see that $\mathcal M_n$ is equivalent to the statistical model associated with $n$ independent copies of $X^*$. Let us introduce the Markov kernel $$K(x_i^*, A) = \begin{cases}
{\ensuremath {\mathbb{I}}}_A(0) & \text{if } i = 1,\\
\int_A V_i(x) \nu_0(dx) & \text{otherwise.}
\end{cases}$$ Denote by $P^*$ the law of the random variable $X^*$ and by $Q_i^{\epsilon,\hat f}$ the law of a random variable $\epsilon_i \hat Y_i$ where $\epsilon_i$ is Bernoulli independent of $\hat Y_i$, with mean $\iota_m\Delta_n e^{-\iota_m\Delta_n}$ and $\hat Y_i$ has a density $\frac{\hat f_m g}{\iota_m}{\ensuremath {\mathbb{I}}}_{I\setminus[0,\varepsilon_m]}$ with respect to Lebesgue. The same computations as in Lemma \[lemma:ch4kernel\] prove that $KP^*=Q_i^{\epsilon,\hat f}$. Hence, thanks to Remark \[ch4independentkernels\], we get the equivalence between $\mathcal M_n$ and the statistical model associated with the observations of $n$ independent copies of $\epsilon_i \hat Y_i$. In order to bound $\delta(\mathcal M_n,{\ensuremath {\mathscr{Q}}}_n^{\epsilon})$ it is enough to bound the total variation distance between the probabilities $\bigotimes_{i=1}^n Q_i^{\epsilon,f}$ and $\bigotimes_{i=1}^n Q_i^{\epsilon,\hat f}$. Alternatively, we can bound the Hellinger distance between each of the $Q_i^{\epsilon,f}$ and $Q_i^{\epsilon,\hat f}$, thanks to Facts \[ch4h\] and \[ch4hp\], which is: $$\begin{aligned}
\bigg\|\bigotimes_{i=1}^nQ_i^{\epsilon,f} -\bigotimes_{i=1}^nQ_i^{\epsilon,\hat f}\bigg\|_{TV} &\leq \sqrt{\sum_{i=1}^n H^2\big(Q_i^{\epsilon,f}, Q_i^{\epsilon,\hat f}\big)}\\
&= \sqrt{\sum_{i=1}^n \frac{1-\gamma_1}{\iota_m} H^2(f, \hat f_m)} \leq \sqrt{n\Delta_n H^2(f, \hat f_m)}.\end{aligned}$$ It follows that $$\delta(\mathcal M_n,{\ensuremath {\mathscr{Q}}}_n^{\epsilon})\leq \sqrt{n\Delta_n} \sup_{f \in {\ensuremath {\mathscr{F}}}}H(f,\hat f_m).$$
- Step 3: Let us denote by $\mathcal N_m^*$ the statistical model associated with the observation of $m$ independent Gaussian variables ${\ensuremath {\mathscr{Nn}}}(n\gamma_i,n\gamma_i)$, $i=1,\dots,m$. Very similar computations to those in [@cmultinomial] yield $$\Delta(\mathcal M_n,\mathcal N_m^*)=O\Big(\frac{m \ln m}{\sqrt{n}}\Big).$$ In order to prove the asymptotic equivalence between $\mathcal M_n$ and $\mathcal N_m$ defined as in we need to introduce some auxiliary statistical models. Let us denote by $\mathcal A_m$ the experiment obtained from $\mathcal{N}_m^*$ by disregarding the first component and by $\mathcal V_m$ the statistical model associated with the multivariate normal distribution with the same means and covariances as a multinomial distribution $\mathcal M(n,\gamma_1,\dots,\gamma_m)$. Furthermore, let us denote by $\mathcal N_m^{\#}$ the experiment associated with the observation of $m-1$ independent Gaussian variables ${\ensuremath {\mathscr{Nn}}}(\sqrt{n\gamma_i},\frac{1}{4})$, $i=2,\dots,m$. Clearly $\Delta(\mathcal V_m,\mathcal A_m)=0$ for all $m$: In one direction one only has to consider the projection disregarding the first component; in the other direction, it is enough to remark that $\mathcal V_m$ is the image experiment of $\mathcal A_m$ by the random variable $S:(x_2,\dots,x_m)\to (n(1-\frac{\sum_{i=2}^m x_i}{n}),x_2,\dots,x_m)$. Moreover, using two results contained in [@cmultinomial], see Sections 7.1 and 7.2, one has that $$\Delta(\mathcal A_m,\mathcal N_m^*)=O\bigg(\sqrt{\frac{m}{n}}\bigg),\quad \Delta(\mathcal A_m,\mathcal N_m^{\#})=O\bigg(\frac{m}{\sqrt n}\bigg).$$ Finally, using Facts \[ch4h\] and \[fact:ch4gaussiane\] we can write $$\begin{aligned}
\Delta(\mathcal N_m^{\#},\mathcal N_m)&\leq \sqrt{2\sum_{i=2}^m \Big(\sqrt{T_n\nu(J_i)}-\sqrt{T_n\nu(J_i)\exp(-\iota_m\Delta_n)}\Big)^2}\\
&\leq\sqrt{2T_n\Delta_n^2\iota_m^3}\leq \sqrt{2n\Delta_n^3M^3\big(\nu_0\big(I\setminus [0,\varepsilon_m]\big)\big)^3}.
\end{aligned}$$ To sum up, $\Delta(\mathcal M_n,\mathcal N_m)=O\Big(\frac{m \ln m}{\sqrt{n}}+\sqrt{n\Delta_n^3\big(\nu_0\big(I\setminus [0,\varepsilon_m]\big)\big)^3}\Big)$, with the $O$ depending only on $\kappa$ and $M$.
- Step 4: An application of Lemmas \[lemma:ch4wn\] and \[lemma:ch4limitewn\] yields $$\Delta(\mathcal N_m,{\ensuremath {\mathscr{W}}}_n^{\nu_0}) \leq 2\sqrt{T_n} \sup_{f\in{\ensuremath {\mathscr{F}}}} \big(A_m(f)+B_m(f)+C_m\big).$$
Proofs of the examples
======================
The purpose of this section is to give detailed proofs of Examples \[ex:ch4esempi\] and Examples \[ex:ch4CPP\]–\[ex3\]. As in Section \[sec:ch4proofs\] we suppose $I\subseteq {\ensuremath {\mathbb{R}}}_+$. We start by giving some bounds for the quantities $A_m(f)$, $B_m(f)$ and $L_2(f, \hat f_m)$, the $L_2$-distance between the restriction of $f$ and $\hat f_m$ on $I\setminus[0,\varepsilon_m].$
Bounds for $A_m(f)$, $B_m(f)$, $L_2(f, \hat{f}_m)$ when $\hat f_m$ is piecewise linear.
---------------------------------------------------------------------------------------
In this section we suppose $f$ to be in ${\ensuremath {\mathscr{F}}}_{(\gamma, K, \kappa, M)}^I$ defined as in . We are going to assume that the $V_j$ are given by triangular/trapezoidal functions as in . In particular, in this case $\hat f_m$ is piecewise linear.
\[lemma:ch4hellinger\] Let $0<\kappa < M$ be two constants and let $f_i$, $i=1,2$ be functions defined on an interval $J$ and such that $\kappa \leq f_i\leq M$, $i=1,2$. Then, for any measure $\nu_0$, we have: $$\begin{aligned}
\frac{1}{4 M} \int_J \big(f_1(x)-f_2(x)\big)^2 \nu_0(dx)&\leq\int_J \big(\sqrt{f_1(x)} - \sqrt{f_2(x)}\big)^2\nu_0(dx)\\
&\leq \frac{1}{4 \kappa} \int_J \big(f_1(x)-f_2(x)\big)^2\nu_0(dx).
\end{aligned}$$
This simply comes from the following inequalities: $$\begin{aligned}
\frac{1}{2\sqrt M} (f_1(x)-f_2(x)) &\leq \frac{f_1(x)-f_2(x)}{\sqrt{f_1(x)}+\sqrt{f_2(x)}} = \sqrt{f_1(x)} - \sqrt{f_2(x)}\\
&\leq \frac{1}{2 \sqrt{\kappa}} (f_1(x)-f_2(x)).
\end{aligned}$$
Recall that $x_i^*$ is chosen so that $\int_{J_i} (x-x_i^*) \nu_0(dx) = 0$. Consider the following Taylor expansions for $x \in J_i$: $$f(x) = f(x_i^*) + f'(x_i^*) (x-x_i^*) + R_i(x); \quad \hat{f}_m(x) = \hat{f}_m(x_i^*) + \hat{f}_m'(x_i^*) (x-x_i^*),$$ where $\hat{f}_m(x_i^*) = \frac{\nu(J_i)}{\nu_0(J_i)}$ and $\hat{f}_m'(x_i^*)$ is the left or right derivative in $x_i^*$ depending on whether $x < x_i^*$ or $x > x_i^*$ (as $\hat f_m$ is piecewise linear, no remainder term is involved in its Taylor expansion).
\[lemma:ch4bounds\] The following estimates hold: $$\begin{aligned}
|R_i(x)| &\leq K |\xi_i - x_i^*|^\gamma |x-x_i^*|; \\
\big|f(x_i^*) - \hat{f}_m(x_i^*)\big| &\leq \|R_i\|_{L_\infty(\nu_0)} \text{ for } i = 2, \dots, m-1; \label{eqn:bounds}\\
\big|f(x)-\hat{f}_m(x)\big| &\leq
\begin{cases}
2 \|R_i\|_{L_\infty(\nu_0)} + K |x_i^*-\eta_i|^\gamma |x-x_i^*| & \text{ if } x \in J_i, \ i = 3, \dots, m-1;\\
C |x-\tau_i| & \text { if } x \in J_i, \ i \in \{2, m\}.
\end{cases}
\end{aligned}$$ for some constant $C$ and points $\xi_i \in J_i$, $\eta_i\in J_{i-1} \cup J_i\cup J_{i+1}$, $\tau_2 \in J_2 \cup J_3$ and $\tau_m \in J_{m-1} \cup J_m$.
By definition of $R_i$, we have $$|R_i(x)| = \Big| \big(f'(\xi_i) - f'(x_i^*)\big)(x-x_i^*) \Big| \leq K |\xi_i - x_i^*|^\gamma |x-x_i^*|,$$ for some point $\xi_i \in J_i$. For the second inequality, $$\begin{aligned}
|f(x_i^*)-\hat{f}_m(x_i^*)| &= \frac{1}{\nu_0(J_i)} \Big| \int_{J_i} (f(x_i^*)-f(x)) \nu_0(dx)\Big|\\
&= \frac{1}{\nu_0(J_i)} \bigg|\int_{J_i} R_i(x) \nu_0(dx)\bigg| \leq \|R_i\|_{L_\infty(\nu_0)},
\end{aligned}$$ where in the first inequality we have used the defining property of $x_i^*$. For the third inequality, let us start by proving that for all $2 < i < m-1$, $\hat{f}_m'(x_i^*) = f'(\chi_i)$ for some $\chi_i \in J_i\cup J_{i+1}$ (here, we are considering right derivatives; for left ones, this would be $J_{i-1} \cup J_i$). To see that, take $x\in J_i\cap [x_i^*,x_{i+1}^*]$ and introduce the function $h(x):=f(x)-l(x)$ where $$l(x)=\frac{x-x_i^*}{x_{i+1}^*-x_i^*}\big(\hat f_m(x_{i+1}^*)-\hat f_m(x_i^*)\big)+\hat f_m(x_i^*).$$ Then, using the fact that $\int_{J_i}(x-x_i^*)\nu_0(dx)=0$ joint with $\int_{J_{i+1}}(x-x_{i+1}^*)\nu_0(dx)=(x_{j+1}^*-x_j^*)\mu_m$, we get $$\int_{J_i}h(x)\nu_0(dx)=0=\int_{J_{i+1}}h(x)\nu_0(dx).$$ In particular, by means of the mean theorem, one can conclude that there exist two points $p_i\in J_i$ and $p_{i+1}\in J_{i+1}$ such that $$h(p_i)=\frac{\int_{J_i}h(x)\nu_0(dx)}{\nu_0(J_i)}=\frac{\int_{J_{i+1}}h(x)\nu_0(dx)}{\nu_0(J_{i+1})}=h(p_{i+1}).$$ As a consequence, we can deduce that there exists $\chi_i\in[p_i,p_{i+1}]\subseteq J_i\cup J_{i+1}$ such that $h'(\chi_i)=0$, hence $f'(\chi_i)=l'(\chi_i)=\hat f_m'(x_i^*)$. When $2 < i < m-1$, the two Taylor expansions joint with the fact that $\hat{f}_m'(x_i^*) = f'(\chi_i)$ for some $\chi_i \in J_i\cup J_{i+1}$, give $$\begin{aligned}
|f(x) - \hat{f}_m (x)| &\leq |f(x_i^*) - \hat{f}_m(x_i^*)| + |R_i(x)| + K |x_i^* - \chi_i|^\gamma |x-x_i^*|\\
& \leq 2 \|R_i\|_{L_\infty(\nu_0)} + K |x_i^* - \chi_i|^\gamma |x-x_i^*|
\end{aligned}$$ whenever $x \in J_i$ and $x > x_i^*$ (the case $x < x_i^*$ is handled similarly using the left derivative of $\hat f_m$ and $\chi_i \in J_{i-1} \cup J_i$). For the remaining cases, consider for example $i = 2$. Then $\hat{f}_m(x)$ is bounded by the minimum and the maximum of $f$ on $J_2 \cup J_3$, hence $\hat{f}_m(x) = f(\tau)$ for some $\tau \in J_2 \cup J_3$. Since $f'$ is bounded by $C = 2M + K$, one has $|f(x) - \hat{f}_m(x)| \leq C|x-\tau|$.
\[lemma:ch4abc\] With the same notations as in Lemma \[lemma:ch4bounds\], the estimates for $A_m^2(f)$, $B_m^2(f)$ and $L_2(f, \hat{f}_m)^2$ are as follows: $$\begin{aligned}
L_2(f, \hat{f}_m)^2&\leq \frac{1}{4\kappa} \bigg( \sum_{i=3}^{m-1} \int_{J_i} \Big(2 \|R_i\|_{L_\infty(\nu_0)} + K |x_i^*-\eta_i|^\gamma|x-x_i^*|\Big)^2 \nu_0(dx) \\
&\phantom{=}\ + C^2 \Big(\int_{J_2}|x-\tau_2|^2\nu_0(dx) + \int_{J_m}|x-\tau_m|^2\nu_0(dx)\Big)\bigg).\\
A_m^2(f) &= L_2\big(\sqrt{f}, \widehat{\sqrt{f}}_m\big)^2 = O\Big(L_2(f, \hat{f}_m)^2\Big)\\
B_m^2(f) &= O\bigg( \sum_{i=2}^{m} \frac{1}{\sqrt{\kappa}} \nu_0(J_i) (2 \sqrt{M} + 1)^2 \|R_i\|_{L_\infty(\nu_0)}^2\bigg).
\end{aligned}$$
The $L_2$-bound is now a straightforward application of Lemmas \[lemma:ch4hellinger\] and \[lemma:ch4bounds\]. The one on $A_m(f)$ follows, since if $f \in {\ensuremath {\mathscr{F}}}_{(\gamma, K, \kappa, M)}^I$ then $\sqrt{f} \in {\ensuremath {\mathscr{F}}}_{(\gamma, \frac{K}{\sqrt{\kappa}}, \sqrt{\kappa}, \sqrt{M})}^I$. In order to bound $B_m^2(f)$ write it as: $$B_m^2(f)=\sum_{j=2}^m \nu_0(J_j)\bigg(\frac{\int_{J_j}\sqrt{f(y)}\nu_0(dy)}{\nu_0(J_j)}-\sqrt{\frac{\nu(J_j)}{\nu_0(J_j)}}\bigg)^2=:\sum_{j=2}^m \nu_0(J_j)E_j^2.$$ By the triangle inequality, let us bound $E_j$ by $F_j+G_j$ where: $$F_j=\bigg|\sqrt{\frac{\nu(J_j)}{\nu_0(J_j)}}-\sqrt{f(x_j^*)}\bigg| \quad \textnormal{ and }\quad G_j=\bigg|\sqrt{f(x_j^*)}-\frac{\int_{J_j}\sqrt{f(y)}\nu_0(dy)}{\nu_0(J_j)}\bigg|.$$ Using the same trick as in the proof of Lemma \[lemma:ch4hellinger\], we can bound: $$\begin{aligned}
F_j \leq 2 \sqrt{M} \bigg|\frac{\int_{J_j} \big(f(x)-f(x_j^*)\big)\nu_0(dx)}{\nu_0(J_j)}\bigg| \leq 2 \sqrt{M} \|R_j\|_{L_\infty(\nu_0)}.
\end{aligned}$$ On the other hand, $$\begin{aligned}
G_j&=\frac{1}{\nu_0(J_j)}\bigg|\int_{J_j}\big(\sqrt{f(x_j^*)}-\sqrt{f(y)}\big)\nu_0(dy)\bigg|\\
&=\frac{1}{\nu_0(J_j)}\bigg|\int_{J_j}\bigg(\frac{f'(x_j^*)}{2\sqrt{f(x_j^*)}}(y-x_j^*)+\tilde R_j(y)\bigg)\nu_0(dy)\bigg| \leq \|\tilde R_j\|_{L_\infty(\nu_0)},
\end{aligned}$$ which has the same magnitude as $\frac{1}{\kappa}\|R_j\|_{L_\infty(\nu_0)}$.
Observe that when $\nu_0$ is finite, there is no need for a special definition of $\hat{f}_m$ near $0$, and all the estimates in Lemma \[lemma:ch4bounds\] hold true replacing every occurrence of $i = 2$ by $i = 1$.
\[rmk:nonlinear\] The same computations as in Lemmas \[lemma:ch4bounds\] and \[lemma:ch4abc\] can be adapted to the general case where the $V_j$’s (and hence $\hat f_m$) are not piecewise linear. In the general case, the Taylor expansion of $\hat f_m$ in $x_i^*$ involves a remainder term as well, say $\hat R_i$, which also needs to be bounded.
Proofs of Examples \[ex:ch4esempi\] {#subsec:esempi}
-----------------------------------
In the following, we collect the details of the proofs of Examples \[ex:ch4esempi\].
**1. The finite case:** $\nu_0\equiv {\ensuremath{\textnormal{Leb}}}([0,1])$.
Remark that in the case where $\nu_0$ if finite there are no convergence problems near zero and so we can consider the easier approximation of $f$: $$\hat f_m(x):=
\begin{cases}
m\theta_1 & \textnormal{if } x\in \big[0,x_1^*\big],\\
m^2\big[\theta_{j+1}(x-x_j^*)+\theta_j(x_{j+1}^*-x)\big] & \textnormal{if } x\in (x_j^*,x_{j+1}^*] \quad j = 1,\dots,m-1,\\
m\theta_m & \textnormal{if } x\in (x_m^*,1]
\end{cases}$$ where $$x_j^*=\frac{2j-1}{2m},\quad J_j=\Big(\frac{j-1}{m},\frac{j}{m}\Big],\quad \theta_j=\int_{J_j}f(x)dx, \quad j=1,\dots,m.$$ In this case we take $\varepsilon_m = 0$ and Conditions $(C2)$ and $(C2')$ coincide: $$\lim_{n\to\infty}n\Delta_n\sup_{f\in {\ensuremath {\mathscr{F}}}}\Big(A_m^2(f)+B_m^2(f)\Big) = 0.$$ Applying Lemma \[lemma:ch4abc\], we get $$\sup_{f\in {\ensuremath {\mathscr{F}}}} \Big(L_2(f,\hat f_m)+ A_m(f)+ B_m(f)\Big)= O\big(m^{-\frac{3}{2}}+m^{-1-\gamma}\big);$$ (actually, each of the three terms on the left hand side has the same rate of convergence).
**2. The finite variation case:** $\frac{d\nu_0}{d{\ensuremath{\textnormal{Leb}}}}(x)=x^{-1}{\ensuremath {\mathbb{I}}}_{[0,1]}(x).$
To prove that the standard choice of $V_j$ described at the beginning of Examples \[ex:ch4esempi\] leads to $\displaystyle{\int_{\varepsilon_m}^1 V_j(x)\frac{dx}{x}=1}$, it is enough to prove that this integral is independent of $j$, since in general $\displaystyle{\int_{\varepsilon_m}^1 \sum_{j=2}^m V_j(x)\frac{dx}{x}=m-1}.$ To that aim observe that, for $j=3,\dots,m-1$, $$\mu_m\int_{\varepsilon_m}^1 V_j(x)\nu_0(dx)=\int_{x_{j-1}^*}^{x_j^*}\frac{x-x_{j-1}^*}{x_j^*-x_{j-1}^*}\frac{dx}{x}+\int_{x_j^*}^{x_{j+1}^*}\frac{x_{j+1}^*-x}{x_{j+1}^*-x_j^*}\frac{dx}{x}.$$ Let us show that the first addendum does not depend on $j$. We have $$\int_{x_{j-1}^*}^{x_j^*}\frac{dx}{x_j^*-x_{j-1}^*}=1\quad \textnormal{and}\quad -\frac{x_{j-1}^*}{x_j^*-x_{j-1}^*}\int_{x_{j-1}^*}^{x_j^*}\frac{dx}{x}=\frac{x_{j-1}^*}{x_j^*-x_{j-1}^*}\ln\Big(\frac{x_{j-1}^*}{x_j^*}\Big).$$ Since $x_j^*=\frac{v_j-v_{j-1}}{\mu_m}$ and $v_j=\varepsilon_m^{\frac{m-j}{m-1}}$, the quantities $\frac{x_j^*}{x_{j-1}^*}$ and, hence, $\frac{x_{j-1}^*}{x_j^*-x_{j-1}^*}$ do not depend on $j$. The second addendum and the trapezoidal functions $V_2$ and $V_m$ are handled similarly. Thus, $\hat f_m$ can be chosen of the form
$$\hat f_m(x):=
\begin{cases}
\quad 1 & \textnormal{if } x\in \big[0,\varepsilon_m\big],\\
\frac{\nu(J_2)}{\mu_m} & \textnormal{if } x\in \big(\varepsilon_m, x_2^*\big],\\
\frac{1}{x_{j+1}^*-x_j^*}\bigg[\frac{\nu(J_{j+1})}{\mu_m}(x-x_j^*)+\frac{\nu(J_{j})}{\mu_m}(x_{j+1}^*-x)\bigg] & \textnormal{if } x\in (x_j^*,x_{j+1}^*] \quad j = 2,\dots,m-1,\\
\frac{\nu(J_m)}{\mu_m} & \textnormal{if } x\in (x_m^*,1].
\end{cases}$$
A straightforward application of Lemmas \[lemma:ch4bounds\] and \[lemma:ch4abc\] gives $$\sqrt{\int_{\varepsilon_m}^1\Big(f(x)-\hat f_m(x)\Big)^2 \nu_0(dx)} +A_m(f)+B_m(f)=O\bigg(\bigg(\frac{\ln m}{m}\bigg)^{\gamma+1} \sqrt{\ln (\varepsilon_m^{-1})}\bigg),$$ as announced.
**3. The infinite variation, non-compactly supported case:** $\frac{d\nu_0}{d{\ensuremath{\textnormal{Leb}}}}(x)=x^{-2}{\ensuremath {\mathbb{I}}}_{{\ensuremath {\mathbb{R}}}_+}(x)$. Recall that we want to prove that $$L_2(f,\hat f_m)^2+A_m^2(f)+B_m^2(f)=O\bigg(\frac{H(m)^{3+4\gamma}}{(\varepsilon_m m)^{2\gamma}}+\sup_{x\geq H(m)}\frac{f(x)^2}{H(m)}\bigg),$$ for any given sequence $H(m)$ going to infinity as $m\to\infty$.
Let us start by addressing the problem that the triangular/trapezoidal choice for $V_j$ is not doable. Introduce the following notation: $V_j = {\ensuremath {\accentset{\triangle}{V}}}_j + A_j$, $j = 2, \dots, m$, where the ${\ensuremath {\accentset{\triangle}{V}}}_j$’s are triangular/trapezoidal functions similar to those in . The difference is that here, since $x_m^*$ is not defined, ${\ensuremath {\accentset{\triangle}{V}}}_{m-1}$ is a trapezoid, linear between $x_{m-2}^*$ and $x_{m-1}^*$ and constantly equal to $\frac{1}{\mu_m}$ on $[x_{m-1}^*,v_{m-1}]$ and ${\ensuremath {\accentset{\triangle}{V}}}_m$ is supported on $[v_{m-1},\infty)$, where it is constantly equal to $\frac{1}{\mu_m}$. Each $A_j$ is chosen so that:
1. It is supported on $[x_{j-1}^*, x_{j+1}^*]$ (unless $j = 2$, $j = m-1$ or $j = m$; in the first case the support is $[x_2^*, x_3^*]$, in the second one it is $[x_{m-2}^*, x_{m-1}^*]$, and $A_m \equiv 0$);
2. ${A_j}$ coincides with $-A_{j-1}$ on $[x_{j-1}^*, x_j^*]$, $j = 3, \dots, m-1$ (so that $\sum V_j \equiv \frac{1}{\mu_m}$) and its first derivative is bounded (in absolute value) by $\frac{1}{\mu_m(x_j^* - x_{j-1}^*)}$ (so that $V_j$ is non-negative and bounded by $\frac{1}{\mu_m}$);
3. $A_j$ vanishes, along with its first derivatives, on $x_{j-1}^*$, $x_j^*$ and $x_{j+1}^*$.
We claim that these conditions are sufficient to assure that $\hat f_m$ converges to $f$ quickly enough. First of all, by Remark \[rmk:nonlinear\], we observe that, to have a good bound on $L_2(f, \hat f_m)$, the crucial property of $\hat f_m$ is that its first right (resp. left) derivative has to be equal to $\frac{1}{\mu_m(x_{j+1}^*-x_j^*)}$ (resp. $\frac{1}{\mu_m(x_{j}^*-x_{j-1}^*)}$) and its second derivative has to be small enough (for example, so that the rest $\hat R_j$ is as small as the rest $R_j$ of $f$ already appearing in Lemma \[lemma:ch4bounds\]).
The (say) left derivatives in $x_j^*$ of $\hat f_m$ are given by $$\hat f_m'(x_j^*) = \big({\ensuremath {\accentset{\triangle}{V}}}_j'(x_j^*) + A_j'(x_j^*)\big) \big(\nu(J_j)-\nu(J_{j-1})\big); \quad \hat f_m''(x_j^*) = A_j''(x_j^*)\big(\nu(J_j)-\nu(J_{j-1})\big).$$ Then, in order to bound $|\hat f_m''(x_j^*)|$ it is enough to bound $|A_j''(x_j^*)|$ because: $$\big|\hat f_m''(x_j^*)\big| \leq |A_j''(x_j^*)| \Big|\int_{J_j} f(x) \frac{dx}{x^2} - \int_{J_{j-1}} f(x) \frac{dx}{x^2}\Big| \leq |A_j''(x_j^*)| \displaystyle{\sup_{x\in I}}|f'(x)|(\ell_{j}+\ell_{j-1}) \mu_m,$$ where $\ell_{j}$ is the Lebesgue measure of $J_{j}$.
We are thus left to show that we can choose the $A_j$’s satisfying points 1-3, with a small enough second derivative, and such that $\int_I V_j(x) \frac{dx}{x^2} = 1$. To make computations easier, we will make the following explicit choice: $$A_j(x) = b_j (x-x_j^*)^2 (x-x_{j-1}^*)^2 \quad \forall x \in [x_{j-1}^*, x_j^*),$$ for some $b_j$ depending only on $j$ and $m$ (the definitions on $[x_j^*, x_{j+1}^*)$ are uniquely determined by the condition $A_j + A_{j+1} \equiv 0$ there).
Define $j_{\max}$ as the index such that $H(m) \in J_{j_{\max}}$; it is straightforward to check that $$j_{\max} \sim m- \frac{\varepsilon_m(m-1)}{H(m)}; \quad x_{m-k}^* = \varepsilon_m(m-1) \log \Big(1+\frac{1}{k}\Big), \quad k = 1, \dots, m-2.$$ One may compute the following Taylor expansions: $$\begin{aligned}
\int_{x_{m-k-1}^*}^{x_{m-k}^*} {\ensuremath {\accentset{\triangle}{V}}}_{m-k}(x) \nu_0(dx) &= \frac{1}{2} - \frac{1}{6k} + \frac{5}{24k^2} + O\Big(\frac{1}{k^3}\Big);\\
\int_{x_{m-k}^*}^{x_{m-k+1}^*} {\ensuremath {\accentset{\triangle}{V}}}_{m-k}(x) \nu_0(dx) &= \frac{1}{2} + \frac{1}{6k} + \frac{1}{24k^2} + O\Big(\frac{1}{k^3}\Big).
\end{aligned}$$ In particular, for $m \gg 0$ and $m-k \leq j_{\max}$, so that also $k \gg 0$, all the integrals $\int_{x_{j-1}^*}^{x_{j+1}^*} {\ensuremath {\accentset{\triangle}{V}}}_j(x) \nu_0(dx)$ are bigger than 1 (it is immediate to see that the same is true for ${\ensuremath {\accentset{\triangle}{V}}}_2$, as well). From now on we will fix a $k \geq \frac{\varepsilon_m m}{H(m)}$ and let $j = m-k$.
Summing together the conditions $\int_I V_i(x)\nu_0(dx)=1$ $\forall i>j$ and noticing that the function $\sum_{i = j}^m V_i$ is constantly equal to $\frac{1}{\mu_m}$ on $[x_j^*,\infty)$ we have: $$\begin{aligned}
\int_{x_{j-1}^*}^{x_j^*} A_j(x) \nu_0(dx) &= m-j+1 - \frac{1}{\mu_m} \nu_0([x_j^*, \infty)) - \int_{x_{j-1}^*}^{x_j^*} {\ensuremath {\accentset{\triangle}{V}}}_j(x) \nu_0(dx)\\
&= k+1- \frac{1}{\log(1+\frac{1}{k})} - \frac{1}{2} + \frac{1}{6k} + O\Big(\frac{1}{k^2}\Big) = \frac{1}{4k} + O\Big(\frac{1}{k^2}\Big)
\end{aligned}$$ Our choice of $A_j$ allows us to compute this integral explicitly: $$\int_{x_{j-1}^*}^{x_j^*} b_j (x-x_{j-1}^*)^2(x-x_j^*)^2 \frac{dx}{x^2} = b_j \big(\varepsilon_m (m-1)\big)^3 \Big(\frac{2}{3} \frac{1}{k^4} + O\Big(\frac{1}{k^5}\Big)\Big).$$ In particular one gets that asymptotically $$b_j \sim \frac{1}{(\varepsilon_m(m-1))^3} \frac{3}{2} k^4 \frac{1}{4k} \sim \bigg(\frac{k}{\varepsilon_m m}\bigg)^3.$$ This immediately allows us to bound the first order derivative of $A_j$ as asked in point 2: Indeed, it is bounded above by $2 b_j \ell_{j-1}^3$ where $\ell_{j-1}$ is again the length of $J_{j-1}$, namely $\ell_j = \frac{\varepsilon_m(m-1)}{k(k+1)} \sim \frac{\varepsilon_m m}{k^2}$. It follows that for $m$ big enough: $$\displaystyle{\sup_{x\in I}|A_j'(x)|} \leq \frac{1}{k^3} \ll \frac{1}{\mu_m(x_j^*-x_{j-1}^*)} \sim \bigg(\frac{k}{\varepsilon_m m}\bigg)^2.$$ The second order derivative of $A_j(x)$ can be easily computed to be bounded by $4 b_j \ell_j^2$. Also remark that the conditions that $|f|$ is bounded by $M$ and that $f'$ is Hölder, say $|f'(x) - f'(y)| \leq K |x-y|^\gamma$, together give a uniform $L_\infty$ bound of $|f'|$ by $2M + K$. Summing up, we obtain: $$|\hat f_m''(x_j^*)| \lesssim b_j \ell_j^3 \mu_m \sim \frac{1}{k^3\varepsilon_m m}$$ (here and in the following we use the symbol $\lesssim$ to stress that we work up to constants and to higher order terms). The leading term of the rest $\hat R_j$ of the Taylor expansion of $\hat f_m$ near $x_j^*$ is $$\hat f_m''(x_j^*) |x-x_j^*|^2 \sim |\hat f_m''(x_j^*)| \ell_j^2 \sim \frac{\varepsilon_m m}{k^7}.$$
Using Lemmas \[lemma:ch4bounds\] and \[lemma:ch4abc\] (taking into consideration Remark \[rmk:nonlinear\]) we obtain $$\begin{aligned}
\int_{\varepsilon_m}^{\infty} |f(x) - \hat f_m(x)|^2 \nu_0(dx) &\lesssim \sum_{j=2}^{j_{\max}} \int_{J_j} |f(x) - \hat f_m(x)|^2 \nu_0(dx) + \int_{H(m)}^\infty |f(x)-\hat f_m(x)|^2 \nu_0(dx) \nonumber \\
&\lesssim \sum_{k=\frac{\varepsilon_m m}{H(m)}}^{m}\mu_m \bigg( \frac{(\varepsilon_m m)^{2+2\gamma}}{k^{4+4\gamma}} + \frac{(\varepsilon_m m)^2}{k^{14}}\bigg) + \frac{1}{H(m)}\sup_{x\geq H(m)}f(x)^2 \label{eq:xquadro} \\
&\lesssim \bigg(\frac{H(m)^{3+4\gamma}}{(\varepsilon_m m)^{2+2\gamma}} + \frac{H(m)^{13}}{(\varepsilon_m m)^{10}}\bigg) + \frac{1}{H(m)}. \nonumber
\end{aligned}$$ It is easy to see that, since $0 < \gamma \leq 1$, as soon as the first term converges, it does so more slowly than the second one. Thus, an optimal choice for $H(m)$ is given by $\sqrt{\varepsilon_m m}$, that gives a rate of convergence: $$L_2(f,\hat f_m)^2 \lesssim \frac{1}{\sqrt{\varepsilon_m m}}.$$ This directly gives a bound on $H(f, \hat f_m)$. Also, the bound on the term $A_m(f)$, which is $L_2(\sqrt f,\widehat{\sqrt{f}}_m)^2$, follows as well, since $f \in {\ensuremath {\mathscr{F}}}_{(\gamma,K,\kappa,M)}^I$ implies $\sqrt{f} \in {\ensuremath {\mathscr{F}}}_{(\gamma, \frac{K}{\sqrt\kappa}, \sqrt \kappa, \sqrt M)}^I$. Finally, the term $B_m^2(f)$ contributes with the same rates as those in : Using Lemma \[lemma:ch4abc\], $$\begin{aligned}
B_m^2(f) &\lesssim \sum_{j=2}^{\lceil m-\frac{\varepsilon_m(m-1)}{H(m)} \rceil} \nu_0(J_j) \|R_j\|_{L_\infty}^2 + \nu_0([H(m), \infty))\\
&\lesssim \mu_m \sum_{k=\frac{\varepsilon_m (m-1)}{H(m)}}^m \Big(\frac{\varepsilon_m m}{k^2}\Big)^{2+2\gamma} + \frac{1}{H(m)}\\
&\lesssim \frac{H(m)^{3+4\gamma}}{(\varepsilon_m m)^{2+2\gamma}} + \frac{1}{H(m)}.
\end{aligned}$$
Proof of Example \[ex:ch4CPP\] {#subsec:ch4ex1}
------------------------------
In this case, since $\varepsilon_m = 0$, the proofs of Theorems \[ch4teo1\] and \[ch4teo2\] simplify and give better estimates near zero, namely: $$\begin{aligned}
\Delta({\ensuremath {\mathscr{P}}}_{n,FV}^{{\ensuremath{\textnormal{Leb}}}}, {\ensuremath {\mathscr{W}}}_n^{\nu_0}) &\leq C_1 \bigg(\sqrt{T_n}\sup_{f\in {\ensuremath {\mathscr{F}}}}\Big(A_m(f)+ B_m(f)+L_2(f,\hat f_m)\Big)+\sqrt{\frac{m^2}{T_n}}\bigg)\nonumber \\
\Delta({\ensuremath {\mathscr{Q}}}_{n,FV}^{{\ensuremath{\textnormal{Leb}}}}, {\ensuremath {\mathscr{W}}}_n^{\nu_0}) &\leq C_2\bigg(\sqrt{n\Delta_n^2}+\frac{m\ln m}{\sqrt{n}}+\sqrt{T_n}\sup_{f\in{\ensuremath {\mathscr{F}}}}\Big( A_m(f)+ B_m(f)+H\big(f,\hat f_m\big)\Big) \bigg) \label{eq:CPP},\end{aligned}$$ where $C_1$, $C_2$ depend only on $\kappa,M$ and $$\begin{aligned}
&A_m(f)=\sqrt{\int_0^1\Big(\widehat{\sqrt f}_m(y)-\sqrt{f(y)}\Big)^2dy},\quad
B_m(f)=\sum_{j=1}^m\bigg(\sqrt m\int_{J_j}\sqrt{f(y)}dy-\sqrt{\theta_j}\bigg)^2.\end{aligned}$$
As a consequence we get: $$\begin{aligned}
\Delta({\ensuremath {\mathscr{P}}}_{n,FV}^{{\ensuremath{\textnormal{Leb}}}},{\ensuremath {\mathscr{W}}}_n^{\nu_0})&\leq O\bigg(\sqrt{T_n}(m^{-\frac{3}{2}}+m^{-1-\gamma})+\sqrt{m^2T_n^{-1}}\bigg).\end{aligned}$$ To get the bounds in the statement of Example \[ex:ch4CPP\] the optimal choices are $m_n = T_n^{\frac{1}{2+\gamma}}$ when $\gamma \leq \frac{1}{2}$ and $m_n = T_n^{\frac{2}{5}}$ otherwise. Concerning the discrete model, we have: $$\begin{aligned}
\Delta({\ensuremath {\mathscr{Q}}}_{n,FV}^{{\ensuremath{\textnormal{Leb}}}},{\ensuremath {\mathscr{W}}}_n^{\nu_0})&\leq O\bigg(\sqrt{n\Delta_n^2}+\frac{m\ln m}{\sqrt{n}}+ \sqrt{n\Delta_n}\big(m^{-\frac{3}{2}}+m^{-1-\gamma}\big)\bigg).\end{aligned}$$ There are four possible scenarios: If $\gamma>\frac{1}{2}$ and $\Delta_n=n^{-\beta}$ with $\frac{1}{2}<\beta<\frac{3}{4}$ (resp. $\beta\geq \frac{3}{4}$) then the optimal choice is $m_n=n^{1-\beta}$ (resp. $m_n=n^{\frac{2-\beta}{5}}$).
If $\gamma\leq\frac{1}{2}$ and $\Delta_n=n^{-\beta}$ with $\frac{1}{2}<\beta<\frac{2+2\gamma}{3+2\gamma}$ (resp. $\beta\geq \frac{2+2\gamma}{3+2\gamma}$) then the optimal choice is $m_n=n^{\frac{2-\beta}{4+2\gamma}}$ (resp. $m_n=n^{1-\beta}$).
Proof of Example \[ch4ex2\] {#subsec:ch4ex2}
---------------------------
As in Examples \[ex:ch4esempi\], we let $\varepsilon_m=m^{-1-\alpha}$ and consider the standard triangular/trapezoidal $V_j$’s. In particular, $\hat f_m$ will be piecewise linear. Condition (C2’) is satisfied and we have $C_m(f)=O(\varepsilon_m)$. This bound, combined with the one obtained in , allows us to conclude that an upper bound for the rate of convergence of $\Delta({\ensuremath {\mathscr{Q}}}_{n,FV}^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0})$ is given by: $$\Delta({\ensuremath {\mathscr{Q}}}_{n,FV}^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0})\leq C \bigg(\sqrt{\sqrt{n^2\Delta_n}\varepsilon_m}+\sqrt{n\Delta_n}\Big(\frac{\ln (\varepsilon_m^{-1})}{m}\Big)^{2}+\frac{m\ln m}{\sqrt n}+\sqrt{n\Delta_n^2}\ln (\varepsilon_m^{-1}) \bigg),$$ where $C$ is a constant only depending on the bound on $\lambda > 0$.
The sequences $\varepsilon_m$ and $m$ can be chosen arbitrarily to optimize the rate of convergence. It is clear from the expression above that, if we take $\varepsilon_m = m^{-1-\alpha}$ with $\alpha > 0$, bigger values of $\alpha$ reduce the first term $\sqrt{\sqrt{n^2\Delta_n}\varepsilon_m}$, while changing the other terms only by constants. It can be seen that taking $\alpha \geq 15$ is enough to make the first term negligible with respect to the others. In that case, and under the assumption $\Delta_n = n^{-\beta}$, the optimal choice for $m$ is $m = n^\delta$ with $\delta = \frac{5-4\beta}{14}$. With these choices, the global rate of convergence is $$\Delta({\ensuremath {\mathscr{Q}}}_{n,FV}^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0}) = \begin{cases}
O\big(n^{\frac{1}{2}-\beta} \ln n\big) & \text{if } \frac{1}{2} < \beta \leq \frac{9}{10}\\
O\big(n^{-\frac{1+2\beta}{7}} \ln n\big) & \text{if } \frac{9}{10} < \beta < 1.
\end{cases}$$
In the same way one can find $$\Delta({\ensuremath {\mathscr{P}}}_{n,FV}^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0})=O\bigg( \sqrt{n\Delta_n} \Big(\frac{\ln m}{m}\Big)^2 \sqrt{\ln(\varepsilon_m^{-1})} + \sqrt{\frac{m^2}{n\Delta_n \ln(\varepsilon_m^{-1})}} + \sqrt{n \Delta_n} \varepsilon_m \bigg).$$ As above, we can freely choose $\varepsilon_m$ and $m$ (in a possibly different way from above). Again, as soon as $\varepsilon_m = m^{-1-\alpha}$ with $\alpha \geq 1$ the third term plays no role, so that we can choose $\varepsilon_m = m^{-2}$. Letting $\Delta_n = n^{-\beta}$, $0 < \beta < 1$, and $m = n^\delta$, an optimal choice is $\delta = \frac{1-\beta}{3}$, giving $$\Delta({\ensuremath {\mathscr{P}}}_{n,FV}^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0})=O\Big(n^{\frac{\beta-1}{6}} \big(\ln n\big)^{\frac{5}{2}}\Big) = O\Big(T_n^{-\frac{1}{6}} \big(\ln T_n\big)^\frac{5}{2}\Big).$$
Proof of Example \[ex3\] {#subsec:ch4ex3}
------------------------
Using the computations in , combined with $\big(f(y)-\hat f_m(y)\big)^2\leq 4 \exp(-2\lambda_0 y^3) \leq 4 \exp(-2\lambda_0 H(m)^3)$ for all $y \geq H(m)$, we obtain: $$\begin{aligned}
\int_{\varepsilon_m}^\infty \big|f(x) - \hat f_m(x)\big|^2 \nu_0(dx) &\lesssim \frac{H(m)^{7}}{(\varepsilon_m m)^{4}} + \int_{H(m)}^\infty \big|f(x) - \hat f_m(x)\big|^2 \nu_0(dx)\\
&\lesssim \frac{H(m)^{7}}{(\varepsilon_m m)^{4}} + \frac{e^{-2\lambda_0 H(m)^3}}{H(m)}.
\end{aligned}$$ As in Example \[ex:ch4esempi\], this bounds directly $H^2(f, \hat f_m)$ and $A_m^2(f)$. Again, the first part of the integral appearing in $B_m^2(f)$ is asymptotically smaller than the one appearing above: $$\begin{aligned}
B_m^2(f) &= \sum_{j=1}^m \bigg(\frac{1}{\sqrt{\mu_m}} \int_{J_j} \sqrt{f} \nu_0 - \sqrt{\int_{J_j} f(x) \nu_0(dx)}\bigg)^2\\
&\lesssim \frac{H(m)^{7}}{(\varepsilon_m m)^{4}} + \sum_{k=1}^{\frac{\varepsilon_m m}{H(m)}} \bigg( \frac{1}{\sqrt{\mu_m}} \int_{J_{m-k}} \sqrt{f} \nu_0 - \sqrt{\int_{J_{m-k}} f(x) \nu_0(dx)}\bigg)^2\\
&\lesssim \frac{H(m)^{7}}{(\varepsilon_m m)^{4}} + \frac{e^{-\lambda_0 H(m)^3}}{H(m)}.
\end{aligned}$$ As above, for the last inequality we have bounded $f$ in each $J_{m-k}$, $k \leq \frac{\varepsilon_m m}{H(m)}$, with $\exp(-\lambda_0 H(m)^3)$. Thus the global rate of convergence of $L_2(f,\hat f_m)^2 + A_m^2(f) + B_m^2(f)$ is $\frac{H(m)^{7}}{(\varepsilon_m m)^{4}} + \frac{e^{-\lambda_0 H(m)^3}}{H(m)}$.
Concerning $C_m(f)$, we have $C_m^2(f) = \int_0^{\varepsilon_m} \frac{(\sqrt{f(x)} - 1)^2}{x^2} dx \lesssim \varepsilon_m^5$. To write the global rate of convergence of the Le Cam distance in the discrete setting we make the choice $H(m) = \sqrt[3]{\frac{\eta}{\lambda_0}\ln m}$, for some constant $\eta$, and obtain: $$\begin{aligned}
\Delta({\ensuremath {\mathscr{Q}}}_{n}^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0}) &= O \bigg( \frac{\sqrt{n} \Delta_n}{\varepsilon_m} + \frac{m \ln m}{\sqrt{n}} + \sqrt{n \Delta_n} \Big( \frac{(\ln m)^{\frac{7}{6}}}{(\varepsilon_m m)^2} + \frac{m^{-\frac{\eta}{2}}}{\sqrt[3]{\ln m}} \Big) + \sqrt[4]{n^2 \Delta_n \varepsilon_m^5}\bigg).
\end{aligned}$$ Letting $\Delta_n = n^{-\beta}$, $\varepsilon_m = n^{-\alpha}$ and $m = n^\delta$, optimal choices give $\alpha = \frac{\beta}{3}$ and $\delta = \frac{1}{3}+\frac{\beta}{18}$. We can also take $\eta = 2$ to get a final rate of convergence: $$\Delta({\ensuremath {\mathscr{Q}}}_{n}^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0}) = \begin{cases}
O\big(n^{\frac{1}{2} - \frac{2}{3}\beta}\big)& \text{if } \frac{3}{4} < \beta < \frac{12}{13}\\
O\big(n^{-\frac{1}{6}+\frac{\beta}{18}} (\ln n)^{\frac{7}{6}}\big) &\text{if } \frac{12}{13} \leq \beta < 1.
\end{cases}$$
In the continuous setting, we have $$\Delta({\ensuremath {\mathscr{P}}}_{n}^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0})=O\bigg(\sqrt{n\Delta_n} \Big( \frac{(\ln m)^\frac{7}{6}}{(\varepsilon_m m)^2} + \frac{m^{-\frac{\eta}{2}}}{\sqrt[3]{\ln m}} + \varepsilon_m^{\frac{5}{2}}\Big) + \sqrt{\frac{\varepsilon_m m^2}{n\Delta_n}} \bigg).$$ Using $T_n = n\Delta_n$, $\varepsilon_m = T_n^{-\alpha}$ and $m = T_n^\delta$, optimal choices are given by $\alpha = \frac{4}{17}$, $\delta = \frac{9}{17}$; choosing any $\eta \geq 3$ we get the rate of convergence $$\Delta({\ensuremath {\mathscr{P}}}_{n}^{\nu_0},{\ensuremath {\mathscr{W}}}_n^{\nu_0})=O\big(T_n^{-\frac{3}{34}} (\ln T_n)^{\frac{7}{6}}\big).$$
Background
==========
Le Cam theory of statistical experiments {#sec:ch4lecam}
----------------------------------------
A *statistical model* or *experiment* is a triplet ${\ensuremath {\mathscr{P}}}_j=({\ensuremath {\mathscr{X}}}_j,{\ensuremath {\mathscr{A}}}_j,\{P_{j,\theta}; \theta\in\Theta\})$ where $\{P_{j,\theta}; \theta\in\Theta\}$ is a family of probability distributions all defined on the same $\sigma$-field ${\ensuremath {\mathscr{A}}}_j$ over the *sample space* ${\ensuremath {\mathscr{X}}}_j$ and $\Theta$ is the *parameter space*. The *deficiency* $\delta({\ensuremath {\mathscr{P}}}_1,{\ensuremath {\mathscr{P}}}_2)$ of ${\ensuremath {\mathscr{P}}}_1$ with respect to ${\ensuremath {\mathscr{P}}}_2$ quantifies “how much information we lose” by using ${\ensuremath {\mathscr{P}}}_1$ instead of ${\ensuremath {\mathscr{P}}}_2$ and it is defined as $\delta({\ensuremath {\mathscr{P}}}_1,{\ensuremath {\mathscr{P}}}_2)=\inf_K\sup_{\theta\in \Theta}||KP_{1,\theta}-P_{2,\theta}||_{TV},$ where TV stands for “total variation” and the infimum is taken over all “transitions” $K$ (see [@lecam], page 18). The general definition of transition is quite involved but, for our purposes, it is enough to know that Markov kernels are special cases of transitions. By $KP_{1,\theta}$ we mean the image measure of $P_{1,\theta}$ via the Markov kernel $K$, that is $$KP_{1,\theta}(A)=\int_{{\ensuremath {\mathscr{X}}}_1}K(x,A)P_{1,\theta}(dx),\quad\forall A\in {\ensuremath {\mathscr{A}}}_2.$$ The experiment $K{\ensuremath {\mathscr{P}}}_1=({\ensuremath {\mathscr{X}}}_2,{\ensuremath {\mathscr{A}}}_2,\{KP_{1,\theta}; \theta\in\Theta\})$ is called a *randomization* of ${\ensuremath {\mathscr{P}}}_1$ by the Markov kernel $K$. When the kernel $K$ is deterministic, that is $K(x,A)={\ensuremath {\mathbb{I}}}_{A}S(x)$ for some random variable $S:({\ensuremath {\mathscr{X}}}_1,{\ensuremath {\mathscr{A}}}_1)\to({\ensuremath {\mathscr{X}}}_2,{\ensuremath {\mathscr{A}}}_2)$, the experiment $K{\ensuremath {\mathscr{P}}}_1$ is called the *image experiment by the random variable* $S$. 
The Le Cam distance is defined as the symmetrization of $\delta$ and it defines a pseudometric. When $\Delta({\ensuremath {\mathscr{P}}}_1,{\ensuremath {\mathscr{P}}}_2)=0$ the two statistical models are said to be *equivalent*. Two sequences of statistical models $({\ensuremath {\mathscr{P}}}_{1}^n)_{n\in{\ensuremath {\mathbb{N}}}}$ and $({\ensuremath {\mathscr{P}}}_{2}^n)_{n\in{\ensuremath {\mathbb{N}}}}$ are called *asymptotically equivalent* if $\Delta({\ensuremath {\mathscr{P}}}_{1}^n,{\ensuremath {\mathscr{P}}}_{2}^n)$ tends to zero as $n$ goes to infinity. A very interesting feature of the Le Cam distance is that it can be also translated in terms of statistical decision theory. Let ${\ensuremath {\mathscr{D}}}$ be any (measurable) decision space and let $L:\Theta\times {\ensuremath {\mathscr{D}}}\mapsto[0,\infty)$ denote a loss function. Let $\|L\|=\sup_{(\theta,z)\in\Theta\times{\ensuremath {\mathscr{D}}}}L(\theta,z)$. Let $\pi_i$ denote a (randomized) decision procedure in the $i$-th experiment. Denote by $R_i(\pi_i,L,\theta)$ the risk from using procedure $\pi_i$ when $L$ is the loss function and $\theta$ is the true value of the parameter. Then, an equivalent definition of the deficiency is: $$\begin{aligned}
\delta({\ensuremath {\mathscr{P}}}_1,{\ensuremath {\mathscr{P}}}_2)=\inf_{\pi_1}\sup_{\pi_2}\sup_{\theta\in\Theta}\sup_{L:\|L\|=1}\big|R_1(\pi_1,L,\theta)-R_2(\pi_2,L,\theta)\big|.\end{aligned}$$ Thus $\Delta({\ensuremath {\mathscr{P}}}_1,{\ensuremath {\mathscr{P}}}_2)<\varepsilon$ means that for every procedure $\pi_i$ in problem $i$ there is a procedure $\pi_j$ in problem $j$, $\{i,j\}=\{1,2\}$, with risks differing by at most $\varepsilon$, uniformly over all bounded $L$ and $\theta\in\Theta$. In particular, when minimax rates of convergence in a nonparametric estimation problem are obtained in one experiment, the same rates automatically hold in any asymptotically equivalent experiment. There is more: When explicit transformations from one experiment to another are obtained, statistical procedures can be carried over from one experiment to the other one.
There are various techniques to bound the Le Cam distance. We report below only the properties that are useful for our purposes. For the proofs see, e.g., [@lecam; @strasser].
\[ch4delta0\] Let ${\ensuremath {\mathscr{P}}}_j=({\ensuremath {\mathscr{X}}},{\ensuremath {\mathscr{A}}},\{P_{j,\theta}; \theta\in\Theta\})$, $j=1,2$, be two statistical models having the same sample space and define $\Delta_0({\ensuremath {\mathscr{P}}}_1,{\ensuremath {\mathscr{P}}}_2):=\sup_{\theta\in\Theta}\|P_{1,\theta}-P_{2,\theta}\|_{TV}.$ Then, $\Delta({\ensuremath {\mathscr{P}}}_1,{\ensuremath {\mathscr{P}}}_2)\leq \Delta_0({\ensuremath {\mathscr{P}}}_1,{\ensuremath {\mathscr{P}}}_2)$.
In particular, Property \[ch4delta0\] allows us to bound the Le Cam distance between statistical models sharing the same sample space by means of classical bounds for the total variation distance. To that aim, we collect below some useful results.
\[ch4h\] Let $P_1$ and $P_2$ be two probability measures on ${\ensuremath {\mathscr{X}}}$, dominated by a common measure $\xi$, with densities $g_{i}=\frac{dP_{i}}{d\xi}$, $i=1,2$. Define $$\begin{aligned}
L_1(P_1,P_2)&=\int_{{\ensuremath {\mathscr{X}}}} |g_{1}(x)-g_{2}(x)|\xi(dx), \\
H(P_1,P_2)&=\bigg(\int_{{\ensuremath {\mathscr{X}}}} \Big(\sqrt{g_{1}(x)}-\sqrt{g_{2}(x)}\Big)^2\xi(dx)\bigg)^{1/2}.
\end{aligned}$$ Then, $$\|P_1-P_2\|_{TV}=\frac{1}{2}L_1(P_1,P_2)\leq H(P_1,P_2).$$
\[ch4hp\] Let $P$ and $Q$ be two product measures defined on the same sample space: $P=\otimes_{i=1}^n P_i$, $Q=\otimes_{i=1}^n Q_i$. Then $$H ^2(P,Q)\leq \sum_{i=1}^nH^2(P_i,Q_i).$$
\[fact:ch4hellingerpoisson\] Let $P_i$, $i=1,2$, be the law of a Poisson random variable with mean $\lambda_i$. Then $$H^2(P_1,P_2)=1-\exp\bigg(-\frac{1}{2}\Big(\sqrt{\lambda_1}-\sqrt{\lambda_2}\Big)^2\bigg).$$
\[fact:ch4gaussiane\] Let $Q_1\sim{\ensuremath {\mathscr{Nn}}}(\mu_1,\sigma_1^2)$ and $Q_2\sim{\ensuremath {\mathscr{Nn}}}(\mu_2,\sigma_2^2)$. Then $$\|Q_1-Q_2\|_{TV}\leq \sqrt{2\bigg(1-\frac{\sigma_1^2}{\sigma_2^2}\bigg)^2+\frac{(\mu_1-\mu_2)^2}{2\sigma_2^2}}.$$
\[fact:ch4processigaussiani\] For $i=1,2$, let $Q_i$ be the law on $(C,{\ensuremath {\mathscr{C}}})$ of two Gaussian processes of the form $$X^i_t=\int_{0}^t h_i(s)ds+ \int_0^t \sigma(s)dW_s,\ t\in[0,T]$$ where $h_i\in L_2({\ensuremath {\mathbb{R}}})$ and $\sigma\in{\ensuremath {\mathbb{R}}}_{>0}$. Then: $$L_1\big(Q_1,Q_2\big)\leq \sqrt{\int_{0}^T\frac{\big(h_1(s)-h_2(s)\big)^2}{\sigma^2(s)}ds}.$$
\[ch4fatto3\] Let ${\ensuremath {\mathscr{P}}}_i=({\ensuremath {\mathscr{X}}}_i,{\ensuremath {\mathscr{A}}}_i,\{P_{i,\theta}, \theta\in\Theta\})$, $i=1,2$, be two statistical models. Let $S:{\ensuremath {\mathscr{X}}}_1\to{\ensuremath {\mathscr{X}}}_2$ be a sufficient statistics such that the distribution of $S$ under $P_{1,\theta}$ is equal to $P_{2,\theta}$. Then $\Delta({\ensuremath {\mathscr{P}}}_1,{\ensuremath {\mathscr{P}}}_2)=0$.
\[ch4independentkernels\] Let $P_i$ be a probability measure on $(E_i,\mathcal{E}_i)$ and $K_i$ a Markov kernel on $(G_i,\mathcal G_i)$. One can then define a Markov kernel $K$ on $(\prod_{i=1}^n E_i,\otimes_{i=1}^n \mathcal{G}_i)$ in the following way: $$K(x_1,\dots,x_n; A_1\times\dots\times A_n):=\prod_{i=1}^nK_i(x_i,A_i),\quad \forall x_i\in E_i,\ \forall A_i\in \mathcal{G}_i.$$ Clearly $K\otimes_{i=1}^nP_i=\otimes_{i=1}^nK_iP_i$.
Finally, we recall the following result that allows us to bound the Le Cam distance between Poisson and Gaussian variables.
\[ch4teomisto\](See [@BC04], Theorem 4) Let $\tilde P_{\lambda}$ be the law of a Poisson random variable $\tilde X_{\lambda}$ with mean $\lambda$. Furthermore, let $P_{\lambda}^*$ be the law of a random variable $Z^*_{\lambda}$ with Gaussian distribution ${\ensuremath {\mathscr{Nn}}}(2\sqrt{\lambda},1)$, and let $\tilde U$ be a uniform variable on $\big[-\frac{1}{2},\frac{1}{2}\big)$ independent of $\tilde X_{\lambda}$. Define $$\tilde Z_{\lambda}=2\textnormal{sgn}\big(\tilde X_{\lambda}+\tilde U\big)\sqrt{\big|\tilde X_{\lambda}+\tilde U\big|}.$$ Then, denoting by $P_{\lambda}$ the law of $\tilde Z_{\lambda}$, $$H ^2\big(P_{\lambda}, P_{\lambda}^*\big)=O(\lambda^{-1}).$$
Thanks to Theorem \[ch4teomisto\], denoting by $\Lambda$ a subset of ${\ensuremath {\mathbb{R}}}_{>0}$, by $\tilde {\ensuremath {\mathscr{P}}}$ (resp. ${\ensuremath {\mathscr{P}}}^*$) the statistical model associated with the family of probabilities $\{\tilde P_\lambda: \lambda \in \Lambda\}$ (resp. $\{P_\lambda^* : \lambda \in \Lambda\}$), we have $$\Delta\big(\tilde {\ensuremath {\mathscr{P}}}, {\ensuremath {\mathscr{P}}}^*\big) \leq \sup_{\lambda \in \Lambda} \frac{C}{\lambda},$$ for some constant $C$. Indeed, the correspondence associating $\tilde Z_\lambda$ to $\tilde X_\lambda$ defines a Markov kernel; conversely, associating to $\tilde Z_\lambda$ the closest integer to its square, defines a Markov kernel going in the other direction.
Lévy processes {#sec:ch4levy}
--------------
A stochastic process $\{X_t:t\geq 0\}$ on ${\ensuremath {\mathbb{R}}}$ defined on a probability space $(\Omega,{\ensuremath {\mathscr{A}}},{\ensuremath {\mathbb{P}}})$ is called a *Lévy process* if the following conditions are satisfied.
1. $X_0=0$ ${\ensuremath {\mathbb{P}}}$-a.s.
2. For any choice of $n\geq 1$ and $0\leq t_0<t_1<\ldots<t_n$, the random variables $X_{t_0}$, $X_{t_1}-X_{t_0},\dots ,X_{t_n}-X_{t_{n-1}}$ are independent.
3. The distribution of $X_{s+t}-X_s$ does not depend on $s$.
4. There is $\Omega_0\in {\ensuremath {\mathscr{A}}}$ with ${\ensuremath {\mathbb{P}}}(\Omega_0)=1$ such that, for every $\omega\in \Omega_0$, $X_t(\omega)$ is right-continuous in $t\geq 0$ and has left limits in $t>0$.
5. It is stochastically continuous.
Thanks to the *Lévy-Khintchine formula*, the characteristic function of any Lévy process $\{X_t\}$ can be expressed, for all $u$ in ${\ensuremath {\mathbb{R}}}$, as: $$\label{caratteristica}
{\ensuremath {\mathbb{E}}}\big[e^{iuX_t}\big]=\exp\bigg(t\Big(iub-\frac{u^2\sigma^2}{2}-\int_{{\ensuremath {\mathbb{R}}}}(1-e^{iuy}+iuy{\ensuremath {\mathbb{I}}}_{\vert y\vert \leq 1})\nu(dy)\Big)\bigg),$$ where $b,\sigma\in {\ensuremath {\mathbb{R}}}$ and $\nu$ is a measure on ${\ensuremath {\mathbb{R}}}$ satisfying $$\nu(\{0\})=0 \textnormal{ and } \int_{{\ensuremath {\mathbb{R}}}}(|y|^2\wedge 1)\nu(dy)<\infty.$$ In the sequel we shall refer to $(b,\sigma^2,\nu)$ as the characteristic triplet of the process $\{X_t\}$ and $\nu$ will be called the *Lévy measure*. This data characterizes uniquely the law of the process $\{X_t\}$.
Let $D=D([0,\infty),{\ensuremath {\mathbb{R}}})$ be the space of mappings $\omega$ from $[0,\infty)$ into ${\ensuremath {\mathbb{R}}}$ that are right-continuous with left limits. Define the *canonical process* $x:D\to D$ by $$\forall \omega\in D,\quad x_t(\omega)=\omega_t,\;\;\forall t\geq 0.$$
Let ${\ensuremath {\mathscr{D}}}_t$ and ${\ensuremath {\mathscr{D}}}$ be the $\sigma$-algebras generated by $\{x_s:0\leq s\leq t\}$ and $\{x_s:0\leq s<\infty\}$, respectively (here, we use the same notations as in [@sato]).
By the condition (4) above, any Lévy process on ${\ensuremath {\mathbb{R}}}$ induces a probability measure $P$ on $(D,{\ensuremath {\mathscr{D}}})$. Thus $\{X_t\}$ on the probability space $(D,{\ensuremath {\mathscr{D}}},P)$ is identical in law with the original Lévy process. By saying that $(\{x_t\},P)$ is a Lévy process, we mean that $\{x_t:t\geq 0\}$ is a Lévy process under the probability measure $P$ on $(D,{\ensuremath {\mathscr{D}}})$. For all $t>0$ we will denote by $P_t$ the restriction of $P$ to ${\ensuremath {\mathscr{D}}}_t$. In the case where $\int_{|y|\leq 1}|y|\nu(dy)<\infty$, we set $\gamma^{\nu}:=\int_{|y|\leq 1}y\nu(dy)$. Note that, if $\nu$ is a finite Lévy measure, then the process having characteristic triplet $(\gamma^{\nu},0,\nu)$ is a compound Poisson process.
Here and in the sequel we will denote by $\Delta x_r$ the jump of process $\{x_t\}$ at the time $r$: $$\Delta x_r = x_r - \lim_{s \uparrow r} x_s.$$ For the proof of Theorems \[ch4teo1\], \[ch4teo2\] we also need some results on the equivalence of measures for Lévy processes. By the notation $\ll$ we will mean “is absolutely continuous with respect to”.
\[ch4teosato\] Let $P^1$ (resp. $P^2$) be the law induced on $(D,{\ensuremath {\mathscr{D}}})$ by a Lévy process of characteristic triplet $(\eta,0,\nu_1)$ (resp. $(0,0,\nu_2)$), where $$\label{ch4gamma*}
\eta=\int_{\vert y \vert \leq 1}y(\nu_1-\nu_2)(dy)$$ is supposed to be finite. Then $P_t^1\ll P_t^2$ for all $t\geq 0$ if and only if $\nu_1\ll\nu_2$ and the density $\frac{d\nu_1}{d\nu_2}$ satisfies $$\label{ch4Sato}
\int\bigg(\sqrt{\frac{d\nu_1}{d\nu_2}(y)}-1\bigg)^2\nu_2(dy)<\infty.$$ Remark that the finiteness in implies that in . When $P_t^1\ll P_t^2$, the density is $$\frac{dP_t^1}{dP_t^2}(x)=\exp(U_t(x)),$$ with $$\label{ch4U}
U_t(x)=\lim_{\varepsilon\to 0} \bigg(\sum_{r\leq t}\ln \frac{d\nu_1}{d\nu_2}(\Delta x_r){\ensuremath {\mathbb{I}}}_{\vert\Delta x_r\vert>\varepsilon}-
\int_{\vert y\vert > \varepsilon} t\bigg(\frac{d\nu_1}{d\nu_2}(y)-1\bigg)\nu_2(dy)\bigg),\\ P^{(0,0,\nu_2)}\textnormal{-a.s.}$$ The convergence in is uniform in $t$ on any bounded interval, $P^{(0,0,\nu_2)}$-a.s. Besides, $\{U_t(x)\}$ defined by is a Lévy process satisfying ${\ensuremath {\mathbb{E}}}_{P^{(0,0,\nu_2)}}[e^{U_t(x)}]=1$, $\forall t\geq 0$.
Finally, let us consider the following result giving an explicit bound for the $L_1$ and the Hellinger distances between two Lévy processes of characteristic triplets of the form $(b_i,0,\nu_i)$, $i=1,2$ with $b_1-b_2=\int_{\vert y \vert \leq 1}y(\nu_1-\nu_2)(dy)$.
\[teo:ch4bound\] For any $0<T<\infty$, let $P_T^i$ be the probability measure induced on $(D,{\ensuremath {\mathscr{D}}}_T)$ by a Lévy process of characteristic triplet $(b_i,0,\nu_i)$, $i=1,2$ and suppose that $\nu_1\ll\nu_2$.
If $H^2(\nu_1,\nu_2):=\int\big(\sqrt{\frac{d\nu_1}{d\nu_2}(y)}-1\big)^2\nu_2(dy)<\infty,$ then $$H^2(P_T^1,P_T^2)\leq \frac{T}{2}H^2(\nu_1,\nu_2).$$
We conclude the Appendix with a technical statement about the Le Cam distance for finite variation models.
\[ch4LC\] $$\Delta({\ensuremath {\mathscr{P}}}_n^{\nu_0},{\ensuremath {\mathscr{P}}}_{n,FV}^{\nu_0})=0.$$
Consider the Markov kernels $\pi_1$, $\pi_2$ defined as follows $$\pi_1(x,A)={\ensuremath {\mathbb{I}}}_{A}(x^d), \quad
\pi_2(x,A)={\ensuremath {\mathbb{I}}}_{A}(x-\cdot \gamma^{\nu_0}),
\quad \forall x\in D, A \in {\ensuremath {\mathscr{D}}},$$ where we have denoted by $x^d$ the discontinuous part of the trajectory $x$, i.e. $\Delta x_r = x_r - \lim_{s \uparrow r} x_s,\ x_t^d=\sum_{r \leq t}\Delta x_r$ and by $x-\cdot \gamma^{\nu_0}$ the trajectory $x_t-t\gamma^{\nu_0}$, $t\in[0,T_n]$. On the one hand we have: $$\begin{aligned}
\pi_1 P^{(\gamma^{\nu-\nu_0},0,\nu)}(A)&=\int_D \pi_1(x,A)P^{(\gamma^{\nu-\nu_0},0,\nu)}(dx)=\int_D {\ensuremath {\mathbb{I}}}_A(x^d)P^{(\gamma^{\nu-\nu_0},0,\nu)}(dx)\\
&=P^{(\gamma^{\nu},0,\nu)}(A),\end{aligned}$$ where in the last equality we have used the fact that, under $P^{(\gamma^{\nu-\nu_0},0,\nu)}$, $\{x_t^d\}$ is a Lévy process with characteristic triplet $(\gamma^{\nu},0,\nu)$ (see [@sato], Theorem 19.3). On the other hand: $$\begin{aligned}
\pi_2 P^{(\gamma^{\nu},0,\nu)}(A)&=\int_D \pi_2(x,A)P^{(\gamma^{\nu},0,\nu)}(dx)=\int_D {\ensuremath {\mathbb{I}}}_A(x-\cdot \gamma^{\nu_0})P^{(\gamma^{\nu},0,\nu)}(dx)\\
&=P^{(\gamma^{\nu-\nu_0},0,\nu)}(A),\end{aligned}$$ since, by definition, $\gamma^{\nu}-\gamma^{\nu_0}$ is equal to $\gamma^{\nu-\nu_0}$. The conclusion follows by the definition of the Le Cam distance.
Acknowledgements {#acknowledgements .unnumbered}
----------------
I am very grateful to Markus Reiss for several interesting discussions and many insights; this paper would never have existed in the present form without his advice and encouragement. My deepest thanks go to the anonymous referee, whose insightful comments have greatly improved the exposition of the paper; some gaps in the proofs have been corrected thanks to his/her remarks.
|
|
Moss (Physcomitrella patens) GH3 proteins act in auxin homeostasis.
Auxins are hormones involved in many cellular, physiological and developmental processes in seed plants and in mosses such as Physcomitrella patens. Control of auxin levels is achieved in higher plants via synthesis of auxin conjugates by members of the GH3 family. The role of the two GH3-like proteins from P. patens for growth and auxin homeostasis was therefore analysed. The in vivo-function of the two P. patens GH3 genes was investigated using single and double knockout mutants. The two P. patens GH3 proteins were also heterologously expressed to determine their enzymatic activity. Both P. patens GH3 enzymes accepted the auxin indole acetic acid (IAA) as substrate, but with different preferences for the amino acid to which it is attached. Cytoplasmic localization was shown for PpGH3-1 tagged with green fluorescent protein (GFP). Targeted knock-out of either gene exhibited an increased sensitivity to auxin, resulting in growth inhibition. On plain mineral media mutants had higher levels of free IAA and less conjugated IAA than the wild type, and this effect was enhanced when auxin was supplied. The ΔPpGH3-1/ΔPpGH3-2 double knockout had almost no IAA amide conjugates but still synthesized ester conjugates. Taken together, these data suggest a developmentally controlled involvement of P. patens GH3 proteins in auxin homeostasis by conjugating excess of physiologically active free auxin to inactive IAA-amide conjugates.
|
|
Deposits in your Bank of Internet savings account are fully FDIC insured, so your money is absolutely safe when you invest your funds in a Bank of Internet account.
The Bank of Internet online savings account has no maintenance fees, so it’s a great opportunity to earn a high interest rate with a free online bank account.
There are no monthly maintenance fees for this Bank of Internet account, plus there are no minimum balance requirements and no direct deposit requirements to avoid fees or to earn the great interest rate.
There is a $100 minimum opening deposit requirement, but once you open your account, you are not required to maintain a minimum balance thereafter to avoid fees or to earn the high APY.
The Bank of Internet High Yield Savings Account provides free online statements, and an ATM card is also available if needed.
You can also open this online savings account in conjunction with a free High Interest Checking Account from Bank of Internet for easy transfers between Bank of Internet accounts.
Check out our Bank of Internet Review for more details on Bank of Internet online banking services including money market accounts and CDs as well as home equity loans and home mortgage refinancing.
Then compare the Bank of Internet savings account with other High APY Online Bank Rates before opening this fee-free online bank account.
Open a High Yield Savings Account from Bank of Internet today to take advantage of the high interest rate with no fees for online banking.
|
|
Summer Flowers at Danckerts
Summer is now well and truly on its way now as we come upon another Bank Holiday this weekend.
We have some lovely gardens plants and pots at the shop, as well as a new range of "Vivid Arts" garden animals on display, which are a fantastically realistic range of life size animals and birds to enhance the garden...from frogs to foxes, and rabbits to robins, pop in and take a look!
The gardens in Wednesbury are going to be coming alive with plants, animals, and barbies! The summer flower collection is now in full swing, with some delightful bouquets and vases full of Snaps, Sweet Williams, and other summer favourites.
Keep in touch via Facebook, and we'll keep you notified of any Special Offers that are coming up!
We recently had St Georges day, and the St Georges Day March was hugely popular, starting at Stone Cross, just past the Wednesbury/ West Bromwich border, and finishing up at Dartmouth Park in the Sandwell Valley.
|
|
From 1 July 2018, the Tax Office is advising Australians that if they find an error in their tax return or activity statement, they will not incur a penalty; instead, the ATO will advise them of the error and how to get it right next time.
Penalty relief will only apply to eligible taxpayers or entities (i.e., turnover of less than $10 million) every three years.
Eligible individuals will only be given penalty relief on their tax return or activity statement if they make an inadvertent error because they either:
– took a position on income tax that is not reasonably arguable, or
– failed to take reasonable care
The ATO will not provide penalty relief when individuals have (in the past three years):
– Received penalty relief
– Avoided tax payment or committed fraud
– Accrued taxation debts with no intention of being able to pay (i.e., phoenix activity)
– Previously penalised for reckless or intentional disregard of the law
– Participated in the management or control of another entity which has evaded tax.
Individuals cannot apply for penalty relief. The ATO is reminding individuals that it will provide relief during an audit should it apply.
Penalty relief will not be applied to:
– Wealthy individuals and their businesses
– Associates of wealthy individuals (that may be deemed a small business entity in their own right)
– Public groups, significant global entities and associates
Penalty relief will also not be applied to certain taxes, i.e., fringe benefits tax (FBT) or super guarantee (SG).
|
|
[Central venous dialysis catheter. Silicone rubber dialysis catheter used for permanent vascular access].
51 dual lumen jugularis dialysis catheters (Permcath, Quinton) were placed by surgical technique in 34 patients and by percutaneous technique in eight patients. Mean catheter life-time was 4.1 months. Seven catheters were removed due to complications (infection three catheters, clotting four catheters). Minor flow problems occurred in 8.8% of all procedures. Seven occluded catheters were successfully reopened by use of locally applied streptokinase. A strict aseptic technique is essential to avoid infection. Permcath is an acceptable vascular access device for patients in whom it is impossible to create an arterio-venous fistula.
|
|
Marine Air Control Group 38
Marine Air Control Group 38 (MACG-38) is a United States Marine Corps aviation command and control unit based at Marine Corps Air Station Miramar that is currently composed of five squadrons and one battalion that provide the 3rd Marine Aircraft Wing's tactical headquarters, positive and procedural control to aircraft, and air defense support for the I Marine Expeditionary Force.
Mission
Subordinate units
3rd Low Altitude Air Defense Battalion
Marine Air Control Squadron 1
Marine Air Support Squadron 3
Marine Tactical Air Command Squadron 38
Marine Wing Communications Squadron 38
History
Marine Air Control Group 38 was activated on September 1, 1967 at Marine Corps Air Station El Toro, California. The Group deployed to Saudi Arabia in August 1990 and later supported Operation Desert Storm. Elements of the group have supported Operation Restore Hope, Operation Safe Departure, Operation Southern Watch and Operation Stabilise. The group relocated to MCAS Miramar in October 1998. MACG-38 units began deploying to Kuwait in 2002 and the entire control group would eventually take part in the 2003 invasion of Iraq and continued to deploy today in support of Operation Iraqi Freedom through early 2009. They were headquartered at Al Asad Airbase in the Al Anbar Province from 2004 through the end of their last Iraq deployment in early 2009.
Most recently the Group deployed to Camp Leatherneck, Afghanistan in March 2010. They are responsible for providing aviation command and control for the I Marine Expeditionary Force (I MEF) in support of Operation Enduring Freedom. They returned to The United States in Spring of 2011.
See also
United States Marine Corps Aviation
List of United States Marine Corps aircraft groups
List of United States Marine Corps aircraft squadrons
References
External links
Category:United States Marine Corps air control groups
Category:Military units and formations in California
|
|
The verbals: sports quotes of 1994
There are no small accidents on this circuit. Ayrton Senna, before the San Marino Grand Prix, during which he suffered a fatal crash.
One of my best friends has been killed on the curve where I escaped death. I was lucky; he wasn't. It's like having a cheque book. You start pulling out the pages until one day no pages are left. He was the one driver so perfect nobody thought anything could happen to him. Gerhard Berger, Formula 1 driver, on Ayrton Senna.
It was at the bottom of our hearts to dedicate this victory to our great friend, Ayrton Senna. He was also heading for his fourth title. Claudio Taffarel, Brazil's goalkeeper, following victory in the World Cup final.
There will never be another Senna. The poet of speed is dead. El Diario, Bolivian sports newspaper.
Senna was the greatest driver ever and when someone like him is killed you have to ask yourself what is the point of it all. Niki Lauda.
When I saw him crash and realised there was no way he was going to be able to continue the race, I cheered with joy. I thought: `He'll be home earlier tonight'. Adrienne Galisteu, Senna's girlfriend.
|
|
**B Grade** CNPS12X Ultimate Performance Triple Fan CPU Cooler
Below is the original description for this product, any reference to warranty is to be ignored. Warranty for this item is 90 days as with all B Grade items.
B Grade items may have been used, have damaged packaging, missing accessories or a combination of these.
Some items may have scuff marks or slight scratches but should otherwise be an operable product.
Renowned for producing some of the world's best CPU coolers, Zalman have now released their newest flagship cooler, the CNPS12X. It is the world's first "out of the box" triple fan cooler and is compatible with Intel's latest LGA2011 Sandy Bridge E processors.
World's first "out of the box" triple fan CPU cooler: There are many CPU coolers available on the market that can accommodate three fans, but to make this happen at least one additional fan needs to be purchased, which adds to the expense. With the Zalman CNPS12X you get three 120mm blue LED fans built into the cooler so there are no extra costs. Also, all three fans run off one fan header, making powering the fans extremely easy.
Six W-DTH composite heatpipes for excellent heat transfer: First seen on the CNPS11X, composite heatpipes help transfer the heat from the CPU up to 50% faster than standard heatpipes. This helps to increase the performance of the cooler even further. The six heatpipes are U-shaped, which effectively doubles the heat transfer compared to non-U-shaped heatpipes. At the base of the cooler (where the heatpipes make contact with the CPU) the heatpipes utilise what Zalman call Whole-Direct Touch Heatpipes (W-DTH). This allows the heatpipes to make direct contact with the CPU, another feature to help increase performance. Not only that, but the area of the Direct Touch covers the whole CPU. Even the new Intel CPUs for LGA2011 will also be covered by W-DTH.
100% nickel plated with blue LED fans for amazing aesthetics: Most CPU coolers are hidden inside the computer case where they go about their business unseen. But if you like to show off the internals of the PC you may want a CPU cooler that looks the part, and boy, the CNPS12X does look the part! The entire heatsink of the CNPS12X is plated with "Black-Pearl" nickel for long-term corrosion resistance, while the deep "Black-Pearl" tone, along with the high intensity from the blue LED fans, helps this cooler stand head and shoulders above the rest.
|
|
Further studies on hepatitis C virus NS5A-SH3 domain interactions: identification of residues critical for binding and implications for viral RNA replication and modulation of cell signalling.
The NS5A protein of hepatitis C virus has been shown to interact with a subset of Src homology 3 (SH3) domain-containing proteins. The molecular mechanisms underlying these observations have not been fully characterized, therefore a previous analysis of NS5A-SH3 domain interactions was extended. By using a semi-quantitative ELISA assay, a hierarchy of binding between various SH3 domains for NS5A was demonstrated. Molecular modelling of a polyproline motif within NS5A (termed PP2.2) bound to the FynSH3 domain predicted that the specificity-determining RT-loop region within the SH3 domain did not interact directly with the PP2.2 motif. However, it was demonstrated that the RT loop did contribute to the specificity of binding, implicating the involvement of other intermolecular contacts between NS5A and SH3 domains. The modelling analysis also predicted a critical role for a conserved arginine located at the C terminus of the PP2.2 motif; this was confirmed experimentally. Finally, it was demonstrated that, in comparison with wild-type replicon cells, inhibition of the transcription factor AP-1, a function previously assigned to NS5A, was not observed in cells harbouring a subgenomic replicon containing a mutation within the PP2.2 motif. However, the ability of the mutated replicon to establish itself within Huh-7 cells was unaffected. The highly conserved nature of the PP2.2 motif within NS5A suggests that functions involving this motif are of importance, but are unlikely to play a role in replication of the viral RNA genome. It is more likely that they play a role in altering the cellular environment to favour viral persistence.
|
|
How do I edit my profile?
You have a profile on this site. It was created for you on registration. Having a profile means other users can recognize you when you leave a reply or like a comment. Please keep it up to date and all the fields filled.
To edit your profile simply click on your name in the top right corner.
Fill in any missing fields and make sure to click ‘Save Changes’ when you are finished.
|
|
You can make an appointment to meet with your Financial Aid counselor using Orange SUccess through MySlice. Once logged in, select 'Orange SUccess' under 'Advising' in the Student Services panel. Within your Orange SUccess portal, navigate to 'My Success Network,' select your financial aid advisor and schedule an appointment at a day and time convenient for you.
If your counselor is not available at a time that suits your schedule, please call or visit our office to schedule an appointment with the next available counselor.
|
|
Every industry has its own characteristics and requirements. For detailed benefits of our systems related to your industry please make a selection in the left menu. General benefits of using Hitec Power Protection rotary UPS systems are:
Most reliable system: The simple design has fewer components than, for example, static UPS systems. This highly improves the reliability (MTBF). Our systems have a lifetime expectancy of more than 25 years.
Most cost and energy efficient system: Operating efficiency of our systems can exceed 97%, because they do not require power conversion in the power path or a conditioned, energy consuming battery room during operation. You also do not need battery replacement every 3 to 5 years, resulting in a lower total cost of ownership (TCO) compared to, for example, static UPS technologies.
Most environmentally friendly system: Our rotary systems have high energy efficiency and do not use batteries. Static UPS systems, for example, produce a considerable amount of chemical waste during their lifetime, because batteries need to be replaced every 3 to 5 years. Click here to find out more about the environmental benefits of our systems.
Most space efficient system: A static UPS system requires a diesel generator set, power electronics, batteries and numerous auxiliary equipment. Our compact and simple diesel rotary UPS design combines all these components in one, reducing the footprint by 40 up to 60%.
|
|
Soluble di- and aminopeptidases in Escherichia coli K-12. Dispensable enzymes.
As part of a study of the peptidase content of Escherichia coli K-12, two peptidase-deficient amino acid auxotrophs isolated and characterized by Miller as pepD- (strain CM17) and pepD- pepN- pepA- pepB- pepQ- (strain CM89) were examined for the presence of several peptidases previously obtained from strain K-12 in this laboratory. The soluble fraction of each mutant was found to lack the broad-specificity strain K-12 dipeptidase DP and the strain CM89 fraction also lacked activity characteristic of the strain K-12 aminopeptidases AP, L, and OP; like strain CM17, strain CM89 contained the tripeptide-specific aminopeptidase TP. Strain CM89 (but not CM17) appeared to contain little if any activity attributable to the ribosome-bound aminopeptidase I of strain K-12. Whereas loss of DP, AP, OP, and aminopeptidase I activity may be attributed to the pepD-, pepB-, pepN-, and pepA- mutations, respectively, the reason for the loss of L activity remains uncertain. Growth responses of strain CM89 in liquid media containing di- or tripeptides were in accord with absence of enzymes catalyzing rapid hydrolysis of dipeptides. In synthetic liquid media supplemented with the required amino acids per se or with peptone, cultures of both CM strains grew more slowly than strain K-12 and produced smaller cell-yields than those produced by strain K-12.
|
|
1. Field of the Invention
The present invention relates generally to wireless communication systems, and more particularly, to the reporting of Power Headroom (PH) from a User Equipment (UE) in a wireless communication system that supports carrier aggregation.
2. Description of the Related Art
Mobile communication systems were originally designed to provide users with voice communication services while they are on the move. Current mobile communication systems are capable of supporting both voice communication services and data communication services for mobile users.
Standardization for a next generation of mobile communication technology for the 3rd Generation Partnership Project (3GPP) is being conducted for Long Term Evolution (LTE). LTE is a broadband packet-based communication technology that is expected to provide download speeds that improve upon existing data transmission rates by up to 100 Megabits per second (Mbps). In attempting to achieve such a high data rate, studies have been conducted that use a minimum number of nodes in connection with a simplified network topology, and that place a radio protocol as close as possible to radio channels.
FIG. 1 is a diagram illustrating an LTE wireless communication system. The LTE wireless communication system includes a plurality of Evolved Node Bs (ENBs) 105, 110, 115 and 120, a Mobility Management Entity (MME) 125, and a Serving Gateway (S-GW) 130. ENBs 105, 110, 115 and 120 are coupled to the S-GW 130, enabling a UE 135 to connect to a core network. The ENBs 105, 110, 115 and 120 correspond to Node Bs of a Universal Mobile Telecommunications System (UMTS) and perform more complex functions than those of a legacy Node B. In the LTE system, all user traffic, including real time services such as Voice over Internet Protocol (VoIP), are provided through a shared channel. Each of the ENBs 105, 110, 115 and 120 manage one or more cells, and are responsible for the collection of status information from UEs and for the scheduling of traffic.
In order to support transmission bandwidths of up to 20 megahertz (MHz), LTE employs Orthogonal Frequency Division Multiplexing (OFDM) as its basic modulation scheme. LTE also uses Adaptive Modulation and Coding (AMC) to improve data throughput. AMC varies downlink modulation and coding schemes based on channel conditions for each UE. The S-GW 130 is responsible for managing data bearers and establishes or releases data bearers under the control of the MME 125. The MME 125 is in communication with the S-GW 130 and is responsible for control plane functions.
FIG. 2 is a diagram illustrating a user plane protocol stack for use in the LTE architecture of FIG. 1. A mobile terminal, or UE, 200 has a protocol stack having a Packet Data Convergence Protocol (PDCP) layer 205, a Radio Link Control (RLC) layer 210, a Media Access Control (MAC) layer 215, and a Physical (PHY) layer 220. A base station, or ENB, 201 has a protocol stack having a PDCP layer 240, an RLC layer 235, a MAC layer 230, and a PHY layer 225. The PDCP layers 205 and 240 are responsible for Internet Protocol (IP) header compression/decompression. The RLC layers 210 and 235 pack the PDCP Packet Data Units (PDUs) into a size appropriate for transmission and perform an Automatic Repeat reQuest (ARQ) function. The MAC layers 215 and 230 serve multiple RLC layer entities. These layers are capable of multiplexing the RLC PDUs into a MAC PDU, and demultiplexing the MAC PDU into the RLC PDUs. The PHY layers 220 and 225 perform encoding and modulation on upper layer data for transmission through a radio channel, and perform demodulation and decoding on the OFDM symbol received through the radio channel for delivery to upper layers. A data unit that is input to a protocol entity is referred to as a Service Data Unit (SDU) and a data unit that is output from the protocol entity is referred to as a Protocol Data Unit.
A voice communication service of a wireless communication system requires a relatively small amount of dedicated bandwidth. However, a data communication service must allocate resources in consideration of a data amount and a channel condition so that transmission throughput may increase. Thus, a mobile communication system is provided with a scheduler that manages resource allocation with respect to available resources, channel conditions, an amount of transmission data, etc. Resource scheduling is also required in LTE, and a scheduler that is incorporated into a base station, or ENB, is used to manage radio transmission resources.
In order to meet International Mobile Telephony (IMT)-Advanced requirements that extend beyond those of IMT-2000, further technological advancements have allowed for the evolution of LTE into LTE-Advanced (LTE-A). LTE-A is provided with technological components, such as carrier aggregation, to fulfill the IMT-Advanced requirements. Carrier aggregation aggregates multiple carriers to form a larger bandwidth, thereby allowing a UE to transmit and receive data at higher data rates.
FIG. 3 is a schematic diagram illustrating an LTE-A wireless communication system supporting carrier aggregation. An ENB 305 operates on two different carriers 310 and 315, having center frequencies of f3 and f1, respectively. A conventional wireless communication system allows a UE 330 to communicate with the ENB 305 using only one of carriers 310 and 315. However, the LTE-A system supporting carrier aggregation enables the UE 330 to use both carriers 310 and 315 in order to increase transmission throughput. The maximum data rate between the ENB 305 and the UE 330 increases in proportion to the number of carriers that are aggregated.
Due to the fact that uplink transmissions cause inter-cell interference, it is preferable for a UE to calculate an uplink transmission power using a predetermined function, and to control uplink transmission based on the calculation. The predetermined function may utilize variables such as an allocated transmission resource amount, a Modulation and Coding Scheme (MCS), and a path loss value in calculating a required uplink transmission power. The uplink transmission power is limited to a UE maximum transmission power. When the required uplink transmission power is greater than the UE maximum transmission power, the UE performs the uplink transmission using the UE maximum transmission power. However, use of the maximum transmission power instead of the required transmission power degrades the uplink transmission quality. Thus, it is preferable for the ENB to perform scheduling for UE transmissions such that a required transmission power for the UE transmission will not exceed the UE maximum transmission power.
Some parameters utilized in scheduling at the ENB, such as channel path loss, are not capable of being measured at the ENB. When required, the UE may transmit a Power Headroom Report (PHR) to the ENB to report UE Power Headroom (PH) with respect to path loss. However, conventional uplink transmission power determination procedures are performed with respect to a single downlink carrier and a single uplink carrier. Thus, the conventional procedures are not applicable to the LTE-A system supporting carrier aggregation.
|
|
# Project-wide Gradle settings.
# IDE (e.g. Android Studio) users:
# Gradle settings configured through the IDE *will override*
# any settings specified in this file.
# For more details on how to configure your build environment visit
# http://www.gradle.org/docs/current/userguide/build_environment.html
# Specifies the JVM arguments used for the daemon process.
# The setting is particularly useful for tweaking memory settings.
org.gradle.jvmargs=-Xmx1024m
# When configured, Gradle will run in incubating parallel mode.
# This option should only be used with decoupled projects. More details, visit
# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
# org.gradle.parallel=true
|
|
Neighbors (novel)
Neighbors is a 1980 novel by American author Thomas Berger. It is a satire of manners and suburbia, and a comment on emotional alienation with echoes of the works of Franz Kafka. Earl Keese’s character and situation begin realistically but become increasingly fantastic. Keese is an Everyman whose life is swiftly turned upside down. As he scrambles to reclaim his sense of normalcy and dignity, he comes to think that everyone, including his family, is against him.
Plot summary
Earl Keese is a middle-aged, middle-class suburbanite with a wife, Enid, and teenage daughter, Elaine. Earl is content with his dull, unexceptional life, but this changes when a younger, less sophisticated couple, Harry and Ramona, move in next door. Harry is physically intimidating and vulgar; Ramona is sexually aggressive, and both impose themselves on the Keese household. Their free-spirited personalities and overbearing and boorish behavior endear them to Enid and Elaine, but Earl fears that he is losing control of his life and his family. Over the course of one night, the antagonism between Earl and his new neighbors escalates into suburban warfare.
Analysis
Berger's off-kilter tone blurs the line between paranoia and reality, defense and offense, action and intention, ally and adversary. Harry and Ramona seem to constantly undergo changes in their respective personalities and Enid and Elaine appear to choose sides against Earl at random, but Berger also implies that it is Earl’s sense of reality that is skewed and deluded.
Earl is frustrated because he can never prove that Harry and Ramona are doing anything wrong on purpose, and the more he attempts to expose them, the more ridiculous he makes himself. Yet Earl comes to realize that Harry and Ramona have served as the crucible of his redemption: being forced out of his comfort zone of complacency and habit has provided him with an excitement he has never known before. As Earl comes to recognize value in his neighbors, he realizes that his wife is a distrustful alcoholic, his daughter is an underachiever and petty thief, and that his new neighbors can provide him with an escape from his existence of insignificance and emotional impotence. From a nightmare comes hope and a strengthened resolve to survive. In his study of Berger, writer Stanley Trachtenberg describes Neighbors as an existentialist parable in which "the loss of coherence between various aspects of self comically fragments the notion of identity and thus fictionalizes the existential concept of authenticity as a shaping condition of it."
In a 1980 newspaper interview, Berger said of Neighbors, "As my 10th novel, begun at the close of my 20th year as a published novelist, it is appropriately a bizarre celebration of whatever gift I have, the strangest of all my narratives . . . the morality of this work, like that of all my other volumes, will be in doubt until the end of the narrative – and perhaps to the end of eternity, now that I think about it."
Characters
Earl Keese
Enid Keese
Elaine Keese
Harry
Ramona
Adaptations
A film version was released in 1981, starring John Belushi and Dan Aykroyd. It was also adapted into a play by Eve Summer, which premiered in Worcester, Massachusetts in 2007.
References
External links
NPR.org | Tom Perrotta Hails Suburban Sendup 'Neighbors'
Category:1980 American novels
Category:American novels adapted into films
Category:American novels adapted into plays
Category:Novels by Thomas Berger (novelist)
|
|
Longitudinal impedance variability in patients with chronically implanted DBS devices.
Deep brain stimulation (DBS) is an effective therapy for advanced movement disorders, but its optimal use is still controversial. One factor that could play a role in the proper delivery of therapeutic stimulation by current DBS devices is the variability of the impedance at the interface between the electrode surface and surrounding tissue. To analyze variability and trends in the impedance of chronically-implanted DBS electrodes in subjects with movement disorders. We reviewed impedance values from medical records of DBS patients at an academic tertiary-care movement disorders center. The standard deviation of data recorded within individual subjects and single contacts were used as measures of longitudinal impedance variability. A generalized linear mixed model (GLMM) determined if a number of effects had significant influences on impedance. We analyzed 2863 impedance measurements from 94 subjects. Median variability, for subjects with follow-up from 6 months to 5 years (n = 77), was 194 Ω for individual subjects and 141 Ω for individual contacts, with a range spanning from 18 to over 600 Ω. The GLMM, incorporating all subjects (n = 94), identified time, electrical activity, implanted target, contact position on the electrode and side of implantation as significant predictors of impedance. Age and disease duration at surgery, gender or ethnicity were not significant predictors. Our analysis suggests that a significant amount of impedance variability can be expected in chronically implanted DBS electrodes and indicates a number of factors with possible predictive value. Further studies are needed to link impedance characteristics to clinical outcomes.
|
|
Music from McLeod's Daughters
McLeod's Daughters has had many different songs for its closing credits, which were written by Posie Graeme-Evans & Chris Harriot and performed by singer Rebecca Lavelle, who also had a guest role in series 6 as Bindi Martin.
Song List
Other
Hey You by Abi Tucker who plays Grace McLeod from 2007 - 2008 and featured the song in Episode 196, My Enemy, My Friend.
List of Released Songs
Rebecca Lavelle
Understand Me
Common Ground
Never Enough
Don't Judge
Love You, Hate You
Heat
Am I Crazy?
We Got It Wrong
The Siren's Song
Hopeless Case
Just A Child
My Heart Is Like A River
Theme Song - Version 1
Hey Girl (You Got A New Life)
Take The Rain Away
The Stranger
Sometimes
Too Young
The First Touch
In His Eyes
By My Side
Did I Tell You?
Don't Give Up
Gentle Gentle (Life of Your Life)
Theme Song - Version 2
You Believed
Had To Happen
It Comes To This
Charlotte's Song
One True Thing
I Wish The Past Was Different
Locked Away Inside My Heart
Our Home, Our Place
Strip Jack Naked
Broken Dreams
This Perfect Day
Trust The Night
The Man I Loved (We Had No Time)
Time Turn Over
Drover's Run (My Heart's Home)
Abi Tucker
Hey You
Speak My Angel
List of Unreleased Songs
Feet on The Ground by Rebecca Lavelle
Room To Move by Rebecca Lavelle
A Matter of Time by Rebecca Lavelle
All I Ever Wanted was Love by Rebecca Lavelle
Alone & Afraid by Rebecca Lavelle
Belonging by Rebecca Lavelle
I Reach Out by Naomi Starr
Life Makes A Fool of Us by Rebecca Lavelle
Love is Endless by Rebecca Lavelle
Something So Strong by Rebecca Lavelle
Sorrow by Rebecca Lavelle
Stay by Rebecca Lavelle
Tears on My Pillow by Rebecca Lavelle & Glenda Linscott
Kate's Lullaby by Michala Banas
Wake Up Gungellan by Doris Younane (Abi Tucker & Gillian Alexy Short Clip)
Truckstop Woman by Doris Younane, Simmone Jade Mackinnon, Luke Jacobz, Gillian Alexy & Chorus
Forever by Doris Younane, Peter Hardy, Abi Tucker & Matt Passmore
References
External links
McLeod's Daughters Official Website
Dutch McLeod's Daughters Website
Category:McLeod's Daughters
|
|
The long term objective is to characterize key functionalities of the epithelial cells of the larval mosquito gut as these cellular functions influence and regulate the anionic basis of alkalinization of the gut lumen. A detailed understanding of how gut epithelial cells produce the remarkable and biologically unique pH extremes (i.e. > 10.5) that drive the digestive process will provide new avenues for the development of environmentally safe and specific larvacides. Two specific gene families have been targeted as they have central roles in anion production and transport in the gut alkalinization process: carbonic anhydrases and transmembrane anion transporters. This project will produce molecular and physiological characterizations of members of these two gene families. Their distributions in the tissue and specific roles in larval mosquito gut alkalinization will be defined. Specific cellular phenotypes throughout the gut will be defined and the role of each in the alkalinization process assessed. AIM 1 will examine the expression of multiple carbonic anhydrases. AIM 2 will define and characterize members of the anion transporter gene family. AIM 3 will define the cellular distributions of carbonic anhydrases and anion transporters in the gut and as functions of larval development. AIM 4 will produce a global analysis of gene expression in the specific functional domains of the larval mosquito gut identifying key functionalities which define the gut domains. AIM 5 will bring the localization of specific gene products together with physiological measurements of the activity of individual cells to produce a cell-specific and spatial analysis of anion dynamics in the gut epithelium. As mosquitoes are the number one threat to human health world wide and recognized as potential agents for bioterrorism, the development of new strategies for control based on unique aspects of their biology (i.e. gut alkalinization) has important potential. 
[unreadable] [unreadable] [unreadable]
|
|
Effect of two prophylaxis methods on adherence of Streptococcus mutans to microfilled composite resin and giomer surfaces.
Surface attributes of a restoration play an important role in adherence of plaque bacteria. Prophylaxis methods may be involved in modification of or damaging the restoration surface. The aim of the present study was to evaluate the effect of two prophylaxis methods on adherence of Streptococcus mutans to the surface of two restorative materials. A total of 60 specimens were prepared from each material; a microfilled composite resin (HelioProgress) and a giomer (Beautifil II). For each material, the specimens were randomly divided into three groups (n=20). Group 1: no prophylaxis treatment (control); Group 2: prophylaxis with pumice and rubber cup; Group 3: prophylaxis with air-powder polishing device (APD). The surfaces of selected specimens from each group were evaluated under a scanning electron microscope (SEM), and the surface topography formed by the two prophylaxis methods was determined by atomic force microscopy (AFM). Adherence of Streptococcus mutans to the surface of specimens was determined by the plate counting method following immersion in a bacterial inoculum for 4 hours, rinsing and sonication. Data were analyzed by two-way ANOVA and post hoc Tukey test for multiple comparisons. Statistical significance was set at P<0.05. Bacterial adherence was significantly affected by both factors: restorative material type and prophylaxis method (P<0.0005). Mean bacterial adhesion was significantly higher in composite groups compared to corresponding giomer groups. Within each material, bacterial adherence was significantly lower in the control group compared to prophylaxis groups. Prophylaxis with pumice and rubber cup resulted in a significantly lower bacterial adherence compared to prophylaxis with APD. Based on the results of the present study, giomer specimens demonstrated lower bacterial adherence compared to composite resin specimens. 
In both materials, bacterial adherence was highest following prophylaxis with APD, intermediate following prophylaxis with pumice and rubber cup, and lowest in the control group.
|
|
---
abstract: |
We give a general construction of debiased/locally robust/orthogonal (LR) moment functions for GMM, where the derivative with respect to first step nonparametric estimation is zero and equivalently first step estimation has no effect on the influence function. This construction consists of adding an estimator of the influence function adjustment term for first step nonparametric estimation to identifying or original moment conditions. We also give numerical methods for estimating LR moment functions that do not require an explicit formula for the adjustment term.
LR moment conditions have reduced bias and so are important when the first step is machine learning. We derive LR moment conditions for dynamic discrete choice based on first step machine learning estimators of conditional choice probabilities.
We provide simple and general asymptotic theory for LR estimators based on sample splitting. This theory uses the additive decomposition of LR moment conditions into an identifying condition and a first step influence adjustment. Our conditions require only mean square consistency and a few (generally either one or two) readily interpretable rate conditions.
LR moment functions have the advantage of being less sensitive to first step estimation. Some LR moment functions are also doubly robust meaning they hold if one first step is incorrect. We give novel classes of doubly robust moment functions and characterize double robustness. For doubly robust estimators our asymptotic theory only requires one rate condition.
Keywords: Local robustness, orthogonal moments, double robustness, semiparametric estimation, bias, GMM.
JEL classification:
: C13; C14; C21; D24
author:
- |
Victor Chernozhukov\
*MIT*
- |
Juan Carlos Escanciano\
*Indiana University*
- |
Hidehiko Ichimura\
*University of Tokyo*
- |
Whitney K. Newey\
*MIT*
- |
James M. Robins\
*Harvard University*
date: April 2018
title: Locally Robust Semiparametric Estimation
---
Introduction
============
There are many economic parameters that depend on nonparametric or large dimensional first steps. Examples include dynamic discrete choice, games, average consumer surplus, and treatment effects. This paper shows how to construct moment functions for GMM estimators that are debiased/locally robust/orthogonal (LR), where moment conditions have a zero derivative with respect to the first step. We show that LR moment functions can be constructed by adding the influence function adjustment for first step estimation to the original moment functions. This construction can also be interpreted as a decomposition of LR moment functions into identifying moment functions and a first step influence function term. We use this decomposition to give simple and general conditions for root-n consistency and asymptotic normality, with different properties being assumed for the identifying and influence function terms. The conditions are easily interpretable mean square consistency and second order remainder conditions based on estimated moments that use cross-fitting (sample splitting). We also give numerical estimators of the influence function adjustment.
LR moment functions have several advantages. LR moment conditions bias correct in a way that eliminates the large biases from plugging in first step machine learning estimators found in Belloni, Chernozhukov, and Hansen (2014). LR moment functions can be used to construct debiased/double machine learning (DML) estimators, as in Chernozhukov et al. (2017, 2018).
We illustrate by deriving LR moment functions for dynamic discrete choice estimation based on conditional choice probabilities. We provide a DML estimator for dynamic discrete choice that uses first step machine learning of conditional choice probabilities. We find that it performs well in a Monte Carlo example. Such structural models provide a potentially important application of DML, because of potentially high dimensional state spaces. Adding the first step influence adjustment term provides a general way to construct LR moment conditions for structural models so that machine learning can be used for first step estimation of conditional choice probabilities, state transition distributions, and other unknown functions on which structural estimators depend.
LR moment conditions also have the advantage of being relatively insensitive to small variation away from the first step true function. This robustness property is appealing in many settings where it may be difficult to get the first step completely correct. Many interesting and useful LR moment functions have the additional property that they are doubly robust (DR), meaning moment conditions hold when one first step is not correct. We give novel classes of DR moment conditions, including for average linear functionals of conditional expectations and probability densities. The construction of adding the first step influence function adjustment to an identifying moment function is useful to obtain these moment conditions. We also give necessary and sufficient conditions for a large class of moment functions to be DR. We find DR moments have simpler and more general conditions for asymptotic normality, which helps motivate our consideration of DR moment functions as special cases of LR ones. LR moment conditions also help minimize sensitivity to misspecification as in Bonhomme and Weidner (2018).
LR moment conditions have smaller bias from first step estimation. We show that they have the small bias property of Newey, Hsieh, and Robins (2004), that the bias of the moments is of smaller order than the bias of the first step. This bias reduction leads to substantial improvements in finite sample properties in many cases relative to just using the original moment conditions. For dynamic discrete choice we find large bias reductions, moderate variance increases and even reductions in some cases, and coverage probabilities substantially closer to nominal. For machine learning estimators of the partially linear model, Chernozhukov et al. (2017, 2018) found bias reductions so large that the LR estimator is root-n consistent but the estimator based on the original moment condition is not. Substantial improvements were previously also found for density weighted averages by Newey, Hsieh, and Robins (2004, NHR). The twicing kernel estimators in NHR are numerically equal to LR estimators based on the original (before twicing) kernel, as shown in Newey, Hsieh, Robins (1998), and the twicing kernel estimators were shown to have smaller mean square error in large samples. Also, a Monte Carlo example in NHR finds that the mean square error (MSE) of the LR estimator has a smaller minimum and is flatter as a function of bandwidth than the MSE of Powell, Stock, and Stoker’s (1989) density weighted average derivative estimator. We expect similar finite sample improvements from LR moments in other cases.
LR moment conditions have appeared in earlier work. They are semiparametric versions of Neyman (1959) C-alpha test scores for parametric models. Hasminskii and Ibragimov (1978) suggested LR estimation of functionals of a density and argued for their advantages over plug-in estimators. Pfanzagl and Wefelmeyer (1981) considered using LR moment conditions for improving the asymptotic efficiency of functionals of distribution estimators. Bickel and Ritov (1988) gave a LR estimator of the integrated squared density that attains root-n consistency under minimal conditions. The Robinson (1988) semiparametric regression and Ichimura (1993) index regression estimators are LR. Newey (1990) showed that LR moment conditions can be obtained as residuals from projections on the tangent set in a semiparametric model. Newey (1994a) showed that derivatives of an objective function where the first step has been “concentrated out” are LR, including the efficient score of a semiparametric model. NHR (1998, 2004) gave estimators of averages that are linear in density derivative functionals with remainder rates that are as fast as those in Bickel and Ritov (1988). Doubly robust moment functions have been constructed by Robins, Rotnitzky, and Zhao (1994, 1995), Robins and Rotnitzky (1995), Scharfstein, Rotnitzky, and Robins (1999), Robins, Rotnitzky, and van der Laan (2000), Robins and Rotnitzky (2001), Graham (2011), and Firpo and Rothe (2017). They are widely used for estimating treatment effects, e.g. Bang and Robins (2005). Van der Laan and Rubin (2006) developed targeted maximum likelihood to obtain a LR estimating equation based on the efficient influence function of a semiparametric model. Robins et al. (2008, 2017) showed that efficient influence functions are LR, characterized some doubly robust moment conditions, and developed higher order influence functions that can reduce bias. 
Belloni, Chernozhukov, and Wei (2013), Belloni, Chernozhukov, and Hansen (2014), Farrell (2015), Kandasamy et al. (2015), Belloni, Chernozhukov, Fernandez-Val, and Hansen (2016), and Athey, Imbens, and Wager (2017) gave LR estimators with machine learning first steps in several specific contexts.
A main contribution of this paper is the construction of LR moment conditions from any moment condition and first step estimator that can result in a root-n consistent estimator of the parameter of interest. This construction is based on the limit of the first step when a data observation has a general distribution that allows for misspecification, similarly to Newey (1994). LR moment functions are constructed by adding to identifying moment functions the influence function of the true expectation of the identifying moment functions evaluated at the first step limit, i.e. by adding the influence function term that accounts for first step estimation. The addition of the influence adjustment “partials out” the first order effect of the first step on the moments. This construction of LR moments extends those cited above for first step density and distribution estimators to *any first step,* including instrumental variable estimators. Also, this construction is *estimator based* rather than model based as in van der Laan and Rubin (2006) and Robins et al. (2008, 2017). The construction depends only on the moment functions and the first step rather than on a semiparametric model. Also, we use the fundamental Gateaux derivative definition of the influence function to show LR rather than an embedding in a regular semiparametric model.
The focus on the functional that is the true expected moments evaluated at the first step limit is the key to this construction. This focus should prove useful for constructing LR moments in many settings, including those where it has already been used to find the asymptotic variance of semiparametric estimators, such as Newey (1994a), Pakes and Olley (1995), Hahn (1998), Ai and Chen (2003), Hirano, Imbens, and Ridder (2003), Bajari, Hong, Krainer, and Nekipelov (2010), Bajari, Chernozhukov, Hong, and Nekipelov (2009), Hahn and Ridder (2013, 2016), and Ackerberg, Chen, Hahn, and Liao (2014), Hahn, Liao, and Ridder (2016). One can construct LR moment functions in each of these settings by adding the first step influence function derived for each case as an adjustment to the original, identifying moment functions.
Another contribution is the development of LR moment conditions for dynamic discrete choice. We derive the influence adjustment for first step estimation of conditional choice probabilities as in Hotz and Miller (1993). We find encouraging Monte Carlo results when various machine learning methods are used to construct the first step. We also give LR moment functions for conditional moment restrictions based on orthogonal instruments.
An additional contribution is to provide general estimators of the influence adjustment term that can be used to construct LR moments without knowing their form. These methods estimate the adjustment term numerically, thus avoiding the need to know its form. It is beyond the scope of this paper to develop machine learning versions of these numerical estimators. Such estimators are developed by Chernozhukov, Newey, and Robins (2018) for average linear functionals of conditional expectations.
Further contributions include novel classes of DR estimators, including linear functionals of nonparametric instrumental variables and density estimators, and a characterization of (necessary and sufficient conditions for) double robustness. We also give related, novel partial robustness results where original moment conditions are satisfied even when the first step is not equal to the truth.
A main contribution is simple and general asymptotic theory for LR estimators that use cross-fitting in the construction of the average moments. This theory is based on the structure of LR moment conditions as an identifying moment condition depending on one first step plus an influence adjustment that can depend on an additional first step. We give a remainder decomposition that leads to mean square consistency conditions for first steps plus a few readily interpretable rate conditions. For DR estimators there is only one rate condition, on a product of sample remainders from two first step estimators, leading to particularly simple conditions. This simplicity motivates our inclusion of results for DR estimators. This asymptotic theory is also useful for existing moment conditions that are already known to be LR. Whenever the moment condition can be decomposed into an identifying moment condition depending on one first step and an influence function term that may depend on two first steps the simple and general regularity conditions developed here will apply.
LR moments reduce the smoothing bias that results from first step nonparametric estimation relative to original moment conditions. There are other sources of bias arising from nonlinearity of moment conditions in the first step and the empirical distribution. Cattaneo and Jansson (2017) and Cattaneo, Jansson, and Ma (2017) give useful bootstrap and jackknife methods that reduce nonlinearity bias. Newey and Robins (2017) show that one can also remove this bias by cross fitting in some settings. We allow for cross-fitting in this paper.
Section 2 describes the general construction of LR moment functions for semiparametric GMM. Section 3 gives LR moment conditions for dynamic discrete choice. Section 4 shows how to estimate the first step influence adjustment. Section 5 gives novel classes of DR moment functions and characterizes double robustness. Section 6 gives an orthogonal instrument construction of LR moments based on conditional moment restrictions. Section 7 provides simple and general asymptotic theory for LR estimators.
Locally Robust Moment Functions
===============================
The subject of this paper is GMM estimators of parameters where the sample moment functions depend on a first step nonparametric or large dimensional estimator. We refer to these estimators as semiparametric. We could also refer to them as GMM where first step estimators are plugged in the moments. This terminology seems awkward though, so we simply refer to them as semiparametric GMM estimators. We denote such an estimator by $\hat{\beta}$, which is a function of the data $z_{1},...,z_{n}$ where $n$ is the number of observations. Throughout the paper we will assume that the data observations $z_{i}$ are i.i.d. We denote the object that $\hat{\beta}$ estimates as $\beta_{0}$, the subscript referring to the parameter value under the distribution $F_{0}$ of $z_{i}$.
To describe semiparametric GMM let $m(z,\beta,\gamma)$ denote an $r\times1$ vector of functions of the data observation $z,$ parameters of interest $\beta$, and a function $\gamma$ that may be vector valued. The function $\gamma$ can depend on $\beta$ and $z$ through those arguments of $m.$ Here the function $\gamma$ represents some possible first step, such as an estimator, its limit, or a true function. A GMM estimator can be based on a moment condition where $\beta_{0}$ is the unique parameter vector satisfying$$E[m(z_{i},\beta_{0},\gamma_{0})]=0, \label{moments}$$ and $\gamma_{0}$ is the true $\gamma$. We assume that this moment condition identifies $\beta.$ Let $\hat{\gamma}$ denote some first step estimator of $\gamma_{0}$. Plugging in $\hat{\gamma}$ to obtain $m(z_{i},\beta,\hat{\gamma
})$ and averaging over $z_{i}$ results in the estimated sample moments $\hat{m}(\beta)=\sum_{i=1}^{n}m(z_{i},\beta,\hat{\gamma})/n.$ For $\hat{W}$ a positive semi-definite weighting matrix a semiparametric GMM estimator is$$\tilde{\beta}=\arg\min_{\beta\in B}\hat{m}(\beta)^{T}\hat{W}\hat{m}(\beta),$$ where $A^{T}$ denotes the transpose of a matrix $A$ and $B$ is the parameter space for $\beta$. Such estimators have been considered by, e.g. Andrews (1994), Newey (1994a), Newey and McFadden (1994), Pakes and Olley (1995), Chen and Liao (2015), and others.
Locally robust (LR) moment functions can be constructed by adding the influence function adjustment for the first step estimator $\hat{\gamma}$ to the identifying or original moment functions $m(z,\beta,\gamma).$ To describe this influence adjustment let $\gamma(F)$ denote the limit of $\hat{\gamma}$ when $z_{i}$ has distribution $F,$ where we restrict $F$ only in that $\gamma(F)$ exists and possibly other regularity conditions are satisfied. That is, $\gamma(F)$ is the limit of $\hat{\gamma}$ under possible misspecification, similar to Newey (1994). Let $G$ be some other distribution and $F_{\tau}=(1-\tau)F_{0}+\tau G$ for $0\leq\tau\leq1,$ where $F_{0}$ denotes the true distribution of $z_{i}.$ We assume that $G$ is chosen so that $\gamma(F_{\tau})$ is well defined for $\tau>0$ small enough and possibly other regularity conditions are satisfied, similarly to Ichimura and Newey (2017). The influence function adjustment will be the function $\phi
(z,\beta,\gamma,\lambda)$ such that for all such $G,$$$\frac{d}{d\tau}E[m(z_{i},\beta,\gamma(F_{\tau}))]=\int\phi(z,\beta,\gamma
_{0},\lambda_{0})G(dz),E[\phi(z_{i},\beta,\gamma_{0},\lambda_{0})]=0,
\label{infdef}$$ where $\lambda$ is an additional nonparametric or large dimensional unknown object on which $\phi(z,\beta,\gamma,\lambda)$ depends and the derivative is from the right (i.e. for positive values of $\tau$) and at $\tau=0.$ This equation is the well known definition of the influence function $\phi
(z,\beta,\gamma_{0},\lambda_{0})$ of $\mu(F)=E[m(z_{i},\beta,\gamma(F))]$ as the Gateaux derivative of $\mu(F),$ e.g. Huber (1981). The restriction of $G$ so that $\gamma(F_{\tau})$ exists allows $\phi(z,\beta,\gamma_{0},\lambda
_{0})$ to be the influence function when $\gamma(F)$ is only well defined for certain types of distributions, such as when $\gamma(F)$ is a conditional expectation or density. The function $\phi(z,\beta,\gamma,\lambda)$ will generally exist when $E[m(z_{i},\beta,\gamma(F))]$ has a finite semiparametric variance bound. Also $\phi(z,\beta,\gamma,\lambda)$ will generally be unique because we are not restricting $G$ very much. Also, note that $\phi
(z,\beta,\gamma,\lambda)$ will be the influence adjustment term from Newey (1994a), as discussed in Ichimura and Newey (2017).
LR moment functions can be constructed by adding $\phi(z,\beta,\gamma
,\lambda)$ to $m(z,\beta,\gamma)$ to obtain new moment functions$$\psi(z,\beta,\gamma,\lambda)=m(z,\beta,\gamma)+\phi(z,\beta,\gamma,\lambda).
\label{momadj}$$ Let $\hat{\lambda}$ be a nonparametric or large dimensional estimator having limit $\lambda(F)$ when $z_{i}$ has distribution $F,$ with $\lambda
(F_{0})=\lambda_{0}.$ Also let $\hat{\psi}(\beta)=\sum_{i=1}^{n}\psi
(z_{i},\beta,\hat{\gamma},\hat{\lambda})/n.$ A LR GMM estimator can be obtained as$$\hat{\beta}=\arg\min_{\beta\in B}\hat{\psi}(\beta)^{T}\hat{W}\hat{\psi}(\beta). \label{lrgmm}$$ As usual a choice of $\hat{W}$ that minimizes the asymptotic variance of $\sqrt{n}(\hat{\beta}-\beta_{0})$ will be a consistent estimator of the inverse of the asymptotic variance $\Omega$ of $\sqrt{n}\hat{\psi}(\beta
_{0}).$ As we will further discuss, $\psi(z,\beta,\gamma,\lambda)$ being LR will mean that the estimation of $\gamma$ and $\lambda$ does not affect $\Omega$, so that $\Omega=E[\psi(z_{i},\beta_{0},\gamma_{0},\lambda_{0})\psi(z_{i},\beta_{0},\gamma_{0},\lambda_{0})^{T}].$ An optimal $\hat{W}$ also gives an efficient estimator in the wider sense shown in Ackerberg, Chen, Hahn, and Liao (2014), making $\hat{\beta}$ efficient in a semiparametric model where the only restrictions imposed are equation (\[moments\]).
The LR property we consider is that the derivative of the true expectation of the moment function with respect to the first step is zero, for a Gateaux derivative like that for the influence function in equation (\[infdef\]). Define $F_{\tau}=(1-\tau)F_{0}+\tau G$ as before where $G$ is such that both $\gamma(F_{\tau})$ and $\lambda(F_{\tau})$ are well defined. The LR property is that for all $G$ as specified,$$\frac{d}{d\tau}E[\psi(z_{i},\beta,\gamma(F_{\tau}),\lambda(F_{\tau}))]=0.
\label{lrdef}$$ Note that this condition is the same as that of Newey (1994a) for the presence of $\hat{\gamma}$ and $\hat{\lambda}$ to have no effect on the asymptotic distribution, when each $F_{\tau}$ is a regular parametric submodel. Consequently, the asymptotic variance of $\sqrt{n}\hat{\psi}(\beta_{0})$ will be $\Omega$ as in the last paragraph.
To show LR of the moment functions $\psi(z,\beta,\gamma,\lambda)=m(z,\beta
,\gamma)+\phi(z,\beta,\gamma,\lambda)$ from equation (\[momadj\]) we use the fact that the second, zero expectation condition in equation (\[infdef\]) must hold for all possible true distributions. For any given $\beta$ define $\mu(F)=E[m(z_{i},\beta,\gamma(F))]$ and $\phi(z,F)=\phi(z,\beta
,\gamma(F),\lambda(F)).$
<span style="font-variant:small-caps;">Theorem 1:</span> *If i)* $d\mu(F_{\tau})/d\tau=\int\phi
(z,F_{0})G(dz)$*, ii)* $\int\phi(z,F_{\tau})F_{\tau}(dz)=0$ *for all* $\tau\in\lbrack0,\bar{\tau}),$ *and iii)* $\int\phi(z,F_{\tau
})F_{0}(dz)$ *and* $\int\phi(z,F_{\tau})G(dz)$ *are continuous at* $\tau=0$ *then*$$\frac{d}{d\tau}E[\phi(z_{i},F_{\tau})]=-\frac{d\mu(F_{\tau})}{d\tau}.
\label{thm1con}$$
The proofs of this result and others are given in Appendix B. Assumptions i) and ii) of Theorem 1 require that both parts of equation (\[infdef\]) hold with the second, zero mean condition being satisfied when $F_{\tau}$ is the true distribution. Assumption iii) is a regularity condition. The LR property follows from Theorem 1 by adding $d\mu(F_{\tau})/d\tau$ to both sides of equation (\[thm1con\]) and noting that the sum of derivatives is the derivative of the sum. Equation (\[thm1con\]) shows that the addition of $\phi(z,\beta,\gamma,\lambda)$ “partials out” the effect of the first step $\gamma$ on the moment by “cancelling” the derivative of the identifying moment $E[m(z_{i},\beta,\gamma(F_{\tau}))]$ with respect to $\tau$. This LR result for $\psi(z,\beta,\gamma,\lambda)$ differs from the literature in its Gateaux derivative formulation and in the fact that it is not a semiparametric influence function but is the hybrid sum of an identifying moment function $m(z,\beta,\gamma)$ and an influence function adjustment $\phi(z,\beta
,\gamma,\lambda).$
Another zero derivative property of LR moment functions is useful. If the sets $\Gamma$ and $\Lambda$ of possible limits $\gamma(F)$ and $\lambda(F)$, respectively, are linear, $\gamma(F)$ and $\lambda(F)$ can vary separately from one another, and certain functional differentiability conditions hold then LR moment functions will have the property that for any $\gamma\in\Gamma
$, $\lambda\in\Lambda$, and $\bar{\psi}(\gamma,\lambda)=E[\psi(z_{i},\beta
_{0},\gamma,\lambda)]$, $$\frac{\partial}{\partial\tau}\bar{\psi}((1-\tau)\gamma_{0}+\tau\gamma
,\lambda_{0})=0,\frac{\partial}{\partial\tau}\bar{\psi}(\gamma_{0},(1-\tau)\lambda_{0}+\tau\lambda)=0. \label{lrdef2}$$ That is, the expected value of the LR moment function will have a zero Gateaux derivative with respect to each of the first steps $\gamma$ and $\lambda.$ This property will be useful for several results to follow. Under still stronger smoothness conditions this zero derivative condition will result in the existence of a constant $C$ such that for a function norm $\left\Vert
\cdot\right\Vert $,$$\left\vert \bar{\psi}(\gamma,\lambda_{0})\right\vert \leq C\left\Vert
\gamma-\gamma_{0}\right\Vert ^{2},\text{ }\left\vert \bar{\psi}(\gamma
_{0},\lambda)\right\vert \leq C\left\Vert \lambda-\lambda_{0}\right\Vert ^{2},
\label{nlremainder}$$ when $\left\Vert \gamma-\gamma_{0}\right\Vert $ and $\left\Vert \lambda
-\lambda_{0}\right\Vert $ are small enough. In Appendix B we give smoothness conditions that are sufficient for LR to imply equations (\[lrdef2\]) and (\[nlremainder\]). When formulating regularity conditions for particular moment functions and first step estimators it may be more convenient to work directly with equation (\[lrdef2\]) and/or (\[nlremainder\]).
The approach of constructing LR moment functions by adding the influence adjustment differs from the model based approach of using an efficient influence function or score for a semiparametric model as moment functions . The approach here is *estimator based* rather than model based. The influence adjustment $\phi(z,\beta,\gamma,\lambda)$ is determined by the limit $\gamma(F)$ of the first step estimator $\hat{\gamma}$ and the moment functions $m(z,\beta,\gamma)$ rather than by some underlying semiparametric model. This estimator based approach has proven useful for deriving the influence function of a wide variety of semiparametric estimators, as mentioned in the Introduction. Here this estimator based approach provides a general way to construct LR moment functions. For any moment function $m(z,\beta,\gamma)$ and first step estimator $\hat{\gamma}$ a corresponding LR estimator can be constructed as in equations (\[momadj\]) and (\[lrgmm\]).
The addition of $\phi(z,\beta,\gamma,\lambda)$ does not affect identification of $\beta$ because $\phi(z,\beta,\gamma_{0},\lambda_{0})$ has expectation zero for any $\beta$ and true $F_{0}.$ Consequently, the LR GMM estimator will have the same asymptotic variance as the original GMM estimator $\tilde{\beta}$ when $\sqrt{n}(\tilde{\beta}-\beta_{0})$ is asymptotically normal, under appropriate regularity conditions. The addition of $\phi(z,\beta
,\gamma,\lambda)$ will change other properties of the estimator. As discussed in Chernozhukov et al. (2017, 2018), it can even remove enough bias so that the LR estimator is root-n consistent and the original estimator is not.
If $F_{\tau}$ was modified so that $\tau$ is a function of a smoothing parameter, e.g. a bandwidth, and $\tau$ gives the magnitude of the smoothing bias of $\gamma(F_{\tau}),$ then equation (\[lrdef\]) is a small bias condition, equivalent to$$E[\psi(z_{i},\beta_{0},\gamma(F_{\tau}),\lambda(F_{\tau}))]=o(\tau).$$ Here $E[\psi(z_{i},\beta_{0},\gamma(F_{\tau}),\lambda(F_{\tau}))]$ is a bias in the moment condition resulting from smoothing that shrinks faster than $\tau.$ In this sense LR GMM estimators have the small bias property considered in NHR. This interpretation is also one sense in which LR GMM is “debiased.”
In some cases the original moment functions $m(z,\beta,\gamma)$ are already LR and the influence adjustment will be zero. An important class of moment functions that are LR are those where $m(z,\beta,\gamma)$ is the derivative with respect to $\beta$ of an objective function where nonparametric parts have been concentrated out. That is, suppose that there is a function $q(z,\beta,\zeta)$ such that $m(z,\beta,\gamma)=\partial q(z,\beta,\zeta
(\beta))/\partial\beta$ where $\zeta(\beta)=\arg\max_{\zeta}E[q(z_{i},\beta,\zeta)]$, where $\gamma$ includes $\zeta(\beta)$ and possibly additional functions. Proposition 2 of Newey (1994a) and Lemma 2.5 of Chernozhukov et al. (2018) then imply that $m(z,\beta,\gamma)$ will be LR. This class of moment functions includes various partially linear regression models where $\zeta$ represents a conditional expectation. It also includes the efficient score for a semiparametric model, Newey (1994a, pp. 1358-1359).
Cross fitting, also known as sample splitting, has often been used to improve the properties of semiparametric and machine learning estimators; e.g. see Bickel (1982), Schick (1986), and Powell, Stock, and Stoker (1989). Cross fitting removes a source of bias and can be used to construct estimators with remainder terms that converge to zero as fast as is known to be possible, as in NHR and Newey and Robins (2017). Cross fitting is also useful for double machine learning estimators, as outlined in Chernozhukov et al. (2017, 2018). For these reasons we allow for cross-fitting, where sample moments have the form$$\hat{\psi}(\beta)=\frac{1}{n}\sum_{i=1}^{n}\psi(z_{i},\beta,\hat{\gamma}_{i},\hat{\lambda}_{i}),$$ with $\hat{\gamma}_{i}$ and $\hat{\lambda}_{i}$ being formed from observations other than the $i^{th}.$ This kind of cross fitting removes an “own observation” bias term and is useful for showing root-n consistency when $\hat{\gamma}_{i}$ and $\hat{\lambda}_{i}$ are machine learning estimators.
One version of cross-fitting with good properties in examples in Chernozhukov et al. (2018) can be obtained by partitioning the observation indices into $L$ groups $I_{\ell},(\ell=1,...,L),$ forming $\hat{\gamma}_{\ell}$ and $\hat{\lambda}_{\ell}$ from observations not in $I_{\ell}$, and constructing$$\hat{\psi}(\beta)=\frac{1}{n}\sum_{\ell=1}^{L}\sum_{i\in I_{\ell}}\psi
(z_{i},\beta,\hat{\gamma}_{\ell},\hat{\lambda}_{\ell}). \label{cfit}$$ Further bias reductions may be obtained in some cases by using different sets of observations for computing $\hat{\gamma}_{\ell}$ and $\hat{\lambda}_{\ell
},$ leading to remainders that converge to zero as rapidly as known possible in interesting cases; see Newey and Robins (2017). The asymptotic theory of Section 7 focuses on this kind of cross fitting.
As an example we consider a bound on average equivalent variation. Let $\gamma_{0}(x)$ denote the conditional expectation of quantity $q$ conditional on $x=(p^{T},y)$ where $p=(p_{1},p_{2}^{T})^{T}$ is a vector of prices and $y$ is income$.$ The object of interest is a bound on average equivalent variation for a price change from $\bar{p}_{1}$ to $\check{p}_{1}$ given by$$\beta_{0}=E[\int\ell(p_{1},y_{i})\gamma_{0}(p_{1},p_{2i},y_{i})dp_{1}],\ell(p_{1},y)=w(y)1(\bar{p}_{1}\leq p_{1}\leq\check{p}_{1})\exp
\{-B(p_{1}-\bar{p}_{1})\},$$ where $w(y)$ is a function of income and $B$ a constant. It follows by Hausman and Newey (2016) that if $B$ is a lower (upper) bound on the income effect for all individuals then $\beta_{0}$ is an upper (lower) bound on the equivalent variation for a price change from $\bar{p}_{1}$ to $\check{p}_{1},$ averaged over heterogeneity, other prices $p_{2i},$ and income $y_{i}$. The function $w(y)$ allows for averages over income in specific ranges, as in Hausman and Newey (2017).
A moment function that could be used to estimate $\beta_{0}$ is$$m(z,\beta,\gamma)=\int\ell(p_{1},y)\gamma(p_{1},p_{2},y)dp_{1}-\beta.$$ Note that $$E[m(z_{i},\beta_{0},\gamma)]+\beta_{0}=E[\int\ell(p_{1},y_{i})\gamma
(p_{1},p_{2i},y_{i})dp_{1}]=E[\lambda_{0}(x_{i})\gamma(x_{i})],\lambda
_{0}(x)=\frac{\ell(p_{1},y)}{f_{0}(p_{1}|p_{2},y)},$$ where $f_{0}(p_{1}|p_{2},y)$ is the conditional pdf of $p_{1i}$ given $p_{2i}$ and $y_{i}$. Then by Proposition 4 of Newey (1994) the influence function adjustment for any nonparametric estimator $\hat{\gamma}(x)$ of $E[q_{i}|x_{i}=x]$ is$$\phi(z,\beta,\gamma,\lambda)=\lambda(x)[q-\gamma(x)].$$ Here $\lambda_{0}(x)$ is an example of an additional unknown function that is included in $\phi(z,\beta,\gamma,\lambda)$ but not in the original moment functions $m(z,\beta,\gamma)$. Let $\hat{\gamma}_{i}(x)$ be an estimator of $E[q_{i}|x_{i}=x]$ that can depend on $i$ and $\hat{\lambda}_{i}(x)$ be an estimator of $\lambda_{0}(x)$, such as $\hat{f}_{i}(p_{1}|p_{2},y)^{-1}\ell(p_{1},y)$ for an estimator $\hat{f}_{i}(p_{1}|p_{2},y).$ The LR estimator obtained by solving $\hat{\psi}(\beta)=0$ for $m(z,\beta,\gamma)$ and $\phi(z,\beta,\gamma,\lambda)$ as above is$$\hat{\beta}=\frac{1}{n}\sum_{i=1}^{n}\left\{ \int\ell(p_{1},y_{i})\hat
{\gamma}_{i}(p_{1},p_{2i},y_{i})dp_{1}+\hat{\lambda}_{i}(x_{i})[q_{i}-\hat{\gamma}_{i}(x_{i})]\right\} . \label{exlr}$$
Machine Learning for Dynamic Discrete Choice
============================================
A challenging problem when estimating dynamic structural models is the dimensionality of state spaces. Machine learning addresses this problem via model selection to estimate high dimensional choice probabilities. These choice probability estimators can then be used in conditional choice probability (CCP) estimators of structural parameters, following Hotz and Miller (1993). In order for CCP estimators based on machine learning to be root-n consistent they must be based on orthogonal (i.e. LR) moment conditions, see Chernozhukov et al. (2017, 2018). Adding the adjustment term provides the way to construct LR moment conditions from known moment conditions for CCP estimators. In this Section we do so for Rust’s (1987) model of dynamic discrete choice.
We consider an agent choosing among $J$ discrete alternatives by maximizing the expected present discounted value of utility. We assume that the per-period utility function for an agent making choice $j$ in period $t$ is given by$$U_{jt}=u_{j}(x_{t},\beta_{0})+\epsilon_{jt},(j=1,...,J;t=1,2,...).$$ The vector $x_{t}$ is the observed state variables of the problem (*e.g.* work experience, number of children, wealth) and the vector $\beta$ is unknown parameters. The disturbances $\epsilon_{t}=\{\epsilon
_{1t},...,\epsilon_{Jt}\}$ are not observed by the econometrician. As in much of the literature we assume that $\epsilon_{t}$ is i.i.d. over time with known CDF that has support $R^{J},$ is independent of $x_{t},$ and $x_{t}$ is first-order Markov.
To describe the agent’s choice probabilities let $\delta$ denote a time discount parameter, $\bar{v}(x)$ the expected value function, $y_{jt}\in\{0,1\}$ the indicator that choice $j$ is made and $\bar{v}_{j}(x_{t})=u_{j}(x_{t},\beta_{0})+\delta E[\bar{v}(x_{t+1})|x_{t},j]$ the expected value function conditional on choice $j.$ As in Rust (1987), we assume that in each period the agent makes the choice $j$ that maximizes the expected present discounted value of utility $\bar{v}_{j}(x_{t})+\epsilon
_{jt}.$ The probability of choosing $j$ in period $t$ is then$$P_{j}(\bar{v}_{t})=\Pr(\bar{v}_{j}(x_{t})+\epsilon_{jt}\geq\bar{v}_{k}(x_{t})+\epsilon_{kt};k=1,...,J),\bar{v}_{t}=(\bar{v}_{1}(x_{t}),...,\bar
{v}_{J}(x_{t}))^{\prime}. \label{choice prob}$$
These choice probabilities have a useful relationship to the structural parameters $\beta$ when there is a renewal choice, where the conditional distribution of $x_{t+1}$ given the renewal choice and $x_{t}$ does not depend on $x_{t}.$ Without loss of generality suppose that the renewal choice is $j=1.$ Let $\tilde{v}_{jt}$ denote $\tilde{v}_{j}(x_{t})=\bar{v}_{j}(x_{t})-\bar{v}_{1}(x_{t}),$ so that $\tilde{v}_{1t}\equiv0$. As usual, subtracting $\bar{v}_{1t}$ from each $\bar{v}_{jt}$ in $P_{j}(\bar{v}_{t})$ does not change the choice probabilities, so that they depend only on $\tilde{v}_{t}=(\tilde{v}_{2t},...,\tilde{v}_{Jt}).$
The renewal nature of $j=1$ leads to a specific formula for $\tilde{v}_{jt}$ in terms of the per period utilities $u_{jt}=u_{j}(x_{t},\beta_{0})$ and the choice probabilities $P_{t}=P(\tilde{v}_{t})=(P_{1}(\bar{v}_{t}),...P_{J}(\bar{v}_{t}))^{\prime}.$ As in Hotz and Miller (1993), there is a function $\mathcal{P}^{-1}(P)$ such that $\tilde{v}_{t}=\mathcal{P}^{-1}(P_{t}).$ Let $H(P)$ denote the function such that $$H(P_{t})=E[\max_{1\leq j\leq J}\{\mathcal{P}^{-1}(P_{t})_{j}+\epsilon
_{jt}\}|x_{t}]=E[\max_{1\leq j\leq J}\{\tilde{v}_{jt}+\epsilon_{jt}\}|x_{t}].$$ For example, for multinomial logit $H(P_{t})=.5772-\ln(P_{1t}).$ Note that by $j=1$ being a renewal we have $E[\bar{v}_{t+1}|x_{t},1]=C$ for a constant $C$, so that$$\bar{v}(x_{t})=\bar{v}_{1t}+H(P_{t})=u_{1t}+\delta C+H(P_{t}).$$ It then follows that$$\bar{v}_{jt}=u_{jt}+\delta E[\bar{v}(x_{t+1})|x_{t},j]=u_{jt}+\delta
E[u_{1,t+1}+H(P_{t+1})|x_{t},j]+\delta^{2}C,(j=1,...,J).$$ Subtracting then gives$$\tilde{v}_{jt}=u_{jt}-u_{1t}+\delta\{E[u_{1,t+1}+H(P_{t+1})|x_{t},j]-E[u_{1,t+1}+H(P_{t+1})|1]\}. \label{value}$$ This expression for the choice specific value function $\tilde{v}_{jt}$ depends only on $u_{j}(x_{t},\beta),$ $H(P_{t+1})$, and conditional expectations given the state and choice, and so can be used to form semiparametric moment functions.
To describe those moment functions let $\gamma_{1}(x)$ denote the vector of possible values of the choice probabilities $E[y_{t}|x_{t}=x],$ where $y_{t}=(y_{1t},...,y_{Jt})^{\prime}.$ Also let $\gamma_{j}(x_{t},\beta
,\gamma_{1}),(j=2,...,J)$ denote a possible $E[u_{1}(x_{t+1},\beta
)+H(\gamma_{1}(x_{t+1}))|x_{t},j]$ as a function of $\beta$, $x_{t}$ and $\gamma_{1},$ and $\gamma_{J+1}(\beta,\gamma_{1})$ a possible value of $E[u_{1}(x_{t+1},\beta)+H(\gamma_{1}(x_{t+1}))|1].$ Then a possible value of $\tilde{v}_{jt}$ is given by $$\tilde{v}_{j}(x_{t},\beta,\gamma)=u_{j}(x_{t},\beta)-u_{1}(x_{t},\beta
)+\delta\lbrack\gamma_{j}(x_{t},\beta,\gamma_{1})-\gamma_{J+1}(\beta
,\gamma_{1})],(j=2,...,J).$$ These value function differences are semiparametric, depending on the function $\gamma_{1}$ of choice probabilities and the conditional expectations $\gamma_{j}$, $(j=2,...,J).$ Let $\tilde{v}(x_{t},\beta,\gamma)=(\tilde{v}_{2}(x_{t},\beta,\gamma),...,\tilde{v}_{J}(x_{t},\beta,\gamma))^{\prime}$ and $A(x_{t})$ denote a matrix of functions of $x_{t}$ with $J$ columns. Semiparametric moment functions are given by$$m(z,\beta,\gamma)=A(x)[y-P(\tilde{v}(x,\beta,\gamma))].$$
LR moment functions can be constructed by adding the adjustment term for the presence of the first step $\gamma.$ This adjustment term is derived in Appendix A. It takes the form $$\phi(z,\beta,\gamma,\lambda)=\sum_{j=1}^{J+1}\phi_{j}(z,\beta,\gamma
,\lambda),$$ where $\phi_{j}(z,\beta,\gamma,\lambda)$ is the adjustment term for $\gamma_{j}$ holding all other components $\gamma$ fixed at their true values. To describe it define$$\begin{aligned}
P_{\tilde{v}j}(\tilde{v}) & =\partial P(\tilde{v})/\partial\tilde{v}_{j},\text{ }\pi_{1}=\Pr(y_{t1}=1),\text{ }\lambda_{10}(x)=E[y_{1t}|x_{t+1}=x],\label{ddcdef}\\
\lambda_{j0}(x) & =E[A(x_{t})P_{\tilde{v}j}(\tilde{v}_{t})\frac{y_{tj}}{P_{j}(\tilde{v}_{t})}|x_{t+1}=x],(j=2,...,J).\nonumber\end{aligned}$$ Then for $w_{t}=x_{t+1}$ and $z=(y,x,w)$ let$$\begin{aligned}
\phi_{1}(z,\beta,\gamma,\lambda) & =-\delta\left( \sum_{j=2}^{J}\{\lambda_{j}(x)-E[A(x_{t})P_{\tilde{v}j}(\tilde{v}_{t})]\pi_{1}^{-1}\lambda_{1}(x)\}\right) [\partial H(\gamma_{1}(x))/\partial P]^{\prime
}\{y-\gamma_{1}(x)\}\\
\phi_{j}(z,\beta,\gamma,\lambda) & =-\delta A(x)P_{\tilde{v}j}(\tilde
{v}(x,\beta,\gamma))\frac{y_{j}}{P_{j}(\tilde{v}(x,\beta,\gamma))}\{u_{1}(w,\beta)+H(\gamma_{1}(w))-\gamma_{j}(x,\beta,\gamma_{1})\},(j=2,...,J),\\
\phi_{J+1}(z,\beta,\gamma,\lambda) & =\delta\left( \sum_{j=2}^{J}E[A(x_{t})P_{\tilde{v}j}(\tilde{v}(x_{t},\beta,\gamma))]\right) \pi_{1}^{-1}y_{1}\{u_{1}(w,\beta)+H(\gamma_{1}(w))-\gamma_{J+1}(\beta,\gamma_{1})\}.\end{aligned}$$
<span style="font-variant:small-caps;">Theorem 2:</span> *If the marginal distribution of* $x_{t}$ *does not vary with* $t$ *then LR moment functions for the dynamic discrete choice model are*$$\psi(z,\beta,\gamma)=A(x_{t})[y_{t}-P(\tilde{v}(x_{t},\beta,\gamma
))]+\sum_{j=1}^{J+1}\phi_{j}(z,\beta,\gamma,\lambda).$$
The form of $\psi(z,\beta,\gamma)$ is amenable to machine learning. A machine learning estimator of the conditional choice probability vector $\gamma
_{10}(x)$ is straightforward to compute and can then be used throughout the construction of the orthogonal moment conditions everywhere $\gamma_{1}$ appears. If $u_{1}(x,\beta)$ is linear in $x,$ say $u_{1}(x,\beta
)=x_{1}^{\prime}\beta_{1}$ for subvectors $x_{1}$ and $\beta_{1}$ of $x$ and $\beta$ respectively, then machine learning estimators can be used to obtain $\hat{E}[x_{1,t+1}|x_{t},j]$ and $\hat{E}[\hat{H}_{t+1}|x_{t},j],$ $(j=2,...,J),$ and a sample average used to form $\hat{\gamma}_{J+1}(\beta,\hat{\gamma}_{1})$. The value function differences can then be estimated as$$\tilde{v}_{j}(x_{t},\beta,\hat{\gamma})=u_{j}(x_{t},\beta)-u_{1}(x_{t},\beta)+\hat{E}[x_{1,t+1}|x_{t},j]^{\prime}\beta_{1}-\hat{E}[x_{1,t+1}|1]^{\prime}\beta_{1}+\hat{E}[\hat{H}_{t+1}|x_{t},j]-\hat{E}[\hat{H}_{t+1}|1].$$ Furthermore, denominator problems can be avoided by using structural probabilities (rather than the machine learning estimators) in all denominator terms.
The challenging part of the machine learning for this estimator is the dependence on $\beta$ of the reverse conditional expectations in $\lambda
_{1}(x)$. It may be computationally prohibitive and possibly unstable to redo machine learning for each $\beta.$ One way to deal with this complication is to update $\beta$ periodically, with more frequent updates near convergence. It is important that at convergence the $\beta$ in the reverse conditional expectations is the same as the $\beta$ that appears elsewhere.
With data $z_{i}$ that is i.i.d. over individuals these moment functions can be used for any $t$ to estimate the structural parameters $\beta.$ Also, for data for a single individual we could use a time average $\sum_{t=1}^{T-1}\psi(z_{t},\beta,\gamma)/(T-1)$ to estimate $\beta.$ It will be just as important to use LR moments for estimation with a single individual as it is with a cross section of individuals, although our asymptotic theory will not apply to that case.
Bajari, Chernozhukov, Hong, and Nekipelov (2009) derived the influence adjustment for dynamic discrete games of imperfect information. Locally robust moment conditions for such games could be formed using their results. We leave that formulation to future work.
As an example of the finite sample performance of the LR GMM we report a Monte Carlo study of the LR estimator of this Section. The design of the experiment is loosely like the bus replacement application of Rust (1987). Here $x_{t}$ is a state variable meant to represent the lifetime of a bus engine. The transition density is $$x_{t+1}=\left\{
\begin{array}
[c]{c}x_{t}+N(.25,1)^{2},y_{t}=1,\\
x_{t+1}=1+N(.25,1)^{2},y_{t}=0.
\end{array}
\right.$$ where $y_{t}=0$ corresponds to replacement of the bus engine and $y_{t}=1$ to nonreplacement. We assume that the agent chooses $y_{t}$ contingent on state to maximize$$\sum_{t=1}^{\infty}\delta^{t-1}[y_{t}(\alpha\sqrt{x_{t}}+\varepsilon
_{t})+(1-y_{t})RC],\alpha=-.3,RC=-4.$$ The unconditional probability of replacement in this model is about $1/8,$ which is substantially higher than that estimated in Rust (1987). The sample used for estimation was $1000$ observations for a single decision maker. We carried out $10,000$ replications.
We estimate the conditional choice probabilities by kernel and series nonparametric regression and by logit lasso, random forest, and boosted tree machine learning methods. Logit conditional choice probabilities and derivatives were used in the construction of $\hat{\lambda}_{j}$ wherever they appear in order to avoid denominator issues. The unknown conditional expectations in the $\hat{\lambda}_{j}$ were estimated by series regressions throughout. Kernel regression was also tried but did not work particularly well and so results are not reported.
Table 1 reports the results of the experiment. Bias, standard deviation, and coverage probability for asymptotic 95 percent confidence intervals are given in Table 1.
Table 1
\[c\][lllllll]{}\
& & &\
& $\ \ \ \ \alpha$ & RC & $\ \ \ \alpha$ & RC & $\ \ \ \alpha$ & RC\
Two step kernel & -.24 & .08 & .08 & .32 & .01 & .86\
LR kernel & -.05 & .02 & .06 & .32 & .95 & .92\
Two step quad & -.00 & .14 & .049 & .33$^{\ast}$ & .91 & .89\
LR quad & -.00 & .01 & .085 & .39 & .95 & .92\
Logit Lasso & -.12 & .25 & .06 & .28 & .74 & .84\
LR Logit Lasso & -.09 & .01 & .08 & .36 & .93 & .95\
Random Forest & -.15 & -.44 & .09 & .50 & .91 & .98\
LR Ran. For. & .00 & .00 & .06 & .44 & 1.0 & .98\
Boosted Trees & -.10 & -.28 & .08 & .50 & .99 & .99\
LR Boost Tr. & .03 & .09 & .07 & .47 & .99 & .97
Here we find bias reduction from the LR estimator in all cases. We also find variance reduction from LR estimation when the first step is kernel estimation, random forests, and boosted trees. The LR estimator also leads to actual coverage of confidence intervals being closer to the nominal coverage. The results for random forests and boosted trees seem noisier than the others, with higher standard deviations and confidence interval coverage probabilities farther from nominal. Overall, we find substantial improvements from using LR moments rather than only the identifying, original moments.
Estimating the Influence Adjustment
===================================
Construction of LR moment functions requires an estimator $\hat{\phi}(z,\beta)$ of the adjustment term. The form of $\phi(z,\beta,\gamma,\lambda)$ is known for some cases from the semiparametric estimation literature. Powell, Stock, and Stoker (1989) derived the adjustment term for density weighted average derivatives. Newey (1994a) gave the adjustment term for mean square projections (including conditional expectations), densities, and their derivatives. Hahn (1998) and Hirano, Imbens, and Ridder (2003) used those results to obtain the adjustment term for treatment effect estimators, where the LR estimator will be the doubly robust estimator of Robins, Rotnitzky, and Zhao (1994, 1995). Bajari, Hong, Krainer, and Nekipelov (2010) and Bajari, Chernozhukov, Hong, and Nekipelov (2009) derived adjustment terms in some game models. Hahn and Ridder (2013, 2016) derived adjustments in models with generated regressors including control functions. These prior results can be used to obtain LR estimators by adding the adjustment term with nonparametric estimators plugged in.
For new cases it may be necessary to derive the form of the adjustment term. Also, it is possible to numerically estimate the adjustment term based on series estimators and other nonparametric estimators. In this Section we describe how to construct estimators of the adjustment term in these ways.
Deriving the Formula for the Adjustment Term
--------------------------------------------
One approach to estimating the adjustment term is to derive a formula for $\phi(z,\beta,\gamma,\lambda)$ and then plug in $\hat{\gamma}$ and $\hat{\lambda}$ in that formula$.$ A formula for $\phi(z,\beta,\gamma
,\lambda)$ can be obtained as in Newey (1994a). Let $\gamma(F)$ be the limit of the nonparametric estimator $\hat{\gamma}$ when $z_{i}$ has distribution $F.$ Also, let $F_{\tau}$ denote a regular parametric model of distributions with $F_{\tau}=F_{0}$ at $\tau=0$ and score (derivative of the log likelihood at $\tau=0)$ equal to $S(z)$. Then under certain regularity conditions $\phi(z,\beta,\gamma_{0},\lambda_{0})$ will be the unique solution to$$\left. \frac{\partial\int m(z,\beta,\gamma(F_{\tau}))F_{0}(dz)}{\partial\tau
}\right\vert _{\tau=0}=E[\phi(z_{i},\beta,\gamma_{0},\lambda_{0})S(z_{i})],E[\phi(z_{i},\beta,\gamma_{0},\lambda_{0})]=0, \label{funeq}$$ as $\{F_{\tau}\}$ and the corresponding score $S(z)$ are allowed to vary over a family of parametric models where the set of scores for the family has mean square closure that includes all mean zero functions with finite variance. Equation (\[funeq\]) is a functional equation that can be solved to find the adjustment term, as was done in many of the papers cited in the previous paragraph.
The influence adjustment can be calculated by taking a limit of the Gateaux derivative as shown in Ichimura and Newey (2017). Let $\gamma(F)$ be the limit of $\hat{\gamma}$ when $F$ is the true distribution of $z_{i}$, as before. Let $G_{z}^{h}$ be a family of distributions that approaches a point mass at $z$ as $h\longrightarrow0.$ If $\phi(z_{i},\beta,\gamma_{0},\lambda_{0})$ is continuous in $z_{i}$ with probability one then$$\phi(z,\beta,\gamma_{0},\lambda_{0})=\lim_{h\longrightarrow0}\left( \left.
\frac{\partial E[m(z_{i},\beta,\gamma(F_{\tau}^{h}))]}{\partial\tau
}\right\vert _{\tau=0}\right) ,F_{\tau}^{h}=(1-\tau)F_{0}+\tau G_{z}^{h}.
\label{derlim}$$ This calculation is more constructive than equation (\[funeq\]) in the sense that the adjustment term here is a limit of a derivative rather than the solution to a functional equation. In Sections 5 and 6 we use those results to construct LR estimators when the first step is a nonparametric instrumental variables (NPIV) estimator.
With a formula for $\phi(z,\beta,\gamma,\lambda)$ in hand from either solving the functional equation in equation (\[funeq\]) or from calculating the limit of the derivative in equation (\[derlim\]), one can estimate the adjustment term by plugging estimators $\hat{\gamma}$ and $\hat{\lambda}$ into $\phi(z,\beta,\gamma,\lambda).$ This approach to estimating LR moments can be used to construct LR moments for the average surplus described near the end of Section 2. There the adjustment term depends on the conditional density of $p_{1i}$ given $p_{2i}$ and $y_{i}$. Let $\hat{f}_{\ell}(p_{1}|p_{2},y)$ be some estimator of the conditional pdf of $p_{1i}$ given $p_{2i}$ and $y_{i}.$ Plugging that estimator into the formula for $\lambda_{0}(x)$ gives $\hat{\lambda}_{\ell}(x)=\frac{\ell(p_{1},y)}{\hat{f}_{\ell}(p_{1}|p_{2},y)}.$ This $\hat{\lambda}_{\ell}(x)$ can then be used in equation (\[exlr\])$.$
Estimating the Influence Adjustment for First Step Series Estimators
--------------------------------------------------------------------
Estimating the adjustment term is relatively straightforward when the first step is a series estimator. The adjustment term can be estimated by treating the first step estimator as if it were parametric and applying a standard formula for the adjustment term for parametric two-step estimators. Suppose that $\hat{\gamma}_{\ell}$ depends on the data through a $K\times1$ vector $\hat{\zeta}_{\ell}$ of parameter estimators that has true value $\zeta_{0}$. Let $m(z,\beta,\zeta)$ denote $m(z,\beta,\gamma)$ as a function of $\zeta.$ Suppose that there is a $K\times1$ vector of functions $h(z,\zeta)$ such that $\hat{\zeta}_{\ell}$ satisfies$$\frac{1}{\sqrt{\bar{n}_{\ell}}}\sum_{i\in\bar{I}_{\ell}}h(z_{i},\hat{\zeta
}_{\ell})=o_{p}(1),$$ where $\bar{I}_{\ell}$ is a subset of observations, none of which are included in $I_{\ell},$ and $\bar{n}_{\ell}$ is the number of observations in $\bar
{I}_{\ell}.$ Then a standard calculation for parametric two-step estimators (e.g. Newey, 1984, and Murphy and Topel, 1985) gives the parametric adjustment term$$\phi(z_{i},\beta,\hat{\zeta}_{\ell},\hat{\Psi}_{\ell})=\hat{\Psi}_{\ell}(\beta)h(z_{i},\hat{\zeta}_{\ell}),\hat{\Psi}_{\ell}(\beta)=-\sum_{j\in\bar
{I}_{\ell}}\frac{\partial m(z_{j},\beta,\hat{\zeta}_{\ell})}{\partial\zeta
}\left( \sum_{j\in\bar{I}_{\ell}}\frac{\partial h(z_{j},\hat{\zeta}_{\ell})}{\partial\zeta}\right) ^{-1},i\in I_{\ell}.$$ In many cases $\phi(z_{i},\beta,\hat{\zeta}_{\ell},\hat{\Psi}_{\ell})$ approximates the true adjustment term $\phi(z,\beta,\gamma_{0},\lambda_{0}),$ as shown by Newey (1994a, 1997) and Ackerberg, Chen, and Hahn (2012) for estimating the asymptotic variance of functions of series estimators. Here this approximation is used for estimation of $\beta$ instead of just for variance estimation. The estimated LR moment function will be$$\psi(z_{i},\beta,\hat{\zeta}_{\ell},\hat{\Psi}_{\ell})=m(z_{i},\beta
,\hat{\zeta}_{\ell})+\phi(z_{i},\beta,\hat{\zeta}_{\ell},\hat{\Psi}_{\ell}).
\label{lr series}$$ We note that if $\hat{\zeta}_{\ell}$ were computed from the whole sample then $\hat{\phi}(\beta)=0$. This degeneracy does not occur when cross-fitting is used, which removes “own observation” bias and is important for first step machine learning estimators, as noted in Section 2.
We can apply this approach to construct LR moment functions for an estimator of the average surplus bound example that is based on series regression. Here the first step estimator of $\gamma_{0}(x)=E[q_{i}|x_{i}=x]$ will be that from an ordinary least regression of $q_{i}$ on a vector $a(x_{i})$ of approximating functions. The corresponding $m(z,\beta,\zeta)$ and $h(z,\zeta)$ are$$m(z,\beta,\zeta)=A(x)^{\prime}\zeta-\beta,h(z,\zeta)=a(x)[q-a(x)^{\prime}\zeta],A(x)=\int\ell(p_{1},y)a(p_{1},p_{2},y)dp_{1}.$$ Let $\hat{\zeta}_{\ell}$ denote the least squares coefficients from regressing $q_{i}$ on $a(x_{i})$ for observations that are not included in $I_{\ell}$. Then the estimator of the locally robust moments given in equation (\[lr series\]) is $$\begin{aligned}
\psi(z_{i},\beta,\hat{\zeta}_{\ell},\hat{\Psi}_{\ell}) & =A(x_{i})^{\prime
}\hat{\zeta}_{\ell}-\beta+\hat{\Psi}_{\ell}a(x_{i})[q_{i}-a(x_{i})^{\prime
}\hat{\zeta}_{\ell}],\\
\hat{\Psi}_{\ell} & =\sum_{j\in\bar{I}_{\ell}}A(x_{j})^{\prime}\left(
\sum_{j\in\bar{I}_{\ell}}a(x_{j})a(x_{j})^{\prime}\right) ^{-1}.\end{aligned}$$ It can be shown similarly to Newey (1994a, p. 1369) that $\hat{\Psi}_{\ell}$ estimates the population least squares coefficients from a regression of $\lambda_{0}(x_{i})$ on $a(x_{i}),$ so that $\hat{\lambda}_{\ell}(x_{i})=\hat{\Psi}_{\ell}a(x_{i})$ estimates $\lambda_{0}(x_{i}).$ In comparison the LR estimator described in the previous subsection was based on an explicit nonparametric estimator of $f_{0}(p_{1}|p_{2},y),$ while this $\hat{\lambda
}_{\ell}(x)$ implicitly estimates the inverse of that pdf via a mean-square approximation of $\lambda_{0}(x_{i})$ by $\hat{\Psi}_{\ell}a(x_{i}).$
Chernozhukov, Newey, and Robins (2018) introduce machine learning methods for choosing the functions to include in the vector $A(x)$. This method can be combined with machine learning methods for estimating $E[q_{i}|x_{i}]$ to construct a double machine learning estimator of average surplus, as shown in Chernozhukov, Hausman, and Newey (2018).
In parametric models moment functions like those in equation (\[lr series\]) are used to “partial out” nuisance parameters $\zeta.$ For maximum likelihood these moment functions are the basis of Neyman’s (1959) C-alpha test. Wooldridge (1991) generalized such moment conditions to nonlinear least squares and Lee (2005), Bera et al. (2010), and Chernozhukov et al. (2015) to GMM. What is novel here is their use in the construction of semiparametric estimators and the interpretation of the estimated LR moment functions $\psi(z_{i},\beta,\hat{\zeta}_{\ell},\hat{\Psi}_{\ell})$ as the sum of an original moment function $m(z_{i},\beta,\hat{\zeta}_{\ell})$ and an influence adjustment $\phi(z_{i},\beta,\hat{\zeta}_{\ell},\hat{\Psi}_{\ell})$.
Estimating the Influence Adjustment with First Step Smoothing
-------------------------------------------------------------
The adjustment term can be estimated in a general way that allows for kernel density, locally linear regression, and other kernel smoothing estimators for the first step. The idea is to differentiate with respect to the effect of the $i^{th}$ observation on sample moments. Newey (1994b) used a special case of this approach to estimate the asymptotic variance of a functional of a kernel based semiparametric or nonparametric estimator. Here we extend this method to a wider class of first step estimators, such as locally linear regression, and apply it to estimate the adjustment term for construction of LR moments.
We will describe this estimator for the case where $\gamma$ is a vector of functions of a vector of variables $x.$ Let $h(z,x,\gamma)$ be a vector of functions of a data observation $z$, $x$, and a possible realized value of $\gamma$ (i.e. a vector of real numbers $\gamma$). Also let $\hat{h}_{\ell
}(x,\gamma)=\sum_{j\in\bar{I}_{\ell}}h(z_{j},x,\gamma)/\bar{n}_{\ell}$ be a sample average over a set of observations $\bar{I}_{\ell}$ not included in $I_{\ell},$ where $\bar{n}_{\ell}$ is the number of observations in $\bar{I}_{\ell}.$ We assume that the first step estimator $\hat{\gamma}_{\ell}(x)$ solves$$0=\hat{h}_{\ell}(x,\gamma).$$ We suppress the dependence of $h$ and $\hat{\gamma}$ on a bandwidth. For example for a pdf $\kappa(u)$ a kernel density estimator would correspond to $h(z_{j},x,\gamma)=\kappa(x-x_{j})-\gamma$ and a locally linear regression would be $\hat{\gamma}_{1}(x)$ for$$h(z_{j},x,\gamma)=\kappa(x-x_{j})\left(
\begin{array}
[c]{c}1\\
x-x_{j}\end{array}
\right) [y_{j}-\gamma_{1}-(x-x_{j})^{\prime}\gamma_{2}].$$
To measure the effect of the $i^{th}$ observation on $\hat{\gamma}$ let $\hat{\gamma}_{\ell i}^{\xi}(x)$ be the solution to $$0=\hat{h}_{\ell}(x,\gamma)+\xi\cdot h(z_{i},x,\gamma).$$ This $\hat{\gamma}_{\ell i}^{\xi}(x)$ is the value of the function obtained from adding the contribution $\xi\cdot h(z_{i},x,\gamma)$ of the $i^{th}$ observation. An estimator of the adjustment term can be obtained by differentiating the average of the original moment function with respect to $\xi$ at $\xi=0.$ This procedure leads to an estimated locally robust moment function given by$$\psi(z_{i},\beta,\hat{\gamma}_{\ell})=m(z_{i},\beta,\hat{\gamma}_{\ell
})+\left. \frac{\partial}{\partial\xi}\frac{1}{\bar{n}_{\ell}}\sum_{j\in
\bar{I}_{\ell}}m(z_{j},\beta,\hat{\gamma}_{\ell i}^{\xi}(\cdot))\right\vert
_{\xi=0}.$$ This estimator is a generalization of the influence function estimator for kernels in Newey (1994b).
Double and Partial Robustness
=============================
The zero derivative condition in equation (\[lrdef\]) is an appealing robustness property in and of itself. A zero derivative means that the expected moment functions remain closer to zero than $\tau$ as $\tau$ varies away from zero. This property can be interpreted as local insensitivity of the moments to the value of $\gamma$ being plugged in, with the moments remaining close to zero as $\gamma$ varies away from its true value. Because it is difficult to get nonparametric functions exactly right, especially in high dimensional settings, this property is an appealing one.
Such robustness considerations, well explained in Robins and Rotnitzky (2001), have motivated the development of doubly robust (DR) moment conditions. DR moment conditions have expectation zero if one first stage component is incorrect. DR moment conditions allow two chances for the moment conditions to hold, an appealing robustness feature. Also, DR moment conditions have simpler conditions for asymptotic normality than general LR moment functions as discussed in Section 7. Because many interesting LR moment conditions are also DR we consider double robustness.
LR moments that are constructed by adding the adjustment term for first step estimation provide candidates for DR moment functions. The derivative of the expected moments with respect to each first step will be zero, a necessary condition for DR. The condition for moments constructed in this way to be DR is the following:
<span style="font-variant:small-caps;">Assumption 1:</span> *There are sets* $\Gamma$ *and* $\Lambda
$ *such that for all* $\gamma\in\Gamma$ *and* $\lambda\in
\Lambda$$$E[m(z_{i},\beta_{0},\gamma)]=-E[\phi(z_{i},\beta_{0},\gamma,\lambda
_{0})],E[\phi(z_{i},\beta_{0},\gamma_{0},\lambda)]=0.$$
This condition is just the definition of DR for the moment function $\psi(z,\beta,\gamma)=m(z,\beta,\gamma)+\phi(z,\beta,\gamma,\lambda)$, pertaining to specific sets $\Gamma$ and $\Lambda.$
The construction of adding the adjustment term to an identifying or original moment function leads to several novel classes of DR moment conditions. One such class has a first step that satisfies a conditional moment restriction$$E[y_{i}-\gamma_{0}(w_{i})|x_{i}]=0, \label{cmrlin}$$ where $w_{i}$ is potentially endogenous and $x_{i}$ is a vector of instrumental variables. This condition is the nonparametric instrumental variable (NPIV) restriction as in Newey and Powell (1989, 2003) and Newey (1991). A first step conditional expectation where $\gamma_{0}(x_{i})=E[y_{i}|x_{i}]$ is included as special case with $w_{i}=x_{i}.$ Ichimura and Newey (2017) showed that the adjustment term for this step takes the form $\phi(z,\gamma,\lambda)=\lambda(x)[y-\gamma(w)]$ so $m(z,\beta,\gamma
)+\lambda(x)[y-\gamma(w)]$ is a candidate for a DR moment function. A sufficient condition for DR is:
<span style="font-variant:small-caps;">Assumption 2:</span> *i) Equation (\[cmrlin\]) is satisfied; ii)* $\Lambda=\{\lambda(x):E[\lambda(x_{i})^{2}]<\infty\}$ *and* $\Gamma=\{\gamma(w):E[\gamma(w_{i})^{2}]<\infty\};$ *iii) there is* $v(w)$ *with* $E[v(w_{i})^{2}]<\infty$ *such that* $E[m(z_{i},\beta_{0},\gamma)]=E[v(w_{i})\{\gamma(w_{i})-\gamma_{0}(w_{i})\}]$ *for all* $\gamma\in\Gamma$*; iv) there is* $\lambda
_{0}(x)$ *such that* $v(w_{i})=E[\lambda_{0}(x_{i})|w_{i}]$*; and v)* $E[y_{i}^{2}]<\infty.$
By the Riesz representation theorem condition iii) is necessary and sufficient for $E[m(z_{i},\beta_{0},\gamma)]$ to be a mean square continuous functional of $\gamma$ with representer $v(w).$ Condition iv) is an additional condition giving continuity in the reduced form difference $E[\gamma(w_{i})-\gamma
_{0}(w_{i})|x_{i}]$, as further discussed in Ichimura and Newey (2017). Under this condition$$\begin{aligned}
E[m(z_{i},\beta_{0},\gamma)] & =E[E[\lambda_{0}(x_{i})|w_{i}]\{\gamma
(w_{i})-\gamma_{0}(w_{i})\}]=E[\lambda_{0}(x_{i})\{\gamma(w_{i})-\gamma
_{0}(w_{i})\}]\\
& =-E[\phi(z_{i},\gamma,\lambda_{0})],\text{ \ }E[\phi(z_{i},\gamma
_{0},\lambda)]=E[\lambda(x_{i})\{y_{i}-\gamma_{0}(w_{i})\}]=0.\end{aligned}$$ Thus Assumption 2 implies Assumption 1 so that we have
<span style="font-variant:small-caps;">Theorem 3:</span> *If Assumption 2 is satisfied then* $m(z,\beta
,\gamma)+\lambda(x)\{y-\gamma(w)\}$ *is doubly robust.*
There are many interesting, novel examples of DR moment conditions that are special cases of Theorem 3. The average surplus bound is an example where $y_{i}=q_{i},$ $w_{i}=x_{i},$ $x_{i}$ is the observed vector of prices and income, $\Lambda=\Gamma$ is the set of all measurable functions of $x_{i}$ with finite second moment, and $\gamma_{0}(x)=E[y_{i}|x_{i}=x].$ Let $x_{1}$ denote $p_{1}$ and $x_{2}$ the vector of other prices and income, so that $x=(x_{1},x_{2}^{\prime})^{\prime}$. Also let $f_{0}(x_{1}|x_{2})$ denote the conditional pdf of $p_{1}$ given $x_{2}$ and $\ell(x)=\ell(p_{1},y)$ for income $y$. Let $m(z,\beta,\gamma)=\int\ell(p_{1},x_{2})\gamma(p_{1},x_{2})dp_{1}-\beta$ as before. Multiplying and dividing through by $f_{0}(p_{1}|x_{2})$ gives, for all $\gamma,\lambda\in\Gamma$ and $\lambda
_{0}(x)=f_{0}(x_{1}|x_{2})^{-1}\ell(x),$ $$E[m(z_{i},\beta_{0},\gamma)]=E[\int\ell(p_{1},x_{2i})\gamma(p_{1},x_{2i})dp_{1}]-\beta_{0}=E[E[\lambda_{0}(x_{i})\gamma(x_{i})|x_{2i}]]-\beta_{0}=E[\lambda_{0}(x_{i})\{\gamma(x_{i})-\gamma_{0}(x_{i})\}].$$ Theorem 3 then implies that the LR moment function for average surplus $m(z,\beta,\gamma)+\lambda(x)[q-\gamma(x)]$ is DR. A corresponding DR estimator $\hat{\beta}$ is given in equation (\[exlr\]).
The surplus bound is an example of a parameter where $\beta_{0}=E[g(z_{i},\gamma_{0})]$ for some linear functional $g(z,\gamma)$ of $\gamma$ and for $\gamma_{0}$ satisfying the conditional moment restriction of equation (\[cmrlin\])$.$ For the surplus bound $g(z,\gamma)=\int\ell(p_{1},x_{2})\gamma(p_{1},x_{2})dp_{1}.$ If Assumption 2 is satisfied then choosing $m(z,\beta,\gamma)=g(z,\gamma)-\beta$ a DR moment condition is $g(z,\gamma
)-\beta+\lambda(x)[y-\gamma(w)].$ A corresponding DR estimator is$$\hat{\beta}=\frac{1}{n}\sum_{i=1}^{n}\{g(z_{i},\hat{\gamma}_{i})+\hat{\lambda
}_{i}(x_{i})[y_{i}-\hat{\gamma}_{i}(w_{i})]\}, \label{drlin}$$ where $\hat{\gamma}_{i}(w)$ and $\hat{\lambda}_{i}(x)$ are estimators of $\gamma_{0}(w)$ and $\lambda_{0}(x)$ respectively. An estimator $\hat{\gamma
}_{i}$ can be constructed by nonparametric regression when $w_{i}=x_{i}$ or NPIV in general. A series estimator $\hat{\lambda}_{i}(x)$ can be constructed similarly to the surplus bound example in Section 3.2. For $w_{i}=x_{i}$ Newey and Robins (2017) give such series estimators of $\hat{\lambda}_{i}(x)$ and Chernozhukov, Newey, and Robins (2018) show how to choose the approximating functions for $\hat{\lambda}_{i}(x_{i})$ by machine learning. Simple and general conditions for root-n consistency and asymptotic normality of $\hat{\beta}$ that allow for machine learning are given in Section 7.
Novel examples of the DR estimator in equation (\[drlin\]) with $w_{i}=x_{i}$ are given by Newey and Robins (2017) and Chernozhukov, Newey, and Robins (2018). Also Appendix C provides a generalization to $\gamma(w)$ and $\gamma(x)$ that satisfy orthogonality conditions more general than conditional moment restrictions and novel examples of those. A novel example with $w_{i}\neq
x_{i}$ is a weighted average derivative of $\gamma_{0}(w)$ satisfying equation (\[cmrlin\]). Here $g(z,\gamma)=\bar{v}(w)\partial\gamma(w)/\partial w$ for some weight function $\bar{v}(w)$. Let $f_{0}(w)$ be the pdf of $w_{i}$ and $v(w)=-f_{0}(w)^{-1}\partial\lbrack\bar{v}(w)f_{0}(w)]/\partial w,$ assuming that derivatives exist. Assume that $\bar{v}(w)\gamma(w)f_{0}(w)$ is zero on the boundary of the support of $w_{i}.$ Integration by parts then gives Assumption 2 iii). Assume also that there exists $\lambda_{0}\in\Lambda$ with $v(w_{i})=E[\lambda_{0}(x_{i})|w_{i}].$ Then for estimators $\hat{\gamma}_{i}$ and $\hat{\lambda}_{i}$ a DR estimator of the weighted average derivative is$$\hat{\beta}=\frac{1}{n}\sum_{i=1}^{n}\{\bar{v}(w_{i})\frac{\partial\hat
{\gamma}_{i}(w_{i})}{\partial w}+\hat{\lambda}_{i}(x_{i})[y_{i}-\hat{\gamma
}_{i}(w_{i})]\}.$$ This is a DR version of the weighted average derivative estimator of Ai and Chen (2007). A special case of this example is the DR moment condition for the weighted average derivative in the exogenous case where $w_{i}=x_{i}$ given in Firpo and Rothe (2017).
Theorem 3 includes existing DR moment functions as special cases where $w_{i}=x_{i}$, including the mean with randomly missing data given by Robins and Rotnitzky (1995), the class of DR estimators in Robins et al. (2008), and the DR estimators of Firpo and Rothe (2017). We illustrate for the mean with missing data. Let $w=x,$ $x=(a,u)$ for an observed data indicator $a\in\{0,1\}$ and covariates $u,$ $m(z,\beta,\gamma)=\gamma(1,u)-\beta,$ and $\lambda_{0}(x)=a/\Pr(a_{i}=1|u_{i}=u).$ Here it is well known that $$E[m(z_{i},\beta_{0},\gamma)]=E[\gamma(1,u_{i})]-\beta_{0}=E[\lambda_{0}(x_{i})\{\gamma(x_{i})-\gamma_{0}(x_{i})\}]=-E[\lambda_{0}(x_{i})\{y_{i}-\gamma(x_{i})\}].$$ Then DR of the moment function $\gamma(1,u)-\beta+\lambda(x)[y-\gamma(x)]$ of Robins and Rotnitzky (1995) follows by Theorem 3.
Another novel class of DR moment conditions are those where the first step $\gamma$ is a pdf of a function $x$ of the data observation $z.$ By Proposition 5 of Newey (1994a), the adjustment term for such a first step is $\phi(z,\beta,\gamma,\lambda)=\lambda(x)-\int\lambda(u)\gamma(u)du$ for some possible $\lambda$. A sufficient condition for the DR as in Assumption 1 is:
<span style="font-variant:small-caps;">Assumption 3:</span> $x_{i}$ *has pdf* $\gamma_{0}(x)$ *and for* $\Gamma=\{\gamma:\gamma(x)\geq0$, $\int\gamma(x)dx=1\}$ *there is* $\lambda_{0}(x)$ *such that for all* $\gamma\in\Gamma,$$$E[m(z_{i},\beta_{0},\gamma)]=\int\lambda_{0}(x)\{\gamma(x)-\gamma_{0}(x)\}dx.$$
Note that for $\phi(z,\gamma,\lambda)=\lambda(x)-\int\lambda(\tilde{x})\gamma(\tilde{x})d\tilde{x}$ it follows from Assumption 3 that $E[m(z_{i},\beta_{0},\gamma)]=-E[\phi(z_{i},\gamma,\lambda_{0})]$ for all $\gamma
\in\Gamma$. Also, $E[\phi(z_{i},\gamma_{0},\lambda)]=E[\lambda(x_{i})]-\int\lambda(\tilde{x})\gamma_{0}(\tilde{x})d\tilde{x}=0.$ Then Assumption 1 is satisfied so we have:
<span style="font-variant:small-caps;">Theorem 4:</span> *If Assumption 3 is satisfied then* $m(z,\beta
,\gamma)+\lambda(x)-\int\lambda(\tilde{x})\gamma(\tilde{x})d\tilde{x}$ *is DR.*
The integrated squared density $\beta_{0}=\int\gamma_{0}(x)^{2}dx$ is an example for $m(z,\beta,\gamma)=\gamma(x)-\beta,$ $\lambda_{0}=\gamma_{0},$ and $$\psi(z,\beta,\gamma,\lambda)=\gamma(x)-\beta+\lambda(x)-\int\lambda(\tilde
{x})\gamma(\tilde{x})d\tilde{x}.$$ This DR moment function seems to be novel. Another example is the density weighted average derivative (DWAD) of Powell, Stock, and Stoker (1989), where $m(z,\beta,\gamma)=-2y\cdot\partial\gamma(x)/\partial x-\beta$. Let $\delta(x_{i})=E[y_{i}|x_{i}]\gamma_{0}(x_{i})$. Assuming that $\delta
(u)\gamma(u)$ is zero on the boundary and differentiable, integration by parts gives$$E[m(z_{i},\beta_{0},\gamma)]=-2E[y_{i}\partial\gamma(x_{i})/\partial
x]-\beta_{0}=\int[\partial\delta(\tilde{x})/\partial x]\{\gamma(\tilde
{x})-\gamma_{0}(\tilde{x})\}d\tilde{x},$$ so that Assumption 3 is satisfied with $\lambda_{0}(x)=\partial\delta
(x)/\partial x.$ Then by Theorem 4$$\hat{\beta}=\frac{1}{n}\sum_{i=1}^{n}\{-2\frac{\partial\hat{\gamma}_{i}(x_{i})}{\partial x}+\frac{\partial\hat{\delta}_{i}(x_{i})}{\partial x}-\int\frac{\partial\hat{\delta}_{i}(\tilde{x})}{\partial x}\hat{\gamma}_{i}(\tilde{x})d\tilde{x}\}$$ is a DR estimator. It was shown in NHR (1998) that the Powell, Stock, and Stoker (1989) estimator with a twicing kernel is numerically equal to a leave one out version of this estimator for the original (before twicing) kernel. Thus the DR result for $\hat{\beta}$ gives an interpretation of the twicing kernel estimator as a DR estimator.
The expectation of the DR moment functions of both Theorem 3 and 4 are affine in $\gamma$ and $\lambda$ holding the other fixed at the truth. This property of DR moment functions is general, as we show by the following characterization of DR moment functions:
<span style="font-variant:small-caps;">Theorem 5:</span> *If* $\Gamma$ *and* $\Lambda$ *are linear then* $\psi(z,\beta,\gamma,\lambda)$ *is DR if and only if* $$\left. \partial E[\psi(z_{i},\beta_{0},(1-\tau)\gamma_{0}+\tau\gamma
,\lambda_{0})]\right\vert _{\tau=0}=0,\left. \partial E[\psi(z_{i},\beta
_{0},\gamma_{0},(1-\tau)\lambda_{0}+\tau\lambda)]\right\vert _{\tau=0}=0,$$ *and* $E[\psi(z_{i},\beta_{0},\gamma,\lambda_{0})]$ *and* $E[\psi(z_{i},\beta_{0},\gamma_{0},\lambda)]$ *are affine in* $\gamma
$ *and* $\lambda$ *respectively.*
The zero derivative condition of this result is a Gateaux derivative, componentwise version of LR. Thus, we can focus a search for DR moment conditions on those that are LR. Also, a DR moment function must have an expectation that is affine in each of $\gamma$ and $\lambda$ while the other is held fixed at the truth. It is sufficient for this condition that $\psi(z_{i},\beta_{0},\gamma,\lambda)$ be affine in each of $\gamma$ and $\lambda$ while the other is held fixed. This property can depend on how $\gamma$ and $\lambda$ are specified. For example the missing data DR moment function $\gamma(1,u)-\beta+\pi(u)^{-1}a[y-\gamma(x)]$ is not affine in the propensity score $\pi(u)=\Pr(a_{i}=1|u_{i}=u)$ but is in $\lambda
(x)=\pi(u)^{-1}a$.
In general Theorem 5 motivates the construction of DR moment functions by adding the adjustment term to obtain a LR moment function that will then be DR if it is affine in $\gamma$ and $\lambda$ separately. It is interesting to note that in the NPIV setting of Theorem 3 and the density setting of Theorem 4 that the adjustment term is always affine in $\gamma$ and $\lambda.$ It then follows from Theorem 5 that in those settings LR moment conditions are precisely those where $E[m(z_{i},\beta_{0},\gamma)]$ is affine in $\gamma.$ Robins and Rotnitzky (2001) gave conditions for the existence of DR moment conditions in semiparametric models. Theorem 5 is complementary to those results in giving a complete characterization of DR moments when $\Gamma$ and $\Lambda$ are linear.
Assumptions 2 and 3 both specify that $E[m(z_{i},\beta_{0},\gamma)]$ is continuous in an integrated squared deviation norm. These continuity conditions are linked to finiteness of the semiparametric variance bound for the functional $E[m(z_{i},\beta_{0},\gamma)],$ as discussed in Newey and McFadden (1994) for Assumption 2 with $w_{i}=x_{i}$ and for Assumption 3. For Assumption 2 with $w_{i}\neq x_{i}$ Severini and Tripathi (2012) showed for $m(z,\beta,\gamma)=v(w)\gamma(w)-\beta$ with known $v(w)$ that the existence of $\lambda_{0}(w)$ with $v(w_{i})=E[\lambda_{0}(x_{i})|w_{i}]$ is necessary for the existence of a root-n consistent estimator of $\beta$. Thus the conditions of Assumption 2 are also linked to necessary conditions for root-n consistent estimation when $w_{i}\neq x_{i}.$
Partial robustness refers to settings where $E[m(z_{i},\beta_{0},\bar{\gamma
})]=0$ for some $\bar{\gamma}\neq\gamma_{0}$. The novel DR moment conditions given here lead to novel partial robustness results as we now demonstrate in the conditional moment restriction setting of Assumption 2. When $\lambda
_{0}(x)$ in Assumption 2 is restricted in some way there may exist $\tilde{\gamma}\neq\gamma_{0}$ with $E[\lambda_{0}(x_{i})\{y_{i}-\tilde
{\gamma}(w_{i})\}]=0.$ Then$$E[m(z_{i},\beta_{0},\tilde{\gamma})]=-E[\lambda_{0}(x_{i})\{y_{i}-\tilde{\gamma}(w_{i})\}]=0.$$ Consider the average derivative $\beta_{0}=E[\partial\gamma_{0}(w_{i})/\partial w_{r}]$ where $m(z,\beta,\gamma)=\partial\gamma(w)/\partial
w_{r}-\beta$ for some $r.$ Let $\delta=(E[a(x_{i})p(w_{i})^{\prime}])^{-1}E[a(x_{i})y_{i}]$ be the limit of the linear IV estimator with right hand side variables $p(w)$ and the same number of instruments $a(x).$ The following is a partial robustness result that provides conditions for the average derivative of the linear IV estimator to equal the true average derivative:
<span style="font-variant:small-caps;">Theorem 6:</span> If $-\partial\ln f_{0}(w)/\partial w_{r}=c^{\prime}p(w)$ for a constant vector $c$, $E[p(w_{i})p(w_{i})^{\prime}]$ is nonsingular, and $E[a(x_{i})|w_{i}=w]=\Pi p(w)$ for a square nonsingular $\Pi$ then for $\delta=(E[a(x_{i})p(w_{i})^{\prime}])^{-1}E[a(x_{i})y_{i}],$$$E[\partial\{p(w_{i})^{\prime}\delta\}/\partial w_{r}]=E[\partial\gamma
_{0}(w_{i})/\partial w_{r}].$$
This result shows that if the density score is a linear combination of the right-hand side variables $p(w)$ used by linear IV, the conditional expectation of the instruments $a(x_{i})$ given $w_{i}$ is a nonsingular linear combination of $p(w)$, and $p(w)$ has a nonsingular second moment matrix then the average derivative of the linear IV estimator is the true average derivative. This is a generalization to NPIV of Stoker’s (1986) result that linear regression coefficients equal the average derivatives when the regressors are multivariate Gaussian.
DR moment conditions can be used to identify parameters of interest. Under Assumption 1 $\beta_{0}$ may be identified from$$E[m(z_{i},\beta_{0},\bar{\gamma})]=-E[\phi(z_{i},\beta_{0},\bar{\gamma
},\lambda_{0})]$$ for any fixed $\bar{\gamma}$ when the solution $\beta_{0}$ to this equation is unique.
<span style="font-variant:small-caps;">Theorem 7:</span> *If Assumption 1 is satisfied,* $\lambda_{0}$ *is identified, and for some* $\bar{\gamma}$ *the equation* $E[\psi(z_{i},\beta,\bar{\gamma},\lambda_{0})]=0$ *has a unique solution then* $\beta_{0}$ *is identified as that solution.*
Applying this result to the NPIV setting of Assumption 2 gives an explicit formula for certain functionals of $\gamma_{0}(w)$ without requiring that the completeness identification condition of Newey and Powell (1989, 2003) be satisfied, similarly to Santos (2011). Suppose that $v(w)$ is identified, e.g. as for the weighted average derivative. Since both $w$ and $x$ are observed it follows that a solution $\lambda_{0}(x)$ to $v(w)=E[\lambda_{0}(x)|w]$ will be identified if such a solution exists. Plugging $\bar{\gamma}=0$ into the equation $E[\psi(z_{i},\beta_{0},\bar{\gamma},\lambda_{0})]=0$ gives
<span style="font-variant:small-caps;">Corollary 8:</span> *If* $v(w_{i})$ *is identified and there exists* $\lambda_{0}(x_{i})$ *such that* $v(w_{i})=E[\lambda_{0}(x_{i})|w_{i}]$ *then* $\beta_{0}=E[v(w_{i})\gamma_{0}(w_{i})]$ *is identified as* $\beta_{0}=E[\lambda_{0}(x_{i})y_{i}]$*.*
Note that this result holds without the completeness condition. Identification of $\beta_{0}=E[v(w_{i})\gamma_{0}(w_{i})]$ for known $v(w_{i})$ with $v(w_{i})=E[\lambda_{0}(x_{i})|w_{i}]$ follows from Severini and Tripathi (2006). Corollary 8 extends that analysis to the case where $v(w_{i})$ is only identified but not necessarily known and links it to DR moment conditions. Santos (2011) gives a related formula for a parameter $\beta_{0}=\int\tilde
{v}(w)\lambda_{0}(w)dw$. The formula here differs from Santos (2011) in being an expectation rather than a Lebesgue integral. Santos (2011) constructed an estimator. That is beyond the scope of this paper.
Conditional Moment Restrictions
===============================
Models of conditional moment restrictions that depend on unknown functions are important in econometrics. In such models the nonparametric components may be determined simultaneously with the parametric components. In this setting it is useful to work directly with the instrumental variables to obtain LR moment conditions rather than to make a first step influence adjustment. For that reason we focus in this Section on constructing LR moments by orthogonalizing the instrumental variables.
Our orthogonal instruments framework is based on conditional moment restrictions of the form$$E[\rho_{j}(z_{i},\beta_{0},\gamma_{0})|x_{ji}]=0,(j=1,...,J),
\label{cond mom restrict}$$ where each $\rho_{j}(z,\beta,\gamma)$ is a scalar residual and $x_{j}$ are instruments that may differ across $j$. This model is considered by Chamberlain (1992) and Ai and Chen (2003, 2007) when $x_{j}$ is the same for each $j$ and for Ai and Chen (2012) when the set of $x_{j}$ includes $x_{j-1}.$ We allow the residual vector $\rho(z,\beta,\gamma)$ to depend on the entire function $\gamma$ and not just on its value at some function of the observed data $z_{i}$.
In this framework we consider LR moment functions having the form$$\psi(z,\beta,\gamma,\lambda)=\lambda(x)\rho(z,\beta,\gamma), \label{gcm}$$ where $\lambda(x)=[\lambda_{1}(x_{1}),...,\lambda_{J}(x_{J})]$ is a matrix of instrumental variables with the $j^{th}$ column given by $\lambda_{j}(x_{j}).$ We will define orthogonal instruments to be those that make $\psi
(z,\beta,\gamma,\lambda)$ locally robust. To define orthogonal instrumental variables we assume that $\gamma$ is allowed to vary over a linear set $\Gamma$ as $F$ varies. For each $\Delta\in\Gamma$ let$$\bar{\rho}_{\gamma}(x,\Delta)=(\frac{\partial E[\rho_{1}(z_{i},\beta
_{0},\gamma_{0}+\tau\Delta)|x_{1}]}{\partial\tau},...,\frac{\partial
E[\rho_{J}(z_{i},\beta_{0},\gamma_{0}+\tau\Delta)|x_{J}]}{\partial\tau
})^{\prime}.$$ This $\bar{\rho}_{\gamma}(x,\Delta)$ is the Gateaux derivative with respect to $\gamma$ of the conditional expectation of the residuals in the direction $\Delta.$ We characterize $\lambda_{0}(x)$ as orthogonal if$$E[\lambda_{0}(x_{i})\bar{\rho}_{\gamma}(x_{i},\Delta)]=0\text{ for all }\Delta\in\Gamma.$$ We assume that $\bar{\rho}_{\gamma}(x,\Delta)$ is linear in $\Delta$ and consider the Hilbert space of vectors of random vectors $a(x)=$ $(a_{1}(x_{1}),...,a_{J}(x_{J}))$ with inner product $\left\langle a,b\right\rangle
=E[a(x_{i})^{\prime}b(x_{i})]$. Let $\bar{\Lambda}_{\gamma}$ denote the closure of the set $\{\bar{\rho}_{\gamma}(x,\Delta):\Delta\in\Gamma\}$ in that Hilbert space. Orthogonal instruments are those where each row of $\lambda
_{0}(x)$ is orthogonal to $\bar{\Lambda}_{\gamma}.$ They can be interpreted as instrumental variables where the effect of estimation of $\gamma$ has been partialed out. When $\lambda_{0}(x)$ is orthogonal then $\psi(z,\beta
,\gamma,\lambda)=\lambda(x)\rho(z,\beta,\gamma)$ is LR:
<span style="font-variant:small-caps;">Theorem 9:</span> *If each row of* $\lambda_{0}(x)$ *is orthogonal to* $\bar{\Lambda}_{\gamma}$ *then the moment functions in equation (\[gcm\]) are LR.*
We also have a DR result:
<span style="font-variant:small-caps;">Theorem 10:</span> *If each row of* $\lambda_{0}(x)$ *is orthogonal to* $\bar{\Lambda}_{\gamma}$ *and* $\rho(z,\beta,\gamma)$ *is affine in* $\gamma\in\Gamma$ *then the moment functions in equation (\[gcm\]) are DR for* $\Lambda=\{\lambda(x):E[\lambda(x_{i})^{\prime}\rho(z_{i},\beta_{0},\gamma_{0})^{\prime}\rho(z_{i},\beta_{0},\gamma_{0})\lambda(x_{i})]<\infty\}$*.*
There are many ways to construct orthogonal instruments. For instance, given a $r\times(J-1)$ matrix of instrumental variables $\lambda(x)$ one could construct corresponding orthogonal ones $\lambda_{0}(x_{i})$ as the matrix where each row of $\lambda(x)$ is replaced by the residual from the least squares projection of the corresponding row of $\lambda(x)$ on $\bar{\Lambda
}_{\gamma}$. For local identification of $\beta$ we also require that $$rank(\left. \partial E[\psi(z_{i},\beta,\gamma_{0})]/\partial\beta\right\vert
_{\beta=\beta_{0}})=\dim(\beta). \label{local id beta}$$
A model where $\beta_{0}$ is identified from semiparametric conditional moment restrictions with common instrumental variables is a special case where $x_{ji}$ is the same for each $j$. In this case there is a way to construct orthogonal instruments that leads to an efficient estimator of $\beta_{0}$. Let $\Sigma(x_{i})$ denote some positive definite matrix with its smallest eigenvalue bounded away from zero, so that $\Sigma(x_{i})^{-1}$ is bounded. Let $\left\langle a,b\right\rangle _{\Sigma}=E[a(x_{i})^{\prime}\Sigma
(x_{i})^{-1}b(x_{i})]$ denote an inner product and note that $\bar{\Lambda
}_{\gamma}$ is closed in this inner product by $\Sigma(x_{i})^{-1}$ bounded. Let $\tilde{\lambda}_{k}^{\Sigma}(x_{i},\lambda)$ denote the residual from the least squares projection of the $k^{th}$ row $\lambda\left( x\right)
^{\prime}e_{k}$ of $\lambda(x)$ on $\bar{\Lambda}_{\gamma}$ with the inner product $\left\langle a,b\right\rangle _{\Sigma}.$ Then for all $\Delta
\in\Gamma,$ $$E[\tilde{\lambda}_{k}^{\Sigma}(x_{i},\lambda)^{\prime}\Sigma(x_{i})^{-1}\bar{\rho}_{\gamma}(x_{i},\Delta)]=0,$$ so that for $\tilde{\lambda}^{\Sigma}(x_{i},\lambda)=[\tilde{\lambda}_{1}^{\Sigma}(x_{i},\lambda),...,\tilde{\lambda}_{r}^{\Sigma}(x_{i},\lambda)]$ the instrumental variables $\tilde{\lambda}^{\Sigma}(x_{i},\lambda
)\Sigma(x_{i})^{-1}$ are orthogonal. Also, $\tilde{\lambda}^{\Sigma}(x_{i},\lambda)$ can be interpreted as the solution to$$\min_{\{D(x):D(x)^{\prime}e_{k}\in\bar{\Lambda}_{\gamma},k=1,...,r\}}tr(E[\{\lambda(x_{i})-D(x_{i})\}\Sigma(x_{i})^{-1}\{\lambda(x_{i})-D(x_{i})\}^{\prime}])$$ where the minimization is in the positive semidefinite sense.
The orthogonal instruments that minimize the asymptotic variance of GMM in the class of GMM estimators with orthogonal instruments are given by$$\lambda_{0}^{\ast}(x)=\tilde{\lambda}^{\Sigma^{\ast}}(x,\lambda_{\beta})\Sigma^{\ast}(x)^{-1},\lambda_{\beta}(x_{i})=\left. \frac{\partial
E[\rho(z_{i},\beta,\gamma_{0})|x_{i}]}{\partial\beta}\right\vert _{\beta
=\beta_{0}}^{\prime},\Sigma^{\ast}(x_{i})=Var(\rho_{i}|x_{i}),\rho_{i}=\rho(z_{i},\beta_{0},\gamma_{0}).$$
<span style="font-variant:small-caps;">Theorem 11:</span> *The instruments* $\lambda_{0}^{\ast}(x_{i})$ *give an efficient estimator in the class of IV estimators with orthogonal instruments.*
The asymptotic variance of the GMM estimator with optimal orthogonal instruments is $$(E[m_{i}^{\ast}m_{i}^{\ast\prime}])^{-1}=(E[\tilde{\lambda}(x_{i},\lambda^{\ast},\Sigma^{\ast})\Sigma^{\ast}(x_{i})^{-1}\tilde{\lambda}(x_{i},\lambda^{\ast},\Sigma^{\ast})^{\prime}])^{-1}.$$ This matrix coincides with the semiparametric variance bound of Ai and Chen (2003). Estimation of the optimal orthogonal instruments is beyond the scope of this paper. The series estimator of Ai and Chen (2003) could be used for this.
This framework includes moment restrictions with a NPIV first step $\gamma$ satisfying $E[\rho(z_{i},\gamma_{0})|x_{i}]=0$ where we can specify $\rho
_{1}(z,\beta,\gamma)=m(z,\beta,\gamma),$ $x_{1i}=1,$ $\rho_{2}(z,\beta
,\gamma)=\rho(z,\gamma),$ and $x_{2i}=x_{i}.$ It generalizes that setup by allowing for more residuals $\rho_{j}(z,\beta,\gamma)$, $(j\geq3)$ and allowing all residuals to depend on $\beta.$
Asymptotic Theory
=================
In this Section we give simple and general asymptotic theory for LR estimators that incorporates the cross-fitting of equation (\[cfit\]). Throughout we use the structure of LR moment functions that are the sum $\psi(z,\beta
,\gamma,\lambda)=m(z,\beta,\gamma)+\phi(z,\beta,\gamma,\lambda)$ of an identifying or original moment function $m(z,\beta,\gamma)$ depending on a first step function $\gamma$ and an influence adjustment term $\phi
(z,\beta,\gamma,\lambda)$ that can depend on an additional first step $\lambda.$ The asymptotic theory will apply to any moment function that can be decomposed into a function of a single nonparametric estimator and a function of two nonparametric estimators. This structure and LR leads to particularly simple and general conditions.
The conditions we give are composed of mean square consistency conditions for first steps and one, two, or three rate conditions for quadratic remainders. We will only use one quadratic remainder rate for DR moment conditions, involving faster than $1/\sqrt{n}$ convergence of products of estimation errors for $\hat{\gamma}$ and $\hat{\lambda}.$ When $E[m(z_{i},\beta
_{0},\gamma)+\phi(z_{i},\beta_{0},\gamma,\lambda_{0})]$ is not affine in $\gamma$ we will impose a second rate condition that involves faster than $n^{-1/4}$ convergence of $\hat{\gamma}.$ When $E[\phi(z_{i},\gamma
_{0},\lambda)]$ is also not affine in $\lambda$ we will impose a third rate condition that involves faster than $n^{-1/4}$ convergence of $\hat{\lambda}.$ Most adjustment terms $\phi(z,\beta,\gamma,\lambda)$ of which we are aware, including for first step conditional moment restrictions and densities, have $E[\phi(z_{i},\beta_{0},\gamma_{0},\lambda)]$ affine in $\lambda,$ so that faster $n^{-1/4}$ convergence of $\hat{\lambda}$ will not be required under our conditions. It will suffice for most LR estimators which we know of to have faster than $n^{-1/4}$ convergence of $\hat{\gamma}$ and faster than $1/\sqrt{n}$ convergence of the product of estimation errors for $\hat{\gamma
}$ and $\hat{\lambda},$ with only the latter condition imposed for DR moment functions. We also impose some additional conditions for convergence of the Jacobian of the moments and sample second moments that give asymptotic normality and consistent asymptotic variance estimation for $\hat{\beta}$.
An important intermediate result for asymptotic normality is$$\sqrt{n}\hat{\psi}(\beta_{0})=\frac{1}{\sqrt{n}}\sum_{i=1}^{n}\psi(z_{i},\beta_{0},\gamma_{0},\lambda_{0})+o_{p}(1), \label{no effec}$$ where $\hat{\psi}(\beta)$ is the cross-fit, sample, LR moments of equation (\[cfit\]). This result will mean that the presence of the first step estimators has no effect on the limiting distribution of the moments at the true $\beta_{0}$. To formulate conditions for this result we decompose the difference between the left and right-hand sides into several remainders. Let $\phi(z,\gamma,\lambda)=\phi(z,\beta_{0},\gamma,\lambda),$ $\bar{\phi}(\gamma,\lambda)=E[\phi(z_{i},\gamma,\lambda)],$ and $\bar{m}(\gamma
)=E[m(z_{i},\beta_{0},\gamma)],$ so that $\bar{\psi}(\gamma,\lambda)=\bar
{m}(\gamma)+\bar{\phi}(\gamma,\lambda)$. Then adding and subtracting terms gives $$\sqrt{n}[\hat{\psi}(\beta_{0})-\sum_{i=1}^{n}\psi(z_{i},\beta_{0},\gamma
_{0},\lambda_{0})/n]=\hat{R}_{1}+\hat{R}_{2}+\hat{R}_{3}+\hat{R}_{4},
\label{redecomp}$$ where$$\begin{aligned}
\hat{R}_{1} & =\frac{1}{\sqrt{n}}\sum_{i=1}^{n}[m(z_{i},\beta_{0},\hat{\gamma}_{i})-m(z_{i},\beta_{0},\gamma_{0})-\bar{m}(\hat{\gamma}_{i})]\label{remain}\\
& +\frac{1}{\sqrt{n}}\sum_{i=1}^{n}[\phi(z_{i},\hat{\gamma}_{i},\lambda
_{0})-\phi(z_{i},\gamma_{0},\lambda_{0})-\bar{\phi}(\hat{\gamma}_{i},\lambda_{0})+\phi(z_{i},\gamma_{0},\hat{\lambda}_{i})-\phi(z_{i},\gamma
_{0},\lambda_{0})-\bar{\phi}(\gamma_{0},\hat{\lambda}_{i})],\nonumber\\
\hat{R}_{2} & =\frac{1}{\sqrt{n}}\sum_{i=1}^{n}[\phi(z_{i},\hat{\gamma}_{i},\hat{\lambda}_{i})-\phi(z_{i},\hat{\gamma}_{i},\lambda_{0})-\phi
(z_{i},\gamma_{0},\hat{\lambda}_{i})+\phi(z_{i},\gamma_{0},\lambda
_{0})],\nonumber\\
\hat{R}_{3} & =\frac{1}{\sqrt{n}}\sum_{i=1}^{n}\bar{\psi}(\hat{\gamma}_{i},\lambda_{0}),\;\;\;\hat{R}_{4}=\frac{1}{\sqrt{n}}\sum_{i=1}^{n}\bar{\phi
}(\gamma_{0},\hat{\lambda}_{i}),\nonumber\end{aligned}$$
We specify regularity conditions sufficient for each of $\hat{R}_{1}$, $\hat{R}_{2}$, $\hat{R}_{3},$ and $\hat{R}_{4}$ to converge in probability to zero so that equation (\[no effec\]) will hold. The remainder term $\hat
{R}_{1}$ is a stochastic equicontinuity term as in Andrews (1994). We give mean square consistency conditions for $\hat{R}_{1}\overset{p}{\longrightarrow
}0$ in Assumption 4.
The remainder term $\hat{R}_{2}$ is a second order remainder that involves both $\hat{\gamma}$ and $\hat{\lambda}.$ When the influence adjustment is $\phi(z,\gamma,\lambda)=\lambda(x)[y-\gamma(w)],$ as for conditional moment restrictions, then$$\hat{R}_{2}=\frac{-1}{\sqrt{n}}\sum_{i=1}^{n}[\hat{\lambda}_{i}(x_{i})-\lambda_{0}(x_{i})][\hat{\gamma}_{i}(w_{i})-\gamma_{0}(w_{i})].$$ $\hat{R}_{2}$ will converge to zero when the product of convergence rates for $\hat{\lambda}_{i}(x_{i})$ and $\hat{\gamma}_{i}(w_{i})$ is faster than $1/\sqrt{n}.$ However, that is not the weakest possible condition. Weaker conditions for locally linear regression first steps are given by Firpo and Rothe (2017) and for series regression first steps by Newey and Robins (2017). These weaker conditions still require that the product of biases of $\hat{\lambda}_{i}(x_{i})$ and $\hat{\gamma}_{i}(w_{i})$ converge to zero faster than $1/\sqrt{n}$ but have weaker conditions for variance terms. We allow for these weaker conditions by allowing $\hat{R}_{2}\overset{p}{\longrightarrow}0$ as a regularity condition. Assumption 5 gives these conditions.
We will have $\hat{R}_{3}=\hat{R}_{4}=0$ in the DR case of Assumption 1, where $\hat{R}_{1}\overset{p}{\longrightarrow}0$ and $\hat{R}_{2}\overset{p}{\longrightarrow}0$ will suffice for equation (\[no effec\]). In non DR cases LR leads to $\bar{\psi}(\gamma,\lambda_{0})=\bar{m}(\gamma
)+\bar{\phi}(\gamma,\lambda_{0})$ having a zero functional derivative with respect to $\gamma$ at $\gamma_{0}$ so that $\hat{R}_{3}\overset{p}{\longrightarrow}0$ when $\hat{\gamma}_{i}$ converges to $\gamma_{0}$ at a rapid enough, feasible rate. For example, suppose $\bar{\psi
}(\gamma,\lambda_{0})$ is twice continuously Frechet differentiable in a neighborhood of $\gamma_{0}$ for a norm $\left\Vert \cdot\right\Vert ,$ with zero Frechet derivative at $\gamma_{0}$. Then$$\left\vert \hat{R}_{3}\right\vert \leq C\sum_{\ell=1}^{L}\sqrt{n}\left\Vert
\hat{\gamma}_{\ell}-\gamma_{0}\right\Vert ^{2}\overset{p}{\longrightarrow}0$$ when $\left\Vert \hat{\gamma}-\gamma_{0}\right\Vert =o_{p}(n^{-1/4})$. Here $\hat{R}_{3}\overset{p}{\longrightarrow}0$ when each $\hat{\gamma}_{\ell}$ converges to $\gamma_{0}$ more quickly than $n^{-1/4}$. It may be possible to weaken this condition by bias correcting $m(z,\beta,\hat{\gamma}),$ as by the bootstrap in Cattaneo and Jansson (2017), by the jackknife in Cattaneo Ma and Jansson (2017), and by cross-fitting in Newey and Robins (2017). Consideration of such bias corrections for $m(z,\beta,\hat{\gamma})$ is beyond the scope of this paper.
In many cases $\hat{R}_{4}=0$ even though the moment conditions are not DR. For example that is true when $\hat{\gamma}$ is a pdf or when $\gamma_{0}$ estimates the solution to a conditional moment restriction. In such cases mean square consistency, $\hat{R}_{2}\overset{p}{\longrightarrow}0,$ and faster than $n^{-1/4}$ consistency of $\hat{\gamma}$ suffices for equation (\[no effec\]); no convergence rate for $\hat{\lambda}$ is needed. The simplification that $\hat{R}_{4}=0$ seems to be the result of $\lambda$ being a Riesz representer for the linear functional that is the derivative of $\bar{m}(\gamma)$ with respect to $\gamma.$ Such a Riesz representer will enter $\bar{\phi}(\lambda,\gamma_{0})$ linearly, leading to $\hat{R}_{4}=0.$ When $\hat{R}_{4}\neq0$ then $\hat{R}_{4}\overset{p}{\longrightarrow}0$ will follow from twice Frechet differentiability of $\bar{\phi}(\lambda,\gamma
_{0})$ in $\lambda$ and faster than $n^{-1/4}$ convergence of $\hat{\lambda}.$
All of the conditions can be easily checked for a wide variety of machine learning and conventional nonparametric estimators. There are well known conditions for mean square consistency for many conventional and machine learning methods. Rates for products of estimation errors are also known for many first step estimators as are conditions for $n^{-1/4}$ consistency. Thus, the simple conditions we give here are general enough to apply to a wide variety of first step estimators.
The first formal assumption of this section is sufficient for $\hat{R}_{1}\overset{p}{\longrightarrow}0.$
<span style="font-variant:small-caps;">Assumption 4:</span> *For each* $\ell=1,...,L$*, i) Either* $m(z,\beta_{0},\gamma)$ *does not depend on* $z$ *or* $\int\{m(z,\beta_{0},\hat{\gamma}_{\ell})-m(z,\beta_{0},\gamma_{0})\}^{2}F_{0}(dz)\overset{p}{\longrightarrow}0,$ *ii)* $\int\{\phi
(z,\hat{\gamma}_{\ell},\lambda_{0})-\phi(z,\gamma_{0},\lambda_{0})\}^{2}F_{0}(dz)\overset{p}{\longrightarrow}0,$ *and* $\int\{\phi(z,\gamma
_{0},\hat{\lambda}_{\ell})-\phi(z,\gamma_{0},\lambda_{0})\}^{2}F_{0}(dz)\overset{p}{\longrightarrow}0.$
The cross-fitting used in the construction of $\hat{\psi}(\beta_{0})$ is what makes the mean-square consistency conditions of Assumption 4 sufficient for $\hat{R}_{1}\overset{p}{\longrightarrow}0$. The next condition is sufficient for $\hat{R}_{2}\overset{p}{\longrightarrow}0.$
<span style="font-variant:small-caps;">Assumption 5:</span> *For each* $\ell=1,...,L$*, either i)*$$\sqrt{n}\int\max_{j}|\phi_{j}(z,\hat{\gamma}_{\ell},\hat{\lambda}_{\ell})-\phi_{j}(z,\gamma_{0},\hat{\lambda}_{\ell})-\phi_{j}(z,\hat{\gamma}_{\ell
},\lambda_{0})+\phi_{j}(z,\gamma_{0},\lambda_{0})|F_{0}(dz)\overset{p}{\longrightarrow}0$$ *or ii)* $\hat{R}_{2}\overset{p}{\longrightarrow}0.$
As previously discussed, this condition allows for just $\hat{R}_{2}\overset{p}{\longrightarrow}0$ in order to allow the weak regularity conditions of Firpo and Rothe (2017) and Newey and Robins (2017). The first result of this Section shows that Assumptions 4 and 5 are sufficient for equation (*\[no effec\]*) when the moment functions are DR.
<span style="font-variant:small-caps;">Lemma 12:</span> *If Assumption 1 is satisfied, with probability approaching one* $\hat{\gamma}\in\Gamma$*,* $\hat{\lambda}\in\Lambda
,$ *and Assumptions 4 and 5 are satisfied then equation (\[no effec\]) is satisfied.*
An important class of DR estimators are those from equation (\[drlin\]). The following result gives conditions for asymptotic linearity of these estimators:
<span style="font-variant:small-caps;">Theorem 13:</span> *If a) Assumptions 2 and 4 i) are satisfied with* $\hat{\gamma}\in\Gamma$ *and* $\hat{\lambda}\in\Lambda$ *with probability approaching one; b)* $\lambda_{0}(x_{i})$ *and* $E[\{y_{i}-\gamma_{0}(w_{i})\}^{2}|x_{i}]$ *are bounded; c) for each* $\ell=1,...,L$*,* $\int[\hat{\gamma}_{\ell}(w)-\gamma_{0}(w)]^{2}F_{0}(dz)\overset{p}{\longrightarrow}0,$ ** $\int[\hat{\lambda}_{\ell
}(x)-\lambda_{0}(x)]^{2}F_{0}(dz)$ ** $\overset{p}{\longrightarrow}0$*, and either*$$\sqrt{n}\left\{ \int[\hat{\gamma}_{\ell}(w)-\gamma_{0}(w)]^{2}F_{0}(dw)\right\} ^{1/2}\left\{ \int[\hat{\lambda}_{\ell}(x)-\lambda_{0}(x)]^{2}F_{0}(dx)\right\} ^{1/2}\mathit{\ }\overset{p}{\longrightarrow}0$$ *or*$$\frac{1}{\sqrt{n}}\sum_{i\in I_{\ell}}\{\hat{\gamma}_{\ell}(w_{i})-\gamma
_{0}(w_{i})\}\{\hat{\lambda}_{\ell}(x_{i})-\lambda_{0}(x_{i})\}\overset{p}{\longrightarrow}0;$$ *then*$$\sqrt{n}(\hat{\beta}-\beta_{0})=\frac{1}{\sqrt{n}}\sum_{i=1}^{n}[g(z_{i},\gamma_{0})-\beta_{0}+\lambda_{0}(x_{i})\{y_{i}-\gamma_{0}(w_{i})\}]+o_{p}(1).$$
The conditions of this result are simple, general, and allow for machine learning first steps. Conditions a) and b) simply require mean square consistency of the first step estimators $\hat{\gamma}$ and $\hat{\lambda}.$ The only convergence rate condition is c), which requires a product of estimation errors for the two first steps to go to zero faster than $1/\sqrt{n}$. This condition allows for a trade-off in convergence rates between the two first steps, and can be satisfied even when one of the two rates is not very fast. This trade-off can be important when $\lambda_{0}(x)$ is not continuous in one of the components of $x$, as in the surplus bound example. Discontinuity in $x$ can limit that rate at which $\lambda_{0}(x)$ can be estimated. This result extends the results of Chernozhukov et al. (2018) and Farrell (2015) for DR estimators of treatment effects to the whole novel class of DR estimators from equation (\[drlin\]) with machine learning first steps. In interesting related work, Athey et al. (2016) show root-n consistent estimation of an average treatment effect is possible under very weak conditions on the propensity score, under strong sparsity of the regression function. Thus, for machine learning the conditions here and in Athey et al. (2016) are complementary and one may prefer either depending on whether or not the regression function can be estimated extremely well based on a sparse method. The results here apply to many more DR moment conditions.
DR moment conditions have the special feature that $\hat{R}_{3}$ and $\hat
{R}_{4}$ in Proposition 4 are equal to zero. For estimators that are not DR we impose that $\hat{R}_{3}$ and $\hat{R}_{4}$ converge to zero.
<span style="font-variant:small-caps;">Assumption 6:</span> *For each* $\ell=1,...,L$*, i)* $\sqrt
{n}\bar{\psi}(\hat{\gamma}_{\ell},\lambda_{0})\overset{p}{\longrightarrow}0$ *and ii)* $\sqrt{n}\bar{\phi}(\gamma_{0},\hat{\lambda}_{\ell
})\overset{p}{\longrightarrow}0.$
Assumption 6 requires that $\hat{\gamma}$ converge to $\gamma_{0}$ rapidly enough but places no restrictions on the convergence rate of $\hat{\lambda}$ when $\bar{\phi}(\gamma_{0},\hat{\lambda}_{\ell})=0.$
<span style="font-variant:small-caps;">Lemma 14:</span> *If Assumptions 4-6 are satisfied then equation (\[no effec\]) is satisfied.*
Assumptions 4-6 are based on the decomposition of LR moment functions into an identifying part and an influence function adjustment. These conditions differ from other previous work in semiparametric estimation, as in Andrews (1994), Newey (1994), Newey and McFadden (1994), Chen, Linton, and van Keilegom (2003), Ichimura and Lee (2010), Escanciano et al. (2016), and Chernozhukov et al. (2018), that are not based on this decomposition. The conditions extend Chernozhukov et. al. (2018) to many more DR estimators and to estimators that are nonlinear in $\hat{\gamma}$ but only require a convergence rate for $\hat{\gamma}$ and not for $\hat{\lambda}$.
This framework helps explain the potential problems with “plugging in” a first step machine learning estimator into a moment function that is not LR. Lemma 14 implies that if Assumptions 4-6 are satisfied for some $\hat{\lambda}$ then $\sqrt{n}\hat{m}(\beta_{0})-\sum_{i=1}^{n}\psi(z_{i},\beta_{0},\gamma
_{0})/\sqrt{n}\overset{p}{\longrightarrow}0$ if and only if$$\hat{R}_{5}=\frac{1}{\sqrt{n}}\sum_{i=1}^{n}\phi(z_{i},\hat{\gamma},\hat{\lambda})\overset{p}{\longrightarrow}0. \label{plugin}$$ The plug-in method will fail when this equation does not hold. For example, suppose $\gamma_{0}=E[y|x]$ so that by Proposition 4 of Newey (1994),$$\frac{1}{\sqrt{n}}\sum_{i=1}^{n}\phi(z_{i},\hat{\gamma},\hat{\lambda})=\frac{-1}{\sqrt{n}}\sum_{i=1}^{n}\hat{\lambda}_{i}(x_{i})[y_{i}-\hat{\gamma
}_{i}(x_{i})].$$ Here $\hat{R}_{5}\overset{p}{\longrightarrow}0$ is an approximate orthogonality condition between the approximation $\hat{\lambda}_{i}(x_{i})$ to $\lambda_{0}(x_{i})$ and the nonparametric first stage residuals $y_{i}-\hat{\gamma}_{i}(x_{i}).$ Machine learning uses model selection in the construction of $\hat{\gamma}_{i}(x_{i}).$ If the model selected by $\hat{\gamma}_{i}(x_{i})$ to approximate $\gamma_{0}(x_{i})$ is not rich (or dense) enough to also approximate $\lambda_{0}(x_{i})$ then $\hat{\lambda}_{i}(x_{i})$ need not be approximately orthogonal to $y_{i}-\hat{\gamma}_{i}(x_{i})$ and $\hat{R}_{5}$ need not converge to zero. In particular, if the variables selected to be used to approximate $\gamma_{0}(x_{i})$ cannot be used to also approximate $\lambda_{0}(x_{i})$ then the approximate orthogonality condition can fail. This phenomenon helps explain the poor performance of the plug-in estimator shown in Belloni, Chernozhukov, and Hansen (2014) and Chernozhukov et al. (2017, 2018). The plug-in estimator can be root-n consistent if the only thing being selected is an overall order of approximation, as in the series estimation results of Newey (1994). General conditions for root-n consistency of the plug-in estimator can be formulated using Assumptions 4-6 and $\hat{R}_{2}\overset{p}{\longrightarrow}0,$ which we do in Appendix D.
Another component of an asymptotic normality result is convergence of the Jacobian term $\partial\hat{\psi}(\beta)/\partial\beta$ to $M=E[\left.\partial\psi(z_{i},\beta,\gamma_{0},\lambda_{0})/\partial\beta\right\vert_{\beta=\beta_{0}}].$ We impose the following condition for this purpose.
<span style="font-variant:small-caps;">Assumption 7:</span> $M\,$*exists and there is a neighborhood* $\mathcal{N}$ *of* $\beta_{0}$ *and* $\left\Vert \cdot
\right\Vert $ *such that i) for each* $\ell,$ $\left\Vert \hat{\gamma
}_{\ell}-\gamma_{0}\right\Vert \overset{p}{\longrightarrow}0,$ $\left\Vert
\hat{\lambda}_{\ell}-\lambda_{0}\right\Vert \overset{p}{\longrightarrow}0;$ *ii)* for all $\left\Vert \gamma-\gamma_{0}\right\Vert $ and $\left\Vert \lambda-\lambda_{0}\right\Vert $ small enough $\psi(z_{i},\beta,\gamma,\lambda)$ *is differentiable in* $\beta$ *on* $\mathcal{N}$ *with probability approaching* $1$ *iii) there is* $\zeta^{\prime}>0$ *and* $d(z_{i})$ *with* $E[d(z_{i})]<\infty
$ *such that for* $\beta\in\mathcal{N}$ *and* $\left\Vert \gamma
-\gamma_{0}\right\Vert $ *small enough* $$\left\Vert \frac{\partial\psi(z_{i},\beta,\gamma,\lambda)}{\partial\beta
}-\frac{\partial\psi(z_{i},\beta_{0},\gamma,\lambda)}{\partial\beta
}\right\Vert \leq d(z_{i})\left\Vert \beta-\beta_{0}\right\Vert ^{\zeta
^{\prime}};$$ *iv) For each* $\ell=1,...,L,$ $j,$ and $k$, $\int\left\vert
\partial\psi_{j}(z,\beta_{0},\hat{\gamma}_{\ell},\hat{\lambda}_{\ell
})/\partial\beta_{k}-\partial\psi_{j}(z,\beta_{0},\gamma_{0},\lambda
_{0})/\partial\beta_{k}\right\vert F_{0}(dz)\overset{p}{\longrightarrow}0.$
The following intermediate result gives Jacobian convergence.
<span style="font-variant:small-caps;">Lemma 15:</span> *If Assumption 7 is satisfied then for any* $\bar{\beta}\overset{p}{\longrightarrow}\beta_{0},$ ** $\hat{\psi}(\beta)$ *is differentiable at* $\bar{\beta}$ *with probability approaching one and* $\partial\hat{\psi}(\bar{\beta})/\partial\beta
\overset{p}{\longrightarrow}M.$
With these results in place the asymptotic normality of semiparametric GMM follows in a standard way.
<span style="font-variant:small-caps;">Theorem 16:</span> *If Assumptions 4-7 are satisfied,* $\hat{\beta
}\overset{p}{\longrightarrow}\beta_{0},$ ** $\hat{W}\overset{p}{\longrightarrow}W$*,* $M^{\prime}WM$ *is nonsingular, and* $E[\left\Vert \psi(z_{i},\beta_{0},\gamma_{0},\lambda
_{0})\right\Vert ^{2}]<\infty$ *then for* $\Omega=E[\psi(z_{i},\beta_{0},\gamma_{0},\lambda_{0})\psi(z_{i},\beta_{0},\gamma_{0},\lambda
_{0})^{\prime}],$$$\sqrt{n}(\hat{\beta}-\beta_{0})\overset{d}{\longrightarrow}N(0,V),V=(M^{\prime
}WM)^{-1}M^{\prime}W\Omega WM(M^{\prime}WM)^{-1}.$$
It is also useful to have a consistent estimator of the asymptotic variance of $\hat{\beta}$. As usual such an estimator can be constructed as$$\begin{aligned}
\hat{V} & =(\hat{M}^{\prime}\hat{W}\hat{M})^{-1}\hat{M}^{\prime}\hat{W}\hat{\Omega}\hat{W}\hat{M}(\hat{M}^{\prime}\hat{W}\hat{M})^{-1},\\
\hat{M} & =\frac{\partial\hat{\psi}(\hat{\beta})}{\partial\beta},\hat
{\Omega}=\frac{1}{n}\sum_{\ell=1}^{L}\sum_{i\in\mathcal{I}_{\ell}}\psi
(z_{i},\hat{\beta},\hat{\gamma}_{\ell},\hat{\lambda}_{\ell})\psi(z_{i},\hat{\beta},\hat{\gamma}_{\ell},\hat{\lambda}_{\ell})^{\prime}.\end{aligned}$$ Note that this variance estimator ignores the estimation of $\gamma$ and $\lambda$ which works here because the moment conditions are LR. The following result gives conditions for consistency of $\hat{V}.$
<span style="font-variant:small-caps;">Theorem 17:</span> *If Assumptions 4 and 7 are satisfied with* $E[b(z_{i})^{2}]<\infty,$ ** $M^{\prime}WM$ *is nonsingular, and* $$\int\left\Vert \phi(z,\hat{\gamma}_{\ell},\hat{\lambda}_{\ell})-\phi
(z,\gamma_{0},\hat{\lambda}_{\ell})-\phi(z,\hat{\gamma}_{\ell},\lambda
_{0})+\phi(z,\gamma_{0},\lambda_{0})\right\Vert ^{2}F_{0}(dz)\overset{p}{\longrightarrow}0$$ *then* $\hat{\Omega}\overset{p}{\longrightarrow}\Omega$ *and* $\hat{V}\overset{p}{\longrightarrow}V.$
In this section we have used cross-fitting and a decomposition of moment conditions into identifying and influence adjustment components to formulate simple and general conditions for asymptotic normality of LR GMM estimators. For reducing higher order bias and variance it may be desirable to let the number of groups grow with the sample size. That case is beyond the scope of this paper.
Appendix A: Proofs of Theorems
==============================
**Proof of Theorem 1:** By ii) and iii), $$0=(1-\tau)\int\phi(z,F_{\tau})F_{0}(dz)+\tau\int\phi(z,F_{\tau})G(dz).$$ Dividing by $\tau$ and solving gives$$\frac{1}{\tau}\int\phi(z,F_{\tau})F_{0}(dz)=-\int\phi(z,F_{\tau})G(dz)+\int\phi(z,F_{\tau})F_{0}(dz).$$ Taking limits as $\tau\longrightarrow0$, $\tau>0$ and using i) gives$$\frac{d}{d\tau}\int\phi(z,F_{\tau})F_{0}(dz)=-\int\phi(z,F_{0})G(dz)+0=-\frac
{d\mu(F_{\tau})}{d\tau}.\text{ }Q.E.D.$$
**Proof of Theorem 2**: We begin by deriving $\phi_{1},$ the adjustment term for the first step CCP estimation. We use the definitions given in the body of the paper. We also let$$\begin{aligned}
P_{\tilde{v}j}(\tilde{v}) & =\partial P(\tilde{v})/\partial\tilde{v}_{j},\text{ }\pi_{1}=\Pr(y_{t1}=1),\text{ }\lambda_{10}(x)=E[y_{1t}|x_{t+1}=x],\\
\lambda_{j0}(x) & =E[A(x_{t})P_{\tilde{v}j}(\tilde{v}_{t})\frac{y_{tj}}{P_{j}(\tilde{v}_{t})}|x_{t+1}=x],(j=2,...,J).\end{aligned}$$ Consider a parametric submodel as described in Section 4 and let $\gamma
_{1}(x,\tau)$ denote the conditional expectation of $y_{t}$ given $x_{t}$ under the parametric submodel. Note that for $\tilde{v}_{t}=\tilde{v}(x_{t}),$$$\begin{aligned}
& E[A(x_{t})P_{\tilde{v}j}(\tilde{v}_{t})\frac{\partial E[H(\gamma
_{1}(x_{t+1},\tau))|x_{t},y_{tj}=1]}{\partial\tau}]\\
& =\frac{\partial}{\partial\tau}E[A(x_{t})P_{vj}(\tilde{v}_{t})\frac{y_{tj}}{P_{j}(\tilde{v}_{t})}H(\gamma_{1}(x_{t+1},\tau))]\\
& =\frac{\partial}{\partial\tau}E[E[A(x_{t})P_{vj}(\tilde{v}_{t})\frac
{y_{tj}}{P_{j}(\tilde{v}_{t})}|x_{t+1}]H(\gamma_{1}(x_{t+1},\tau))]\\
& =\frac{\partial}{\partial\tau}E[\lambda_{j0}(x_{t+1})H(\gamma_{1}(x_{t+1},\tau))]=\frac{\partial}{\partial\tau}E[\lambda_{j0}(x_{t})H(\gamma_{1}(x_{t},\tau))]\\
& =E[\lambda_{j0}(x_{t})\frac{\partial H(\gamma_{10}(x_{t}))}{\partial
P}^{\prime}\frac{\partial\gamma_{1}(x_{t},\tau)}{\partial\tau}]=E[\lambda
_{j0}(x_{t})\frac{\partial H(\gamma_{10}(x_{t}))}{\partial P}^{\prime}\{y_{t}-\gamma_{10}(x_{t})\}S(z_{t})].\end{aligned}$$ where the last (sixth) equality follows as in Proposition 4 of Newey (1994a), and the fourth equality follows by equality of the marginal distributions of $x_{t}$ and $x_{t+1}$. Similarly, for $\pi_{1}=\Pr(y_{t1}=1)$ and $\lambda_{10}(x)=E[y_{1t}|x_{t+1}=x]$ we have$$\begin{aligned}
\frac{\partial E[H(\gamma_{1}(x_{t+1},\tau))|y_{t1}=1]}{\partial\tau} &
=\frac{\partial E[\pi_{1}^{-1}y_{1t}H(\gamma_{1}(x_{t+1},\tau))]}{\partial
\tau}=\frac{\partial E[\pi_{1}^{-1}\lambda_{10}(x_{t+1})H(\gamma_{1}(x_{t+1},\tau))]}{\partial\tau}\\
& =\frac{\partial E[\pi_{1}^{-1}\lambda_{10}(x_{t})H(\gamma_{1}(x_{t},\tau))]}{\partial\tau}\\
& =E[\pi_{1}^{-1}\lambda_{10}(x_{t})\frac{\partial H(\gamma_{10}(x_{t}))}{\partial P}^{\prime}\{y_{t}-\gamma_{10}(x_{t})\}S(z_{t})]\end{aligned}$$ Then combining terms gives$$\begin{aligned}
& \frac{\partial E[m(z_{t},\beta_{0},\gamma_{1}(\tau),\gamma_{-10})]}{\partial\tau}\\
& =-\delta\sum_{j=2}^{J}\{E[A(x_{t})P_{vj}(\tilde{v}_{t})\frac{\partial
E[H(\gamma_{1}(x_{t+1},\tau))|x_{t},y_{tj}=1]}{\partial\tau}]\\
& -E[A(x_{t})P_{vj}(\tilde{v}_{t})]\frac{\partial E[H(\gamma_{1}(x_{t+1},\tau))|y_{t1}=1]}{\partial\tau}\}\\
& =-\delta\sum_{j=2}^{J}E[\{\lambda_{j0}(x_{t})-E[A(x_{t})P_{\tilde{v}j}(\tilde{v}_{t})]\pi_{1}^{-1}\lambda_{10}(x_{t})\}\frac{\partial
H(\gamma_{10}(x_{t}))}{\partial P}^{\prime}\{y_{t}-\gamma_{10}(x_{t})\}S(z_{t})]\\
& =E[\phi_{1}(z_{t},\beta_{0},\gamma_{0},\lambda_{0})S(z_{t})].\end{aligned}$$
Next, we show the result for $\phi_{j}(z,\beta,\gamma,\lambda)$ for $2\leq
j\leq J.$ As in the proof of Proposition 4 of Newey (1994a), for any $w_{t}$ we have$$\frac{\partial}{\partial\tau}E[w_{t}|x_{t},y_{tj}=1,\tau]=E[\frac{y_{tj}}{P_{j}(\tilde{v}_{t})}\{w_{t}-E[w_{t}|x_{t},y_{tj}=1]\}S(z_{t})|x_{t}].$$ It follows that$$\begin{aligned}
\frac{\partial E[m(z_{t},\beta_{0},\gamma_{j}(\tau),\gamma_{-j,0})]}{\partial\tau} & =-\delta E[A(x_{t})P_{vj}(\tilde{v}_{t})\frac{\partial
E[u_{1,t+1}+H_{t+1}|x_{t},y_{tj}=1,\tau]}{\partial\tau}]\\
& =-\delta\frac{\partial}{\partial\tau}E[E[A(x_{t})P_{vj}(\tilde{v}_{t})\{u_{1,t+1}+H_{t+1}\}|x_{t},y_{tj}=1,\tau]].\\
& =-\delta E[A(x_{t})P_{vj}(\tilde{v}_{t})\frac{y_{tj}}{P_{j}(\tilde{v}_{t})}\{u_{1,t+1}+H_{t+1}-\gamma_{j0}(x_{t},\beta_{0},\gamma_{1})\}S(z_{t})]\\
& =E[\phi_{j}(z_{t},\beta_{0},\gamma_{0},\lambda_{0})S(z_{t})],\end{aligned}$$ showing that the formula for $\phi_{j}$ is correct. The proof for $\phi_{J+1}$ follows similarly. *Q.E.D.*
**Proof of Theorem 3:** Given in text.
**Proof of Theorem 4:** Given in text.
**Proof of Theorem 5:** Let $\bar{\psi}(\gamma,\lambda)=E[\psi
(z_{i},\beta_{0},\gamma,\lambda)]$. Suppose that $\psi(z,\beta,\gamma
,\lambda)$ is DR. Then for any $\gamma\neq\gamma_{0},\gamma\in\Gamma$ we have$$0=\bar{\psi}(\gamma,\lambda_{0})=\bar{\psi}(\gamma_{0},\lambda_{0})=\bar{\psi
}((1-\tau)\gamma_{0}+\tau\gamma,\lambda_{0}),$$ for any $\tau.$ Therefore for any $\tau$,$$\bar{\psi}((1-\tau)\gamma_{0}+\tau\gamma,\lambda_{0})=0=(1-\tau)\bar{\psi
}(\gamma_{0},\lambda_{0})+\tau\bar{\psi}(\gamma,\lambda_{0}),$$ so that $\bar{\psi}(\gamma,\lambda_{0})$ is affine in $\gamma.$ Also by the previous equation $\bar{\psi}((1-\tau)\gamma_{0}+\tau\gamma,\lambda_{0})=0$ identically in $\tau$ so that $$\frac{\partial}{\partial\tau}\bar{\psi}((1-\tau)\gamma_{0}+\tau\gamma
,\lambda_{0})=0,$$ where the derivative with respect to $\tau$ is evaluated at $\tau=0.$ Applying the same argument switching the roles of $\lambda$ and $\gamma$ we find that $\bar{\psi
}(\gamma_{0},\lambda)$ is affine in $\lambda$ and $\partial\bar{\psi}(\gamma_{0},(1-\tau)\lambda_{0}+\tau\lambda)/\partial\tau=0.$
Next suppose that $\bar{\psi}(\gamma,\lambda_{0})$ is affine in $\gamma$ and $\partial\bar{\psi}((1-\tau)\gamma_{0}+\tau\gamma,\lambda_{0})/\partial
\tau=0.$ Then by $\bar{\psi}(\gamma_{0},\lambda_{0})=0$, for any $\gamma
\in\Gamma,$ $$\begin{aligned}
\bar{\psi}(\gamma,\lambda_{0}) & =\partial\lbrack\tau\bar{\psi}(\gamma,\lambda_{0})]/\partial\tau=\partial\lbrack(1-\tau)\bar{\psi}(\gamma_{0},\lambda_{0})+\tau\bar{\psi}(\gamma,\lambda_{0})]/\partial\tau\\
& =\partial\bar{\psi}((1-\tau)\gamma_{0}+\tau\gamma,\lambda_{0})/\partial
\tau=0.\end{aligned}$$ Switching the roles of $\gamma$ and $\lambda$ it follows analogously that $\bar{\psi}(\gamma_{0},\lambda)=0$ for all $\lambda\in\Lambda,$ so $\bar{\psi
}(\gamma,\lambda)$ is doubly robust. *Q.E.D.*
**Proof of Theorem 6:** Let $\lambda_{0}(x)=-c^{\prime}\Pi^{-1}a(x)$ so that $E[\lambda_{0}(x_{i})|w_{i}]=-c^{\prime}\Pi^{-1}\Pi p(w_{i})=-c^{\prime
}p(w_{i}).$ Then integration by parts gives$$\begin{aligned}
E[m(z_{i},\beta_{0},\tilde{\gamma})] & =E[c^{\prime}p(w_{i})\{\tilde{\gamma
}(w_{i})-\gamma_{0}(w_{i})\}]=-E[\lambda_{0}(x_{i})\{\tilde{\gamma}(w_{i})-\gamma_{0}(w_{i})\}]\\
& =E[\lambda_{0}(x_{i})\{y_{i}-\tilde{\gamma}(w_{i})\}]=-c^{\prime}\Pi
^{-1}E[a(x_{i})\{y_{i}-\tilde{\gamma}(w_{i})\}]=0.\text{ }Q.E.D.\end{aligned}$$
**Proof of Theorem 7:** If $\lambda_{0}$ is identified then $m(z,\beta,\bar{\gamma},\lambda_{0})$ is identified for every $\beta$. By DR$$E[m(z_{i},\beta,\bar{\gamma},\lambda_{0})]=0$$ at $\beta=\beta_{0}$ and by assumption this is the only $\beta$ where this equation is satisfied. *Q.E.D.*
**Proof of Corollary 8:** Given in text.
**Proof of Theorem 9:** Note that for $\rho_{i}=\rho(z_{i},\beta
_{0},\gamma_{0}),$$$\bar{\psi}(\gamma_{0},(1-\tau)\lambda_{0}+\tau\lambda)=(1-\tau)E[\lambda
_{0}(x_{i})\rho_{i}]+\tau E[\lambda(x_{i})\rho_{i}]=0. \label{th9proof}$$ Differentiating gives the second equality in eq. (\[lrdef2\]). Also, for $\Delta=\gamma-\gamma_{0},$$$\frac{\partial\bar{\psi}((1-\tau)\gamma_{0}+\tau\gamma,\lambda_{0})}{\partial\tau}=E[\lambda_{0}(x_{i})\bar{\rho}(x_{i},\Delta)]=0,$$ giving the first equality in eq. (\[lrdef2\]). *Q.E.D.*
**Proof of Theorem 10:** The first equality in eq. (\[th9proof\]) of the proof of Theorem 9 shows that $\bar{\psi}(\gamma_{0},\lambda)$ is affine in $\lambda$. Also,$$\bar{\psi}((1-\tau)\gamma_{0}+\tau\gamma,\lambda_{0})=E[\lambda_{0}(x_{i})\{(1-\tau)\rho(z_{i},\beta_{0},\gamma_{0})+\tau\rho(z_{i},\beta
_{0},\gamma)\}]=(1-\tau)\bar{\psi}(\gamma_{0},\lambda_{0})+\tau\bar{\psi
}(\gamma,\lambda_{0}),$$ so that $\bar{\psi}(\gamma,\lambda_{0})$ is affine in $\gamma.$ The conclusion then follows by Theorem 5. *Q.E.D.*
**Proof of Theorem 11:** To see that $\tilde{\lambda}^{\Sigma^{\ast}}(x_{i},\lambda^{\ast})\Sigma^{\ast}(x_{i})^{-1}$ minimizes the asymptotic variance note that for any orthogonal instrumental variable matrix $\lambda_{0}(x),$ by the rows of $\lambda_{\beta}(x_{i})-\tilde{\lambda
}^{\Sigma^{\ast}}(x_{i},\lambda_{\beta})$ being in $\bar{\Lambda}_{\gamma},$ $$M=E[\lambda_{0}(x_{i})\lambda_{\beta}(x_{i})^{\prime}]=E[\lambda_{0}(x_{i})\tilde{\lambda}^{\Sigma^{\ast}}(x_{i},\lambda_{\beta})^{\prime
}]=E[\lambda_{0}(x_{i})\rho_{i}\rho_{i}^{\prime}\Sigma^{\ast}(x_{i})^{-1}\tilde{\lambda}^{\Sigma^{\ast}}(x_{i},\lambda_{\beta})^{\prime}].$$ Since the instruments are orthogonal the asymptotic variance matrix of the GMM estimator with $\hat{W}\overset{p}{\longrightarrow}W$ is the same as if $\hat{\gamma}=\gamma_{0}.$ Define $m_{i}=M^{\prime}W\lambda_{0}(x_{i})\rho
_{i}$ and $m_{i}^{\ast}=\tilde{\lambda}^{\Sigma^{\ast}}(x_{i},\lambda_{\beta
})\Sigma^{\ast}(x_{i})^{-1}\rho_{i}.$ The asymptotic variance of the GMM estimator for orthogonal instruments $\lambda_{0}(x)$ is$$(M^{\prime}WM)^{-1}M^{\prime}WE[\lambda_{0}(x_{i})\rho_{i}\rho_{i}^{\prime
}\lambda_{0}(x_{i})^{\prime}]WM(M^{\prime}WM)^{-1}=(E[m_{i}m_{i}^{\ast\prime
}])^{-1}E[m_{i}m_{i}^{\prime}](E[m_{i}m_{i}^{\ast}])^{-1\prime}.$$ The fact that this matrix is minimized in the positive semidefinite sense for $m_{i}=m_{i}^{\ast}$ is well known, e.g. see Newey and McFadden (1994). *Q.E.D.*
The following result is useful for the results of Section 7:
<span style="font-variant:small-caps;">Lemma A1:</span> *If Assumption 4 is satisfied then* $\hat{R}_{1}\overset{p}{\longrightarrow}0.$ *If Assumption 5 is satisfied then* $\hat{R}_{2}\overset{p}{\longrightarrow}0.$
Proof: Define $\hat{\Delta}_{i\ell}=m(z_{i},\hat{\gamma}_{\ell})-m(z_{i},\gamma_{0})-\bar{m}(\hat{\gamma}_{\ell})$ for $i\in I_{\ell}$ and let $Z_{\ell}^{c}$ denote the observations $z_{i}$ for $i\notin I_{\ell}$. Note that $\hat{\gamma}_{\ell}$ depends only on $Z_{\ell}^{c}$. By construction and independence of $Z_{\ell}^{c}$ and $z_{i},i\in I_{\ell}$ we have $E[\hat{\Delta}_{i\ell}|Z_{\ell}^{c}]=0.$ Also by independence of the observations, $E[\hat{\Delta}_{i\ell}\hat{\Delta}_{j\ell}|Z_{\ell}^{c}]=0$ for $i,j\in I_{\ell}.$ Furthermore, for $i\in I_{\ell}$ $E[\hat{\Delta}_{i\ell
}^{2}|Z_{\ell}^{c}]\leq\int[m(z,\hat{\gamma}_{\ell})-m(z,\gamma_{0})]^{2}F_{0}(dz)$. Then we have $$\begin{aligned}
E[\left( \frac{1}{\sqrt{n}}\sum_{i\in I_{\ell}}\hat{\Delta}_{i\ell}\right)
^{2}|Z_{\ell}^{c}] & =\frac{1}{n}E[\left( \sum_{i\in I_{\ell}}\hat{\Delta
}_{i\ell}\right) ^{2}|Z_{\ell}^{c}]=\frac{1}{n}\sum_{i\in I_{\ell}}E[\hat{\Delta}_{i\ell}^{2}|Z_{\ell}^{c}]\\
& \leq\int[m(z,\hat{\gamma}_{\ell})-m(z,\gamma_{0})]^{2}F_{0}(dz)\overset{p}{\longrightarrow}0.\end{aligned}$$ The conditional Markov inequality then implies that $\sum_{i\in I_{\ell}}\hat{\Delta}_{i\ell}/\sqrt{n}\overset{p}{\longrightarrow}0.$ The analogous results also hold for $\hat{\Delta}_{i\ell}=\phi(z_{i},\hat{\gamma}_{\ell
},\lambda_{0})-\phi(z_{i},\gamma_{0},\lambda_{0})-\bar{\phi}(\hat{\gamma
}_{\ell},\lambda_{0})$ and $\hat{\Delta}_{i\ell}=\phi(z_{i},\gamma_{0},\hat{\lambda}_{\ell})-\phi(z_{i},\gamma_{0},\lambda_{0})-\bar{\phi}(\gamma_{0},\hat{\lambda}_{\ell})$. Summing across these three terms and across $\ell=1,...,L$ gives the first conclusion.
For the second conclusion, note that under the first hypothesis of Assumption 5,$$\begin{aligned}
& E[\left\vert \frac{1}{\sqrt{n}}\sum_{i\in I_{\ell}}[\phi_{j}(z_{i},\hat{\gamma}_{\ell},\hat{\lambda}_{\ell})-\phi_{j}(z_{i},\gamma_{0},\hat{\lambda}_{\ell})-\phi_{j}(z_{i},\hat{\gamma}_{\ell},\lambda_{0})+\phi_{j}(z_{i},\gamma_{0},\lambda_{0})]\right\vert |Z_{\ell}^{c}]\\
& \leq\frac{1}{\sqrt{n}}\sum_{i\in I_{\ell}}E[\left\vert \phi_{j}(z_{i},\hat{\gamma}_{\ell},\hat{\lambda}_{\ell})-\phi_{j}(z_{i},\gamma_{0},\hat{\lambda}_{\ell})-\phi_{j}(z_{i},\hat{\gamma}_{\ell},\lambda_{0})+\phi_{j}(z_{i},\gamma_{0},\lambda_{0})\right\vert |Z_{\ell}^{c}]\\
& \leq\sqrt{n}\int\left\vert \phi_{j}(z,\hat{\gamma}_{\ell},\hat{\lambda
}_{\ell})-\phi_{j}(z,\gamma_{0},\hat{\lambda}_{\ell})-\phi_{j}(z,\hat{\gamma
}_{\ell},\lambda_{0})+\phi_{j}(z,\gamma_{0},\lambda_{0})\right\vert
F_{0}(dz)\overset{p}{\longrightarrow}0,\end{aligned}$$ so $\hat{R}_{2}\overset{p}{\longrightarrow}0$ follows by the conditional Markov and triangle inequalities. The second hypothesis of Assumption 5 is just $\hat{R}_{2}\overset{p}{\longrightarrow}0.$ $Q.E.D.$
**Proof of Lemma 12**: By Assumption 1 and the hypotheses that $\hat{\gamma}_{i}\in\Gamma$ and $\hat{\lambda}_{i}\in\Lambda$ we have $\hat
{R}_{3}=\hat{R}_{4}=0.$ By Lemma A1 we have $\hat{R}_{1}\overset{p}{\longrightarrow}0$ and $\hat{R}_{2}\overset{p}{\longrightarrow}0.$ The conclusion then follows by the triangle inequality. $Q.E.D.$
**Proof of Theorem 13:** Note that for $\varepsilon=y-\gamma_{0}(w)$ $$\begin{aligned}
\phi(z,\hat{\gamma},\lambda_{0})-\phi(z,\gamma_{0},\lambda_{0}) &
=\lambda_{0}(x)[\hat{\gamma}(w)-\gamma_{0}(w)],\\
\phi(z,\gamma_{0},\hat{\lambda})-\phi(z,\gamma_{0},\lambda_{0}) &
=[\hat{\lambda}(x)-\lambda_{0}(x)]\varepsilon,\\
\phi(z,\hat{\gamma}_{\ell},\hat{\lambda}_{\ell})-\phi(z,\gamma_{0},\hat{\lambda}_{\ell})-\phi(z,\hat{\gamma}_{\ell},\lambda_{0})+\phi
(z,\gamma_{0},\lambda_{0}) & =-[\hat{\lambda}(x)-\lambda_{0}(x)][\hat{\gamma}(w)-\gamma_{0}(w)].\end{aligned}$$ The first part of Assumption 4 ii) then follows by$$\begin{aligned}
\int[\phi(z,\hat{\gamma}_{\ell},\lambda_{0})-\phi(z,\gamma_{0},\lambda
_{0})]^{2}F_{0}(dz) & =\int\lambda_{0}(x)^{2}[\hat{\gamma}(w)-\gamma
_{0}(w)]^{2}F_{0}(dz)\\
& \leq C\int[\hat{\gamma}(w)-\gamma_{0}(w)]^{2}F_{0}(dz)\overset{p}{\longrightarrow}0.\end{aligned}$$ The second part of Assumption 4 ii) follows by$$\begin{aligned}
\int[\phi(z,\gamma_{0},\hat{\lambda}_{\ell})-\phi(z,\gamma_{0},\lambda
_{0})]^{2}F_{0}(dz) & =\int[\hat{\lambda}_{\ell}(x)-\lambda_{0}(x)]^{2}\varepsilon^{2}F_{0}(dz)\\
& =\int\left[ \hat{\lambda}_{\ell}(x)-\lambda_{0}(x)\right] ^{2}E[\varepsilon^{2}|x]F_{0}(dz)\\
& \leq C\int\left[ \hat{\lambda}_{\ell}(x)-\lambda_{0}(x)\right] ^{2}F_{0}(dz)\overset{p}{\longrightarrow}0.\end{aligned}$$ Next, note that by the Cauchy-Schwartz inequality, $$\begin{aligned}
& \sqrt{n}\int|\phi(z,\hat{\gamma}_{\ell},\hat{\lambda}_{\ell})-\phi
(z,\gamma_{0},\hat{\lambda}_{\ell})-\phi(z,\hat{\gamma}_{\ell},\lambda
_{0})+\phi(z,\gamma_{0},\lambda_{0})|F_{0}(dz)\\
& =\sqrt{n}\int\left\vert [\hat{\lambda}_{\ell}(x)-\lambda_{0}(x)][\hat
{\gamma}_{\ell}(w)-\gamma_{0}(w)]\right\vert F_{0}(dz)\\
& \leq\sqrt{n}\{\int[\hat{\lambda}_{\ell}(x)-\lambda_{0}(x)]^{2}F_{0}(dx)\}^{1/2}\{\int[\hat{\gamma}_{\ell}(w)-\gamma_{0}(w)]^{2}F_{0}(dw)\}^{1/2}.\end{aligned}$$ Then the first rate condition of Assumption 5 holds under the first rate condition of Theorem 13 while the second condition of Assumption 5 holds under the last hypothesis of Theorem 13. Then eq. (\[no effec\]) holds by Lemma 12, and the conclusion by rearranging the terms in eq. (\[no effec\]). *Q.E.D.*
**Proof of Lemma 14:** Follows by Lemma A1 and the triangle inequality. *Q.E.D.*
**Proof of Lemma 15:** Let $\hat{M}(\beta)=\partial\hat{\psi}(\beta)/\partial\beta$ when it exists, $\tilde{M}_{\ell}=n^{-1}\sum_{i\in
I_{\ell}}\partial\psi(z_{i},\beta_{0},\hat{\gamma}_{\ell},\hat{\lambda}_{\ell
})/\partial\beta,$ and $\bar{M}_{\ell}=n^{-1}\sum_{i\in I_{\ell}}\partial
\psi(z_{i},\beta_{0},\gamma_{0},\lambda_{0})/\partial\beta.$ By the law of large numbers, and Assumption 7 iii), $\sum_{\ell=1}^{L}\bar{M}_{\ell
}\overset{p}{\longrightarrow}M.$ Also, by condition iii) for each $j$ and $k,$ $$E[|\tilde{M}_{\ell jk}-\bar{M}_{\ell jk}||Z^{\ell}]\leq\int\left\vert
\partial\psi_{j}(z,\beta_{0},\hat{\gamma}_{\ell},\hat{\lambda}_{\ell
})/\partial\beta_{k}-\partial\psi_{j}(z,\beta_{0},\gamma_{0},\lambda
_{0})/\partial\beta_{k}\right\vert F_{0}(dz)\overset{p}{\longrightarrow}0.$$ Then by the conditional Markov inequality, for each $\ell,$ $$\tilde{M}_{\ell}-\bar{M}_{\ell}\overset{p}{\longrightarrow}0.$$ It follows by the triangle inequality that $\sum_{\ell=1}^{L}\tilde{M}_{\ell
}\overset{p}{\longrightarrow}M.$ Also, with probability approaching one we have for any $\bar{\beta}\overset{p}{\longrightarrow}\beta_{0}$$$\left\Vert \hat{M}(\bar{\beta})-\sum_{\ell=1}^{L}\tilde{M}_{\ell}\right\Vert
\leq\left( \frac{1}{n}\sum_{i=1}^{n}d(z_{i})\right) \left\Vert \bar{\beta
}-\beta_{0}\right\Vert ^{\zeta^{\prime}}=O_{p}(1)o_{p}(1)\overset{p}{\longrightarrow}0.$$ The conclusion then follows by the triangle inequality. *Q.E.D.*
**Proof of Theorem 16:** The conclusion follows in a standard manner from the conclusions of Lemmas 14 and 15. *Q.E.D.*
**Proof of Theorem 17:** Let $\hat{\psi}_{i}=\psi(z_{i},\hat{\beta},\hat{\gamma}_{\ell},\hat{\lambda}_{\ell})$ and $\psi_{i}=\psi(z_{i},\beta
_{0},\gamma_{0},\lambda_{0}).$ By standard arguments (e.g. Newey, 1994), it suffices to show that $\sum_{i=1}^{n}\left\Vert \hat{\psi}_{i}-\psi
_{i}\right\Vert ^{2}/n\overset{p}{\longrightarrow}0.$ Note that$$\begin{aligned}
\hat{\psi}_{i}-\psi_{i} & =\sum_{j=1}^{5}\hat{\Delta}_{ji},\hat{\Delta}_{1i}=\psi(z_{i},\hat{\beta},\hat{\gamma}_{\ell},\hat{\lambda}_{\ell})-\psi(z_{i},\beta_{0},\hat{\gamma}_{\ell},\hat{\lambda}_{\ell}),\hat{\Delta
}_{2i}=m(z_{i},\beta_{0},\hat{\gamma}_{\ell})-m(z_{i},\beta_{0},\gamma_{0}),\\
\hat{\Delta}_{3i} & =\phi(z_{i},\hat{\gamma}_{\ell},\lambda_{0})-\phi
(z_{i},\gamma_{0},\lambda_{0}),\hat{\Delta}_{4i}=\phi(z_{i},\gamma_{0},\hat{\lambda}_{\ell})-\phi(z_{i},\gamma_{0},\lambda_{0}),\\
\hat{\Delta}_{5i} & =\phi(z_{i},\hat{\gamma}_{\ell},\hat{\lambda}_{\ell
})-\phi(z_{i},\hat{\gamma}_{\ell},\lambda_{0})-\phi(z_{i},\gamma_{0},\hat{\lambda}_{\ell})+\phi(z_{i},\gamma_{0},\lambda_{0}).\end{aligned}$$ By standard arguments it suffices to show that for each $j$ and $\ell,$ $$\frac{1}{n}\sum_{i\in I_{\ell}}\left\Vert \hat{\Delta}_{ji}\right\Vert
^{2}\overset{p}{\longrightarrow}0. \label{var conv}$$ For $j=1$ it follows by a mean value expansion and Assumption 7 with $E[b(z_{i})^{2}]<\infty$ that$$\frac{1}{n}\sum_{i\in I_{\ell}}\left\Vert \hat{\Delta}_{1i}\right\Vert
^{2}=\frac{1}{n}\sum_{i\in I_{\ell}}\left\Vert \frac{\partial}{\partial\beta
}\psi(z_{i},\bar{\beta},\hat{\gamma}_{\ell},\hat{\lambda}_{\ell})(\hat{\beta
}-\beta_{0})\right\Vert ^{2}\leq\frac{1}{n}\left( \sum_{i\in I_{\ell}}b(z_{i})^{2}\right) \left\Vert \hat{\beta}-\beta_{0}\right\Vert ^{2}\overset{p}{\longrightarrow}0,$$ where $\bar{\beta}$ is a mean value that actually differs from row to row of $\partial\psi(z_{i},\bar{\beta},\hat{\gamma}_{\ell},\hat{\lambda}_{\ell
})/\partial\beta$. For $j=2$ note that by Assumption 4,$$E[\frac{1}{n}\sum_{i\in I_{\ell}}\left\Vert \hat{\Delta}_{2i}\right\Vert
^{2}|Z^{\ell}]\leq\int\left\Vert m(z,\beta_{0},\hat{\gamma}_{\ell})-m(z,\beta_{0},\gamma_{0})\right\Vert ^{2}F_{0}(dz)\overset{p}{\longrightarrow}0,$$ so eq. (\[var conv\]) holds by the conditional Markov inequality. For $j=3$ and $j=4$ eq. (\[var conv\]) follows similarly. For $j=5$, it follows from the hypotheses of Theorem 17 that$$E[\frac{1}{n}\sum_{i\in I_{\ell}}\left\Vert \hat{\Delta}_{5i}\right\Vert
^{2}|Z^{\ell}]\leq\int\left\Vert \phi(z,\hat{\gamma}_{\ell},\hat{\lambda
}_{\ell})-\phi(z,\gamma_{0},\hat{\lambda}_{\ell})-\phi(z,\hat{\gamma}_{\ell
},\lambda_{0})+\phi(z,\gamma_{0},\lambda_{0})\right\Vert ^{2}F_{0}(dz)\overset{p}{\longrightarrow}0.$$ Then eq. (\[var conv\]) holds for $j=5$ by the conditional Markov inequality. *Q.E.D.*
Appendix B: Local Robustness and Derivatives of Expected Moments.
=================================================================
In this Appendix we give conditions sufficient for the LR property of equation (\[lrdef\]) to imply the properties in equations (\[lrdef2\]) and (\[nlremainder\]). As discussed following equation (\[nlremainder\]), it may be convenient when specifying regularity conditions for specific moment functions to work directly with (\[lrdef2\]) and/or (\[nlremainder\]).
<span style="font-variant:small-caps;">Assumption B1:</span> *There are linear sets* $\Gamma$ *and* $\Lambda$ *and a set* $\mathcal{G}$ *such that i)* $\bar{\psi}(\gamma,\lambda)$ *is Frechet differentiable at* $(\gamma_{0},\lambda_{0});$ *ii) for all* $G\in\mathcal{G}$ *the vector* $(\gamma(F_{\tau}),\lambda(F_{\tau}))$ *is Frechet differentiable at* $\tau=0;$ *iii) the closure of* $\{\partial(\gamma(F_{\tau}),\lambda(F_{\tau}))/\partial\tau:G\in\mathcal{G}\}$ *is* $\Gamma\times\Lambda$*.*
<span style="font-variant:small-caps;">Theorem B1:</span> *If Assumption B1 is satisfied and equation (\[lrdef\]) is satisfied for all* $G\in$ ** $\mathcal{G}$ *then equation (\[lrdef2\]) is satisfied.*
Proof: Let $\bar{\psi}^{\prime}(\gamma,\lambda)$ denote the Frechet derivative of $\bar{\psi}(\gamma,\lambda)$ at $(\gamma_{0},\lambda_{0})$ in the direction $(\gamma,\lambda),$ which exists by i). By ii), the chain rule for Frechet derivatives (e.g. Proposition 7.3.1 of Luenberger, 1969), and by eq. *(\[lrdef\])* it follows that for $(\Delta_{\gamma}^{G},\Delta_{\lambda}^{G})=\partial(\gamma(F_{\tau}),\lambda(F_{\tau}))/\partial\tau,$$$\bar{\psi}^{\prime}(\Delta_{\gamma}^{G},\Delta_{\lambda}^{G})=\frac
{\partial\bar{\psi}(\gamma(F_{\tau}),\lambda(F_{\tau}))}{\partial\tau}=0.$$ By $\bar{\psi}^{\prime}(\gamma,\lambda)$ being a continuous linear function and iii) it follows that $\bar{\psi}^{\prime}(\gamma,\lambda)=0$ for all $(\gamma,\lambda)\in\Gamma\times\Lambda.$ Therefore, for any $\gamma\in\Gamma$ and $\lambda\in\Lambda,$$$\bar{\psi}^{\prime}(\gamma-\gamma_{0},0)=0,\bar{\psi}^{\prime}(0,\lambda
-\lambda_{0})=0.$$ Equation *(\[lrdef2\])* then follows by i). *Q.E.D.*
<span style="font-variant:small-caps;">Theorem B2:</span> *If equation (\[lrdef2\]) is satisfied and in addition* $\bar{\psi}(\gamma,\lambda_{0})$ *and* $\bar{\psi}(\gamma
_{0},\lambda)$ *are twice Frechet differentiable in open sets containing* $\gamma_{0}$ *and* $\lambda_{0}$ *respectively with bounded second derivative then equation* (\[nlremainder\]) *is satisfied.*
Proof: Follows by Proposition 7.3.3 of Luenberger (1969). *Q.E.D.*
Appendix C: Doubly Robust Moment Functions for Orthogonality Conditions
=======================================================================
In this Appendix we generalize the DR estimators for conditional moment restrictions to orthogonality conditions for a general residual $\rho
(z,\gamma)$ that is affine in $\gamma$ but need not have the form $y-\gamma(w).$
<span style="font-variant:small-caps;">Assumption C1:</span> *There are linear sets* $\Gamma$ and $\Lambda$ *of functions* $\lambda(x)$ *and* $\gamma(w)$ *that are closed in mean square such that i) For any* $\gamma,\tilde{\gamma}\in\Gamma$ and scalar $\tau,$ $E[\rho(z_{i},\gamma)^{2}]<\infty$ and $\rho(z,(1-\tau
)\gamma+\tau\tilde{\gamma})=(1-\tau)\rho(z,\gamma)+\tau\rho(z,\tilde{\gamma})$ ; *ii)* $E[\lambda(x_{i})\rho(z_{i},\gamma_{0})]=0$ for all $\lambda
\in\Lambda;$ *iii) there exists* $\lambda_{0}\in\Lambda$ *such that* $E[m(z_{i},\beta_{0},\gamma)]=-E[\lambda_{0}(x_{i})\rho(z_{i},\gamma
)]$ *for all* $\gamma\in\Gamma.$
Assumption C1 ii) could be thought of as an identification condition for $\gamma_{0}$. For example, if $\Lambda$ is all functions of $x_{i}$ with finite mean square then ii) is $E[\rho(z_{i},\gamma_{0})|x_{i}]=0,$ the nonparametric conditional moment restriction of Newey and Powell (1989, 2003). Assumption C1 iii) also has an interesting interpretation. Let $\Pi(a)(x_{i})$ denote the orthogonal mean-square projection of a random variable $a(z_{i})$ with finite second moment on $\Lambda.$ Then by ii) and iii) we have$$\begin{aligned}
E[m(z_{i},\beta_{0},\gamma)] & =-E[\lambda_{0}(x_{i})\rho(z_{i},\gamma)]=E[\lambda_{0}(x_{i})\Pi(\rho(\gamma))(x_{i})]\\
& =E[\lambda_{0}(x_{i})\{\Pi(\rho(\gamma))(x_{i})-\Pi(\rho(\gamma_{0}))(x_{i})\}]\\
& =E[\lambda_{0}(x_{i})\{\Pi(\rho(\gamma)-\rho(\gamma_{0}))(x_{i})\}].\end{aligned}$$ Here we see that $E[m(z_{i},\beta_{0},\gamma)]$ is a linear, mean-square continuous function of $\Pi(\rho(\gamma)-\rho(\gamma_{0}))(x_{i}).$ The Riesz representation theorem will also imply that if $E[m(z_{i},\beta_{0},\gamma)]$ is a linear, mean-square continuous function of $\Pi(\rho(\gamma)-\rho
(\gamma_{0}))(x_{i})$ then $\lambda_{0}(x)$ exists satisfying Assumption C1 ii). For the case where $w_{i}=x_{i}$ this mean-square continuity condition is necessary for existence of a root-n consistent estimator, as in Newey (1994) and Newey and McFadden (1994). We conjecture that when $w_{i}$ need not equal $x_{i}$ this condition generalizes Severini and Tripathi’s (2012) necessary condition for existence of a root-n consistent estimator of $\beta_{0}$.
Noting that Assumption C1 ii) and iii) are the conditions for double robustness we have
<span style="font-variant:small-caps;">Theorem C1:</span> *If Assumption C1 is satisfied then* $\psi
(z,\beta,\gamma,\lambda)=m(z,\beta,\gamma)+\lambda(x)\rho(z,\gamma)$ *is doubly robust.*
It is interesting to note that $\lambda_{0}(x)$ satisfying Assumption C1 iii) need not be unique. When the closure of $\{\Pi(\rho(\gamma))(x_{i}):\gamma
\in\Gamma\}$ is not all of $\Lambda$ then there will exist $\tilde{\lambda}\in\Lambda$ such that $\tilde{\lambda}\neq0$ and $$E[\tilde{\lambda}(x_{i})\rho(z_{i},\gamma)]=E[\tilde{\lambda}(x_{i})\Pi
(\rho(\gamma))(x_{i})]=0\text{ for all }\gamma\in\Gamma.$$ In that case Assumption C1 iii) will also be satisfied for $\lambda_{0}(x_{i})+\tilde{\lambda}(x_{i}).$ We can think of this case as one where $\gamma_{0}$ is overidentified, similarly to Chen and Santos (2015). As discussed in Ichimura and Newey (2017), the different $\lambda_{0}(x_{i})$ would correspond to different first step estimators.
The partial robustness results of the last Section can be extended to the orthogonality condition setting of Assumption C1. Let $\Lambda^{\ast}$ be a closed linear subset of $\Lambda,$ such as a finite-dimensional linear set, and let $\gamma^{\ast}$ be such that $E[\lambda(x_{i})\rho(z_{i},\gamma^{\ast
})]=0$ for all $\lambda\in\Lambda^{\ast}$. Note that if $\lambda_{0}\in
\Lambda^{\ast}$ it follows by Theorem C1 that$$E[m(z_{i},\beta_{0},\gamma^{\ast})]=-E[\lambda_{0}(x_{i})\rho(z_{i},\gamma^{\ast})]=0.$$
<span style="font-variant:small-caps;">Theorem C2:</span> *If* $\Lambda^{\ast}$ *is a closed linear subset of* $\Lambda$*,* $E[\lambda(x_{i})\rho(z_{i},\gamma^{\ast})]=0$ *for all* $\lambda\in\Lambda^{\ast}$*, and Assumption C1 iii) is satisfied with* $\lambda_{0}\in\Lambda^{\ast}$ *then*$$E[m(z_{i},\beta_{0},\gamma^{\ast})]=0.$$
Appendix D: Regularity Conditions for Plug-in Estimators
========================================================
In this Appendix we formulate regularity conditions for root-n consistency and asymptotic normality of the plug-in estimator $\tilde{\beta}$ as described in Section 2, where $m(z,\beta,\gamma)$ need not be LR. These conditions are based on Assumptions 4-6 applied to the influence adjustment $\phi
(z,\gamma,\lambda)$ corresponding to $m(z,\beta,\gamma)$ and $\hat{\gamma}.$ For this purpose we treat $\hat{\lambda}$ as any object that can approximate $\lambda_{0}(x),$ not just as an estimator of $\lambda_{0}.$
<span style="font-variant:small-caps;">Theorem D1:</span> *If Assumptions 4-6 are satisfied, Assumption 7* is satisfied with $m(z,\beta,\gamma)$ replacing $\psi(z,\beta,\gamma
,\lambda),$ ** $\tilde{\beta}\overset{p}{\longrightarrow}\beta_{0},$ ** $\hat{W}\overset{p}{\longrightarrow}W$*,* $M^{\prime}WM$ *is nonsingular,* $E[\left\Vert \psi(z_{i},\beta_{0},\gamma
_{0},\lambda_{0})\right\Vert ^{2}]<\infty,$ *and*$$\hat{R}_{5}=\frac{1}{\sqrt{n}}\sum_{i=1}^{n}\phi(z_{i},\hat{\gamma}_{i},\hat{\lambda}_{i})\overset{p}{\longrightarrow}0,$$ *then for* $\Omega=E[\psi(z_{i},\beta_{0},\gamma_{0},\lambda_{0})\psi(z_{i},\beta_{0},\gamma_{0},\lambda_{0})^{\prime}],$$$\sqrt{n}(\tilde{\beta}-\beta_{0})\overset{d}{\longrightarrow}N(0,V),V=(M^{\prime
}WM)^{-1}M^{\prime}W\Omega WM(M^{\prime}WM)^{-1}.$$
The condition $\hat{R}_{5}\overset{p}{\longrightarrow}0$ was discussed in Section 7. It is interesting to note that $\hat{R}_{5}\overset{p}{\longrightarrow}0$ appears to be a complicated condition that seems to depend on details of the estimator $\hat{\gamma}_{i}$ in a way that Assumptions 4-7 do not. In this way the regularity conditions for the LR estimator seem to be more simple and general than those for the plug-in estimator.
Acknowledgements
Whitney Newey gratefully acknowledges support by the NSF. Helpful comments were provided by M. Cattaneo, B. Deaner, J. Hahn, M. Jansson, Z. Liao, A. Pakes, R. Moon, A. de Paula, V. Semenova, and participants in seminars at Cambridge, Columbia, Cornell, Harvard-MIT, UCL, USC, Yale, and Xiamen. B. Deaner provided capable research assistance.
**REFERENCES**
<span style="font-variant:small-caps;">Ackerberg, D., X. Chen, and J. Hahn</span> (2012): “A Practical Asymptotic Variance Estimator for Two-step Semiparametric Estimators,” *The Review of Economics and Statistics* 94: 481–498.
<span style="font-variant:small-caps;">Ackerberg, D., X. Chen, J. Hahn, and Z. Liao</span> (2014): “Asymptotic Efficiency of Semiparametric Two-Step GMM,” *The Review of Economic Studies* 81: 919–943.
<span style="font-variant:small-caps;">Ai, C. [and]{} X. Chen</span> (2003): Efficient Estimation of Models with Conditional Moment Restrictions Containing Unknown Functions, *Econometrica* 71, 1795-1843.
<span style="font-variant:small-caps;">Ai, C. [and]{} X. Chen</span> (2007): “Estimation of Possibly Misspecified Semiparametric Conditional Moment Restriction Models with Different Conditioning Variables,” *Journal of Econometrics* 141, 5–43.
<span style="font-variant:small-caps;">Ai, C. [and]{} X. Chen</span> (2012): “The Semiparametric Efficiency Bound for Models of Sequential Moment Restrictions Containing Unknown Functions,” *Journal of Econometrics* 170, 442–457.
<span style="font-variant:small-caps;">Andrews, D.W.K.</span> (1994): Asymptotics for Semiparametric Models via Stochastic Equicontinuity, *Econometrica* 62, 43-72.
<span style="font-variant:small-caps;">Athey, S., G. Imbens, and S. Wager</span> (2017): “Efficient Inference of Average Treatment Effects in High Dimensions via Approximate Residual Balancing,” *Journal of the Royal Statistical Society, Series B,* forthcoming.
<span style="font-variant:small-caps;">Bajari, P., V. Chernozhukov, H. Hong, and D. Nekipelov</span> (2009): “Nonparametric and Semiparametric Analysis of a Dynamic Discrete Game,” working paper, Stanford.
<span style="font-variant:small-caps;">Bajari, P., H. Hong, J. Krainer, and D. Nekipelov</span> (2010): “Estimating Static Models of Strategic Interactions,” *Journal of Business and Economic Statistics* 28, 469-482.
<span style="font-variant:small-caps;">Bang, H. and J.M. Robins</span> (2005): “Doubly Robust Estimation in Missing Data and Causal Inference Models,” *Biometrics* 61, 962–972.
<span style="font-variant:small-caps;">Belloni, A., D. Chen, V. Chernozhukov, and C. Hansen</span> (2012): Sparse Models and Methods for Optimal Instruments with an Application to Eminent Domain, *Econometrica* 80, 2369–2429.
<span style="font-variant:small-caps;">Belloni, A., V. Chernozhukov, and Y. Wei</span> (2013): Honest Confidence Regions for Logistic Regression with a Large Number of Controls, arXiv preprint arXiv:1304.3969.
<span style="font-variant:small-caps;">Belloni, A., V. Chernozhukov, and C. Hansen</span> (2014): “Inference on Treatment Effects after Selection among High-Dimensional Controls,” *The Review of Economic Studies* 81, 608–650.
<span style="font-variant:small-caps;">Belloni, A., V. Chernozhukov, I. Fernandez-Val, and C. Hansen</span> (2016): “Program Evaluation and Causal Inference with High-Dimensional Data,” *Econometrica* 85, 233-298.
<span style="font-variant:small-caps;">Bera, A.K., G. Montes-Rojas, and W. Sosa-Escudero</span> (2010): “General Specification Testing with Locally Misspecified Models,” *Econometric Theory* 26, 1838–1845.
<span style="font-variant:small-caps;">Bickel, P.J.</span> (1982): “On Adaptive Estimation,” *Annals of Statistics* 10, 647-671.
<span style="font-variant:small-caps;">Bickel, P.J. and Y. Ritov</span> (1988): “Estimating Integrated Squared Density Derivatives: Sharp Best Order of Convergence Estimates,” *Sankhyā: The Indian Journal of Statistics, Series A* 50, 381-393.
<span style="font-variant:small-caps;">Bickel, P.J., C.A.J. Klaassen, Y. Ritov, [and]{} J.A. Wellner</span> (1993): *Efficient and Adaptive Estimation for Semiparametric Models*, Springer-Verlag, New York.
<span style="font-variant:small-caps;">Bickel, P.J. and Y. Ritov</span> (2003): “Nonparametric Estimators Which Can Be ‘Plugged-in’,” *Annals of Statistics* 31, 1033-1053.
<span style="font-variant:small-caps;">Bonhomme, S., and M. Weidner</span> (2018): “Minimizing Sensitivity to Misspecification,” working paper.
<span style="font-variant:small-caps;">Cattaneo, M.D., and M. Jansson</span> (2017): “Kernel-Based Semiparametric Estimators: Small Bandwidth Asymptotics and Bootstrap Consistency,” *Econometrica*, forthcoming.
<span style="font-variant:small-caps;">Cattaneo, M.D., M. Jansson, and X. Ma</span> (2017): “Two-step Estimation and Inference with Possibly Many Included Covariates,” working paper.
<span style="font-variant:small-caps;">Chamberlain, G.</span> (1987): Asymptotic Efficiency in Estimation with Conditional Moment Restrictions, *Journal of Econometrics* 34, 305–334.
<span style="font-variant:small-caps;">Chamberlain, G.</span> (1992): Efficiency Bounds for Semiparametric Regression, *Econometrica* 60, 567–596.
<span style="font-variant:small-caps;">Chen, X. and X. Shen</span> (1997): Sieve Extremum Estimates for Weakly Dependent Data, *Econometrica* 66, 289-314.
<span style="font-variant:small-caps;">Chen, X., O.B. Linton, [and]{} I. [van Keilegom]{}</span> (2003): Estimation of Semiparametric Models when the Criterion Function Is Not Smooth, *Econometrica* 71, 1591-1608.
<span style="font-variant:small-caps;">Chen, X., and Z. Liao</span> (2015): “Sieve Semiparametric Two-Step GMM Under Weak Dependence”, *Journal of Econometrics* 189, 163–186.
<span style="font-variant:small-caps;">Chen, X., and A. Santos</span> (2015): Overidentification in Regular Models, working paper.
<span style="font-variant:small-caps;">Chernozhukov, V., C. Hansen, and M. Spindler</span> (2015): “Valid Post-Selection and Post-Regularization Inference: An Elementary, General Approach,” *Annual Review of Economics* 7: 649–688.
<span style="font-variant:small-caps;">Chernozhukov, V., G.W. Imbens and W.K. Newey</span> (2007): “Instrumental Variable Identification and Estimation of Nonseparable Models,” *Journal of Econometrics* 139, 4-14.
<span style="font-variant:small-caps;">Chernozhukov, V., D. Chetverikov, M. Demirer, E. Duflo, C. Hansen, W. Newey</span> (2017): “Double/Debiased/Neyman Machine Learning of Treatment Effects,” *American Economic Review Papers and Proceedings* 107, 261-65.
<span style="font-variant:small-caps;">Chernozhukov, V., D. Chetverikov, M. Demirer, E. Duflo, C. Hansen, W. Newey, J. Robins</span> (2018): “Debiased/Double Machine Learning for Treatment and Structural Parameters,” *Econometrics Journal* 21, C1-C68.
<span style="font-variant:small-caps;">Chernozhukov, V., J.A. Hausman, and W.K. Newey</span> (2018): “Demand Analysis with Many Prices,” working paper, MIT.
<span style="font-variant:small-caps;">Chernozhukov, V., W.K. Newey, J. Robins</span> (2018): “Double/De-Biased Machine Learning Using Regularized Riesz Representers,” arxiv.
<span style="font-variant:small-caps;">Escanciano, J-C., D. Jacho-Chávez, and A. Lewbel</span> (2016): Identification and Estimation of Semiparametric Two Step Models, *Quantitative Economics* 7, 561-589.
<span style="font-variant:small-caps;">Farrell, M.</span> (2015): “Robust Inference on Average Treatment Effects with Possibly More Covariates than Observations,” *Journal of Econometrics* 189, 1–23.
<span style="font-variant:small-caps;">Firpo, S. and C. Rothe</span> (2017): “Semiparametric Two-Step Estimation Using Doubly Robust Moment Conditions,” working paper.
<span style="font-variant:small-caps;">Graham, B.W.</span> (2011): “Efficiency Bounds for Missing Data Models with Semiparametric Restrictions,” *Econometrica* 79, 437–452.
<span style="font-variant:small-caps;">Hahn, J. (1998):</span> “On the Role of the Propensity Score in Efficient Semiparametric Estimation of Average Treatment Effects,” *Econometrica* 66, 315-331.
<span style="font-variant:small-caps;">Hahn, J. and G. Ridder</span> (2013): “Asymptotic Variance of Semiparametric Estimators With Generated Regressors,” *Econometrica* 81, 315-340.
<span style="font-variant:small-caps;">Hahn, J. and G. Ridder</span> (2016): “Three-stage Semi-Parametric Inference: Control Variables and Differentiability,” working paper.
<span style="font-variant:small-caps;">Hahn, J., Z. Liao, and G. Ridder</span> (2016): “Nonparametric Two-Step Sieve M Estimation and Inference,” working paper, UCLA.
<span style="font-variant:small-caps;">Hasminskii, R.Z. and I.A. Ibragimov</span> (1978): “On the Nonparametric Estimation of Functionals,” *Proceedings of the 2nd Prague Symposium on Asymptotic Statistics*, 41-51.
<span style="font-variant:small-caps;">Hausman, J.A., and W.K. Newey</span> (2016): “Individual Heterogeneity and Average Welfare,” *Econometrica* 84, 1225-1248.
<span style="font-variant:small-caps;">Hausman, J.A., and W.K. Newey</span> (2017): “Nonparametric Welfare Analysis,” *Annual Review of Economics* 9, 521–546.
<span style="font-variant:small-caps;">Hirano, K., G. Imbens, and G. Ridder</span> (2003): “Efficient Estimation of Average Treatment Effects Using the Estimated Propensity Score,” *Econometrica* 71: 1161–1189.
<span style="font-variant:small-caps;">Hotz, V.J. and R.A. Miller</span> (1993): “Conditional Choice Probabilities and the Estimation of Dynamic Models,” *Review of Economic Studies* 60, 497-529.
<span style="font-variant:small-caps;">Huber, P. (1981):</span> *Robust Statistics,* New York: Wiley.
<span style="font-variant:small-caps;">Ichimura, H.</span> (1993): “Estimation of Single Index Models,” *Journal of Econometrics* 58, 71-120.
<span style="font-variant:small-caps;">Ichimura, H., [and]{} S. Lee</span> (2010): Characterization of the Asymptotic Distribution of Semiparametric M-Estimators, *Journal of Econometrics* 159, 252–266.
<span style="font-variant:small-caps;">Ichimura, H. and W.K. Newey</span> (2017): “The Influence Function of Semiparametric Estimators,” CEMMAP Working Paper, CWP06/17.
<span style="font-variant:small-caps;">Kandasamy, K., A. Krishnamurthy, B. Póczos, L. Wasserman, J.M. Robins</span> (2015): “Influence Functions for Machine Learning: Nonparametric Estimators for Entropies, Divergences and Mutual Informations,” arxiv.
<span style="font-variant:small-caps;">Lee, Lung-fei</span> (2005): A $C(\alpha)$-type Gradient Test in the GMM Approach, working paper.
<span style="font-variant:small-caps;">Luenberger, D.G.</span> (1969): *Optimization by Vector Space Methods*, New York: Wiley.
<span style="font-variant:small-caps;">Murphy, K.M. and R.H. Topel</span> (1985): “Estimation and Inference in Two-Step Econometric Models,” *Journal of Business and Economic Statistics* 3, 370-379.
<span style="font-variant:small-caps;">Newey, W.K.</span> (1984): “A Method of Moments Interpretation of Sequential Estimators,” *Economics Letters* 14, 201-206.
<span style="font-variant:small-caps;">Newey, W.K.</span> (1990): “Semiparametric Efficiency Bounds,” *Journal of Applied Econometrics* 5, 99-135.
<span style="font-variant:small-caps;">Newey, W.K.</span> (1991): Uniform Convergence in Probability and Stochastic Equicontinuity, *Econometrica* 59, 1161-1167.
<span style="font-variant:small-caps;">Newey, W.K.</span> (1994a): “The Asymptotic Variance of Semiparametric Estimators,” *Econometrica* 62, 1349-1382.
<span style="font-variant:small-caps;">Newey, W.K.</span> (1994b): Kernel Estimation of Partial Means and a General Variance Estimator, *Econometric Theory* 10, 233-253.
<span style="font-variant:small-caps;">Newey, W.K.</span> (1997): Convergence Rates and Asymptotic Normality for Series Estimators, *Journal of Econometrics* 79, 147-168.
<span style="font-variant:small-caps;">Newey, W.K.</span> (1999): Consistency of Two-Step Sample Selection Estimators Despite Misspecification of Distribution, *Economics Letters* 63, 129-132.
<span style="font-variant:small-caps;">Newey, W.K., [and]{} D. McFadden</span> (1994): Large Sample Estimation and Hypothesis Testing," in *Handbook of Econometrics*, Vol. 4, ed. by R. Engle, and D. McFadden, pp. 2113-2241. North Holland.
<span style="font-variant:small-caps;">Newey, W.K., [and]{} J.L. Powell</span> (1989): “Instrumental Variable Estimation of Nonparametric Models,” presented at Econometric Society winter meetings, 1988.
<span style="font-variant:small-caps;">Newey, W.K., [and]{} J.L. Powell</span> (2003): “Instrumental Variable Estimation of Nonparametric Models,” *Econometrica* 71, 1565-1578.
<span style="font-variant:small-caps;">Newey, W.K., F. Hsieh, [and]{} J.M. Robins</span> (1998): “Undersmoothing and Bias Corrected Functional Estimation,” MIT Dept. of Economics Working Paper 98-17.
<span style="font-variant:small-caps;">Newey, W.K., F. Hsieh, [and]{} J.M. Robins</span> (2004): Twicing Kernels and a Small Bias Property of Semiparametric Estimators, *Econometrica* 72, 947-962.
<span style="font-variant:small-caps;">Newey, W.K., and J. Robins</span> (2017): “Cross Fitting and Fast Remainder Rates for Semiparametric Estimation,” arxiv.
<span style="font-variant:small-caps;">Neyman, J.</span> (1959): Optimal Asymptotic Tests of Composite Statistical Hypotheses, *Probability and Statistics, the Harald Cramer Volume*, ed., U. Grenander, New York, Wiley.
<span style="font-variant:small-caps;">Pfanzagl, J., and W. Wefelmeyer</span> (1982): *Contributions to a General Asymptotic Statistical Theory*, Springer Lecture Notes in Statistics.
<span style="font-variant:small-caps;">Pakes, A. and G.S. Olley</span> (1995): “A Limit Theorem for a Smooth Class of Semiparametric Estimators,” *Journal of Econometrics* 65, 295-332.
<span style="font-variant:small-caps;">Powell, J.L., J.H. Stock, and T.M. Stoker</span> (1989): “Semiparametric Estimation of Index Coefficients,” *Econometrica* 57, 1403-1430.
<span style="font-variant:small-caps;">Robins, J.M., A. Rotnitzky, and L.P. Zhao</span> (1994): “Estimation of Regression Coefficients When Some Regressors Are Not Always Observed,” *Journal of the American Statistical Association* 89: 846–866.
<span style="font-variant:small-caps;">Robins, J.M. and A. Rotnitzky</span> (1995): “Semiparametric Efficiency in Multivariate Regression Models with Missing Data,” *Journal of the American Statistical Association* 90:122–129.
<span style="font-variant:small-caps;">Robins, J.M., A. Rotnitzky, and L.P. Zhao</span> (1995): “Analysis of Semiparametric Regression Models for Repeated Outcomes in the Presence of Missing Data,” *Journal of the American Statistical Association* 90,106–121.
<span style="font-variant:small-caps;">Robins, J.M., and A. Rotnitzky</span> (2001): Comment on “Inference for Semiparametric Models: Some Questions and an Answer” by P.J. Bickel and J. Kwon, *Statistica Sinica* 11, 863-960.
<span style="font-variant:small-caps;">Robins, J.M., A. Rotnitzky, and M. van der Laan</span> (2000): "Comment on ’On Profile Likelihood’ by S. A. Murphy and A. W. van der Vaart, *Journal of the American Statistical Association* 95, 431-435.
<span style="font-variant:small-caps;">Robins, J., M. Sued, Q. Lei-Gomez, and A. Rotnitzky</span> (2007): “Comment: Performance of Double-Robust Estimators When ‘Inverse Probability’ Weights Are Highly Variable,” *Statistical Science* 22, 544–559.
<span style="font-variant:small-caps;">Robins, J.M., L. Li, E. Tchetgen, and A. van der Vaart</span> (2008): “Higher Order Influence Functions and Minimax Estimation of Nonlinear Functionals,” *IMS Collections Probability and Statistics: Essays in Honor of David A. Freedman, Vol 2,* 335-421.
<span style="font-variant:small-caps;">Robins, J.M., L. Li, R. Mukherjee, E. Tchetgen, and A. van der Vaart</span> (2017): “Higher Order Estimating Equations for High-Dimensional Models,” *Annals of Statistics,* forthcoming.
<span style="font-variant:small-caps;">Robinson, P.M.</span> (1988): “Root-N-Consistent Semiparametric Regression,” *Econometrica* 56, 931-954.
<span style="font-variant:small-caps;">Rust, J.</span> (1987): “Optimal Replacement of GMC Bus Engines: An Empirical Model of Harold Zurcher,” *Econometrica* 55, 999-1033.
<span style="font-variant:small-caps;">Santos, A.</span> (2011): “Instrumental Variable Methods for Recovering Continuous Linear Functionals,” *Journal of Econometrics*, 161, 129-146.
<span style="font-variant:small-caps;">Scharfstein D.O., A. Rotnitzky, and J.M. Robins (1999):</span> Rejoinder to Adjusting For Nonignorable Drop-out Using Semiparametric Non-response Models, *Journal of the American Statistical Association* 94, 1135-1146.
<span style="font-variant:small-caps;">Severini, T. and G. Tripathi</span> (2006): “Some Identification Issues in Nonparametric Linear Models with Endogenous Regressors,” *Econometric Theory* 22, 258-278.
<span style="font-variant:small-caps;">Severini, T. and G. Tripathi (2012):</span> “Efficiency Bounds for Estimating Linear Functionals of Nonparametric Regression Models with Endogenous Regressors,” *Journal of Econometrics* 170, 491-498.
<span style="font-variant:small-caps;">Schick, A.</span> (1986): “On Asymptotically Efficient Estimation in Semiparametric Models,” *Annals of Statistics* 14, 1139-1151.
<span style="font-variant:small-caps;">Stoker, T.</span> (1986): “Consistent Estimation of Scaled Coefficients,” *Econometrica* 54, 1461-1482.
<span style="font-variant:small-caps;">Tamer, E.</span> (2003): “Incomplete Simultaneous Discrete Response Model with Multiple Equilibria,” *Review of Economic Studies* 70, 147-165.
<span style="font-variant:small-caps;">van der Laan, M. and D. Rubin</span> (2006): “Targeted Maximum Likelihood Learning,” U.C. Berkeley Division of Biostatistics Working Paper Series. Working Paper 213.
<span style="font-variant:small-caps;">[van der Vaart]{}, A.W.</span> (1991): On Differentiable Functionals, *The Annals of Statistics,* 19, 178-204.
<span style="font-variant:small-caps;">[van der Vaart]{}, A.W.</span> (1998): *Asymptotic Statistics,* Cambridge University Press, Cambridge, England.
<span style="font-variant:small-caps;">[van der Vaart]{}, A.W.</span> (2014): “Higher Order Tangent Spaces and Influence Functions,” Statistical Science 29, 679–686.
<span style="font-variant:small-caps;">Wooldridge, J.M.</span> (1991): On the Application of Robust, Regression-Based Diagnostics to Models of Conditional Means and Conditional Variances, *Journal of Econometrics* 47, 5-46.
|
|
Four-ever? Competition remedies in the audit market
Oxera
In light of recent accounting scandals, there are widespread calls for the UK competition authority to re-examine the audit market. Yet spending a substantial amount of resources on a market investigation, and concluding once again that there is a competition problem, is of little value if a suitable remedy cannot be found. A break-up of the Big Four is perceived by many as a necessary and long-awaited intervention, but is it the right solution? And if not, what would be an alternative remedy?
The UK audit market has gone through some turmoil recently.[1] This month the Financial Reporting Council (FRC), which regulates UK audit, announced a deterioration in audit quality across the ‘Big Four’ firms (KPMG, PwC, Deloitte and EY) compared with the previous year. Most notably, the FRC noted that 50% of KPMG’s FTSE 350 audits failed to reach the FRC’s standard for audit quality.[2] At a global level, the International Forum of Independent Audit Regulators found significant problems in 40% of the 918 audits of listed public interest entities that it inspected last year.[3]
The recent audit failures uncovered by regulators are hardly trivial. In Miller Energy the US Securities and Exchange Commission found that KPMG had overvalued certain assets by more than 100 times.[4] In BHS the FRC noted that PwC had signed off the accounts just days before the company was sold for £1.[5] In the more recent case of Carillion, equity analysts appeared unaware of the warning signs that might have been flagged by a good audit.[6]
These market outcomes in audit services are unsatisfactory from a policy perspective. The Big Four’s joint market share in FTSE 350 audit has been close to 100% for many years, and the Big Four likewise dominate the audit of large companies across the world. It is this high market concentration that is frequently blamed for the poor outcomes,[7] and regulators and competition authorities across the world have raised concerns about concentration ever since the collapse of Arthur Andersen in 2002. This year, two UK Parliamentary Committees have called for a new competition investigation by the Competition and Markets Authority (CMA) that ‘should explicitly include consideration of both breaking up the Big Four into more audit firms, and detaching audit arms from those providing other professional services’.[8] The Chief Executive Officer of the FRC and the CEO of PwC have both expressed support for the idea of having the CMA study the audit market afresh.[9]
Previous remedies in the audit market
The audit market is effectively dominated at the top end by the Big Four, and despite turmoil in financial markets the audit market structure has remained largely unchanged since 2002.[10] Concerns emanating from the high concentration include a lack of choice, a lack of innovation, higher audit fees, conflicts of interest, a lack of independence that weakens auditor professional scepticism, a systemic risk if one Big Four firm should fail, and, above all, poor-quality audit reducing the credibility and reliability of audited financial statements for the world’s largest companies.[11]
The previous investigation by the UK Competition Commission (CC), predecessor to the CMA, put forward a package of seven remedies, the most significant of which was a requirement that FTSE 350 companies put their audit out to tender at least every ten years (‘mandatory tendering’). Shortly thereafter, the EU introduced rules that obliged listed companies to switch their auditor (‘mandatory rotation’) every 20 years.[12] At the conclusion of the previous market investigation the CC expressed confidence in its package of remedies, noting that they should ‘increase choice’ and provide a ‘substantially improved environment for competition’.[13] The CC’s remedies package did not include any structural remedies.
The CC and EU remedies have not solved the problem of attracting more competition from outside the Big Four.[14] Indeed, the leading non-Big Four firms, Grant Thornton and BDO, between them have fewer FTSE 350 clients than before the regulatory interventions. In 2013, just before the new measures to boost competition were enacted, Grant Thornton had six FTSE 350 audit clients. In 2016, this number was unchanged. But in 2018 the firm said that it would exit the market for large audits.[15] In 2013 BDO had eight FTSE 350 clients, falling to five in 2016.[16] The previous rule changes are therefore widely perceived to have failed to remedy concerns over market concentration. The Big Four accountancy firms still audit 97% of FTSE 350 companies, a similar rate to that found by Oxera[17] in its 2006 market study for the FRC.[18]
What could structural remedies achieve?
Vertical separation
There are different types of structural remedies. Vertical separation of the Big Four firms into audit and non-audit services would not increase the basic number of firms participating in the FTSE 350 audit market, but it would increase the effective choice for many companies that have non-audit relationships with Big Four audit firms. These relationships can preclude, whether legally or in terms of company perception,[19] considering all four current audit firms as viable substitute auditors.[20]
Vertical separation would also be oriented towards audit quality, removing the conflicts of interest that can arise when the auditor also supplies valuable non-audit services. Yet the idea was not popular among investors at the time of the previous competition investigation. In 2012, an Oxera investor survey report found that ‘almost all investors surveyed do not want to see structural separation of the Big Four firms into audit and non-audit activities.’[21]
Horizontal separation
Horizontal separation of the Big Four firms would immediately improve choice in the sense of seeing more than four firms in the market, and also choice in terms of seeing several non-conflicted audit firms in every audit tender. Such a separation would therefore also, in general terms, improve competition. It could also serve audit quality by reducing the number of instances where a company involved in a complex transaction cannot realistically find an adviser that is not subject to some conflict of interest.
In the case of Carillion, PwC acted as the company’s pensions consultant (2002–17), then switched to advising the pension scheme trustees on Carillion’s restructuring proposals (from July 2017), and was finally appointed by the government to help manage the defunct Carillion after its collapse (from January 2018).[22] It would appear that PwC was the only viable choice to advise on Carillion’s insolvency, because it was the only Big Four firm that did not have active contracts with Carillion at the time of Carillion’s demise.[23] Expanding the market from a ‘Big Four’ to a ‘Large 6’ seems attractive in the face of such apparent conflicts, but realistically it would be a very difficult exercise if the aim is to create a ‘Large 6’ group of firms of similar size with similar international networks.
Would a break-up increase audit quality?
Audits are for the protection of investors against false accounting by a company’s management. The starting point is therefore that the true customer of audit, the investor, is not the procurer of audit services. This alone creates an environment in which market failures may be expected.
But why does audit quality fall short? Boeing and Airbus, Coca-Cola and Pepsi, and the Silicon Valley giants all operate in concentrated markets—but it seems highly unlikely that half of new aeroplanes, or soft drinks cans, possess substantial errors. Market concentration per se does not entail a poor-quality product: even a monopolist will have regard to product quality, knowing that if its product is faulty the financial consequences of fines and compensating consumers will typically be severe.
In equilibrium, a firm would only produce faulty items to the extent that it is rational to do so—i.e. if errors cannot be detected or if the financial consequences of errors are insubstantial. It seems to be widely accepted that audit quality is below the level demanded by investors, on whose behalf the audit is undertaken. The economics literature on audit has studied the link between greater market concentration and higher audit fees, but this does not help us very much in the present circumstances, where the primary concerns are not to do with high prices, or even exclusionary conduct, but with limited choice and sub-optimal quality. Where does the solution lie?
Penalties for poor-quality service
In public services markets (health, education) there is a high degree of regulatory supervision of quality—such as barring doctors who are found to be negligent, and awarding damages to patients harmed by negligence—even when the main providers are state-owned and have no incentive to chase profits at the expense of quality. In 2017, the UK National Health Service (NHS) estimated that the total liability for outstanding medical negligence cases could be as much as £56.1bn, and the £1.5bn annual NHS payout to settle claims is expected to double by 2023.[24] In audit, the strength of regulatory supervision by the FRC is subject to an independent review following concerns that it lacks adequate powers to intervene in the market.[25]
However, the FRC has recently been levying higher fines for audit errors. It fined PwC £6.5m regarding failed UK retailer, BHS;[26] £5.1m for its auditing of accountancy group, RSM Tenon (also, ironically, an auditor);[27] and £5m in relation to the property company, Connaught.[28] The other Big Four firms have also faced heavy fines, in both the UK and USA: £1.8m for EY’s auditing of Tech Data;[29] £4.8m for KPMG’s work on Miller Energy;[30] and £4m for Deloitte relating to the audit of Aero Inventory.[31] The FRC is also fining audit partners whom it finds to be responsible for misconduct—for example, the lead partner for BHS has been fined £325k and banned from working as an auditor for 15 years.[32] These FRC penalties are, however, minor relative to the £38m audit-related settlement reached by the UK’s largest pension scheme, USS, with PwC Brazil as part of a class action lawsuit against troubled oil giant, Petrobras.[33] But note that the FRC has this month implemented an increase in fines to £10m or more for ‘seriously poor audit work by a Big 4 firm’, following an independent review in 2017 of FRC sanctions.[34]
Are audit fines providing optimal enforcement?
From an economics perspective, if the deterrence effect of penalties is sufficiently severe, firms that might otherwise chase market share by cutting prices and their costs for a given audit will be deterred from cutting quality. In other words, when deterrence is weak, there is an opportunity for rent-seeking by firms that cut quality on unobservable dimensions. Although it might be argued that the cost to an accountant’s reputation is great enough to give the right incentives, this point seems difficult to sustain in light of the continued flourishing of firms that have had quite major hits to their professional reputations.
How large would audit fines need to be in order to deter bad audit? This article cannot provide the answer, but it may be instructive to look at a comparison between audit fines and cartel fines (in the EU). The latter are set based on the European Commission’s criteria. As the Commission explains:
The Commission’s policy with regards to competition law infringements is one of prevention … [fines] are ultimately aimed at prevention, and must hence fulfil two objectives: to punish and to deter. Breaking the competition rules is profitable if it goes unpunished – that is why companies do it.[35]
European Commission cartel fines are set based on the gravity and the duration of a competition infringement, and are capped at a maximum of 10% of a company’s total turnover. The 10% turnover ceiling for fines is engaged only when a cartel fine based on the usual criteria would otherwise be set at more than 10% of turnover.
Cartel fines are large compared with audit fines, as Tables 1 and 2 illustrate. Looking at FRC audit fines in the cases mentioned above, the average fine is 0.016% of a Big Four firm’s annual global turnover, as shown in Table 1. The final column of Table 1 indicates that increasing this percentage to 0.5% would lead to fines of a much greater order of magnitude. This is purely illustrative; it is not a recommendation as to the optimal size of audit fines.
Source: FRC and the audit firms’ annual reports for fiscal year 2017.
How do cartel fines compare? Weighted by the number of fines falling into each percentage bracket of turnover, the average European Commission cartel fine is 2.40% of turnover. This means that cartel fines expressed as a percentage of global turnover are about 150 times larger (2.40% divided by 0.016%) than FRC audit fines measured in the same way. Table 2 shows the calculation of the weighted average European Commission cartel fine.[36]
Table 2 European Commission weighted average cartel fines as a percentage of a company’s global turnover
Source: European Commission cartel statistics, last updated 21 March 2018.
It might be argued that increased deterrence for poor audit would come at the cost of competition, such as financial penalties leading to market exit and a ‘Big Three’, or hiking the barriers to entry for non-Big Four audit firms. Likewise, the Commission does not wish to fine a cartel with penalties that are so high that the consequence would be a reduction in the number of market competitors (or else the competition remedy would be self-defeating). Hence the scaling of cartel fines to turnover, and the ‘inability to pay’ test, whereby the Commission can reduce the scale of fines where it is shown that they pose a serious threat to the economic viability of the undertaking concerned. Scaling audit fines to audit firm turnover makes it unlikely that such penalties would deter entry or cause the market exit of one of the Big Four. The cartel fines policy therefore has useful principles, although it does not indicate the right order of magnitude for audit fines.
Fines set as a percentage of turnover would of course decline if measured against a smaller metric for revenue. As a hypothetical exercise, taking Big Four audit-only revenues as the denominator, the FRC fines mentioned previously would be on average 0.039% of the firms’ global audit-only revenues. In this scenario cartel fines at 2.40% of global turnover would be about 60 times greater than the recent FRC audit fines (2.40% divided by 0.039%), and a hypothetical fine of 0.5% of audit-only revenues would amount to between £45m and £60m. The latter figures are much closer to the penalties proposed in last year’s independent review of FRC sanctions—i.e. ‘£10 million or more (before any discount)’. Note also that the independent review recommended that ‘the figure could be well above [£10m] if dishonesty or conscious wrongdoing were involved.’[37]
Evidence on the deterrence effect of cartel fines can be found in the economics literature. Professor Stephen Davies at the ESRC Centre for Competition Policy estimates that cartel deterrence is highly effective:
On the most conservative of our estimates, more than half of all potential cartel harm never occurs, because it is deterred. This is very much a lower bound, and the proportion could be as high as 90%.[38]
Similar research would be required to understand the effects of a different penalty regime for poor audit.
Break-up or shake-up?
There is little doubt that a new CMA investigation would consider a break-up remedy. However, no matter what the divestments and structural changes, the inherent tension within the industry’s ‘client pays’ business model is likely to remain—that is, an auditor’s basic conflict between serving the paying client and serving the greater good.
If it were to address that conflict, the CMA would need to look into penalties and deterrence, as well as studying the effects of a break-up remedy. It is not realistic to expect the CMA to be able to fix every major issue in the market by achieving the goal of reduced concentration in FTSE 350 audit.
The quality of audit might be improved with a more disaggregated market, but this link is not certain. Moreover, it is possible that greater deterrence for bad audit would lead to an organic change in market structure: the Big Four have expertise in advising clients as to when a substantial divestment or restructuring might increase shareholder value. It seems possible that, in a world of greater deterrence, the accounting firms might look inwards using this expertise and shake up the market structure themselves.
Possibly the Big Four firms are already thinking along these lines. According to a letter from the two MPs who led the parliamentary review on Carillion, voluntary break-up scenarios are now under active consideration:
Since our report was published, Bill Michael, Chairman KPMG UK, said his firm had been thinking about break-up scenarios ‘for some time’ as the current business model of the Big Four is ‘unsustainable’. Mr Michael is quoted as saying:
‘The profession, like it or not, is an oligopoly. You can’t be all things to all men and women forever. We have to reduce the level of conflicts and demonstrate why they are manageable and why the public and all stakeholders should trust us.’
Other Big Four firms have reportedly begun making preparations for a break-up.[39]
Finally, the example of cartel fines shows that they are of a different scale to audit fines, raising the question as to whether fines should be reconsidered in the audit market. Penalties for anticompetitive conduct are used for prevention, not retribution. An audit firm with consistent high quality would have a minimal incidence of fines, which would place the high-quality firm at a competitive advantage to an audit firm with lower quality.[40] If audit quality became high across the market, no firm would be faced with very substantial financial penalties, and investor perceptions as to the value of statutory audit might be restored. In summary: prevention is better than cure.
[23] Peter Kyle, Member of the Business, Energy and Industrial Strategy Committee, speaking at the pre-appointment hearing with the Government’s preferred candidate for Chair of the Competition and Markets Authority, HC 985, 24 April 2018. See Transcript of oral evidence, Question 34, p. 19.
[36] The European Commission statistics provide the percentages of fines imposed on undertakings per cartel infringement. Certain cases may comprise several infringements for which multiple counting of undertakings is considered.
You can find out more about which cookies we are using or switch them off in settings.
Privacy Overview
This website uses cookies so that we can provide you with the best user experience possible. Cookie information is stored in your browser and performs functions such as recognising you when you return to our website and helping our team to understand which sections of the website you find most interesting and useful.
Strictly Necessary Cookies
Strictly Necessary Cookies should be enabled at all times so that we can save your preferences for cookie settings.
disable
If you disable this cookie, we will not be able to save your preferences. This means that every time you visit this website you will need to enable or disable cookies again.
3rd Party Cookies
This website uses Google Analytics to collect anonymous information such as the number of visitors to the site, and the most popular pages.
Keeping this cookie enabled helps us to improve our website.
disable
Please enable Strictly Necessary Cookies first so that we can save your preferences!
|
|
Your inner Chimp can be your best friend or your worst enemy...this is the Chimp Paradox
Do you sabotage your own happiness and success? Are you struggling to make sense of yourself? Do your emotions sometimes dictate your life?
Dr. Steve Peters explains that we all have a being within our minds that can wreak havoc on every aspect of our lives—be it business or personal. He calls this being "the chimp," and it can work either for you or against you. The challenge comes when we try to tame the chimp, and persuade it to do our bidding.
The Chimp Paradox contains an incredibly powerful mind management model that can help you be happier and healthier, increase your confidence, and become a more successful person. This book will help you to:
—Recognize how your mind is working
—Understand and manage your emotions and thoughts
—Manage yourself and become the person you would like to be
Dr. Peters explains the struggle that takes place within your mind and then shows you how to apply this understanding. Once you're armed with this new knowledge, you will be able to utilize your chimp for good, rather than letting your chimp run rampant with its own agenda.
|
|
75 Ill. App.2d 144 (1966)
220 N.E.2d 590
Decatur and Macon County Hospital Association, a Corporation Not For Profit of Illinois, for the Use of Niagara Fire Insurance Company, Phoenix Assurance Company, Standard Fire Insurance Company, Rochester American Insurance Company, American Insurance Company, United States Fire Insurance Company, Hartford Fire Insurance Company, and Merchants Fire Assurance Corporation, Plaintiff-Appellee,
v.
Erie City Iron Works, a Foreign Corporation, T.A. Brinkoetter & Sons, Inc., a Foreign Corporation, and Illinois Power Company, an Illinois Corporation, Defendants, Erie City Iron Works, a Foreign Corporation, Defendant-Appellant.
Gen. No. 10,679.
Illinois Appellate Court Fourth District.
September 26, 1966.
Rehearing denied October 24, 1966.
*145 *146 Earl S. Hodges, of Springfield, and Greanias & Owen, of Decatur (Marshall A. Susler, of counsel), for appellant.
Giffin, Winning, Lindner & Newkirk, of Springfield (James M. Drake, of counsel), for appellee.
TRAPP, P.J.
Defendant Erie City Iron Works, hereinafter designated Erie, appeals from a judgment in the sum of $30,818.50 entered in favor of the plaintiff upon the verdict of a jury against Erie and T.A. Brinkoetter & Sons, Inc. Other disposition has been made as to the case against the latter and we consider only the appeal of Erie.
Plaintiff's action was for property damage in the approximate amount of the judgment incurred as the result of the explosion of a gas-fired boiler manufactured by Erie and installed by Brinkoetter. At the time of the explosion installation had just been completed and was at the stage of the initial start-up and adjustment of the boiler. Title to it had not yet passed to the plaintiff.
The defendant's theory is that defendant was not guilty of the negligence that was the proximate cause of plaintiff's damages; that the court should have directed a verdict in favor of this defendant, or granted defendant's post-trial motion for judgment notwithstanding the verdict of the jury or, in the alternative, should have granted defendant a new trial of the issues, because of error committed by the court in submitting, to the jury, both Count I and Count II of plaintiff's complaint, which respectively were predicated upon a res ipsa loquitur theory and specific negligence theory; that there was error by the court in denying defendant's motion for mistrial because of prejudicial conduct of counsel; that conduct of *147 a juror was prejudicial to defendant; and that there was error by the court in giving certain instructions to the jury; and other errors hereinafter discussed.
Plaintiff purchased the boiler as a "package" boiler fabricated by Erie at its plant and shipped assembled for installation as a complete unit with automatic firing controls built on.
The fire control unit and the main motorized valve were not manufactured by Erie but were purchased by it and affixed to the fabricated boiler. The Brinkoetter contract called for it to install the boiler and connect it to the line bringing gas into the building.
In making the installation, Brinkoetter did not install what has been called a "dirt leg," i.e., a trap consisting of a length of pipe extending beyond the point where a vertical gas line is turned so that it travels horizontally. Its function is to catch condensed moisture and debris in the gas line. Plaintiff had retained consulting engineers to design and supervise installation of the boiler. The schematic drawing provided by the engineer did not show a "dirt leg." The latter testified that the contractor should install a "dirt leg" whether drawn in the plans or not. Officers of Brinkoetter say that it puts in dirt legs when the plans call for them, otherwise it does not.
Neither the fabricated boiler nor the connecting line, as installed, included a "strainer," which is described as a distinctive appearing section of pipe containing a screen, the function of which is to catch debris which might be carried through the line by the flow of gas. When used, it is installed in the line ahead of the valves and controls. A brochure of the valve manufacturer recommended that a strainer be placed ahead of the main valve. Such a strainer was not included in the unit fabricated by Erie. The consulting engineer's schematic drawing did not include a strainer. He testified that he would have included it if he had known that a strainer was recommended. An officer of Brinkoetter testified that he had never heard *148 of a strainer in a gas line. In behalf of the latter, its foreman and employes testified that as the gas line was being installed, steps were taken to knock loose the scale and clean the connecting pipe. It appears that the installation was nearly completed when the contractor was advised by the gas company foreman that it would be necessary to install a regulator, i.e., a device which lowered the pressure from the 35-pound pressure in the main to some 10 pounds as specified by the boiler. A used regulator was available at the hospital and was installed. At first it did not function, but after some adjustment was observed to be reducing the pressure. It was not tested after the explosion. In installing the regulator at this time, it was necessary to cut the gas line with a torch and weld on a section of pipe. It does not appear what, if anything, was done to inspect for and remove debris in the pipe following this operation. There is some conflict in the evidence as to whether or not welding slag would enter the pipe by reason of this work.
Under the terms of its contract with Erie, plaintiff elected to have the services of a start-up engineer. Upon notification of the completion of the installation such engineer, one Enders, was sent by Erie. The explosion in issue occurred at 11:40 a.m. on Thursday, September 25, 1958. In summary, it appears that Enders had arrived on the preceding Tuesday, that the boiler was started up and fired for some 20 hours and then shut down, and that on the morning of the 25th it had been started up and fired for some 2 hours preceding the explosion. Enders died following the explosion, apparently as the result of injuries sustained.
With regard to the things done during this period, one Binns, a member of the hospital maintenance staff, testified that Enders started the boiler operation, handled the controls and made adjustments, and that immediately prior to the explosion Enders was making an adjustment of the water level in the boiler. Charles Fearn, foreman *149 of the gas distribution crew of the utility company which was working on the exterior gas line, testified that he had been in the boiler room during the morning and Enders had told him that the boiler was on low fire or "no load" firing, and that he was going to test the boiler on high fire, asking Fearn to time the meter outside so that there could be a measurement of the cubic feet of gas entering the boiler on high fire. No specific arrangement was made as to when this would be done.
Following the explosion, a State boiler inspector, and representatives of the interested parties, together with engineers and experts retained by them, assembled at the scene to examine the boiler which had been kept undisturbed. Several of them testified that they had noticed the absence of the dirt leg and the screen in the gas line connected to the boiler. The main valve was examined as to its external indicator and the testimony varies from the statement that it was apparently closed, through slightly open to one-third open. The boiler inspector testified that he assumed that it was open. It does not appear that any organized procedure was followed so that each expert present observed all of the matters testified to.
The main valve was then disassembled. Most witnesses testified to observing some scale and several pieces of welding slag on both the upstream and downstream sides of the valve.
There is testimony that upon examination of the several parts of the valve, a resilient neoprene seal was observed to be indented and that the stainless steel seat of the valve was scored to a depth of 1/16th of an inch or so, the width of the indentation being that of a blade of a table knife. There is other testimony that the seat bore only normal scratches. It does not appear that tests were made to determine whether the indentations on the neoprene seal coincided with the scoring of the valve seat. At the trial the neoprene seal no longer bore any indentation. *150 This was explained as being due to the resilient nature of the substance. The steel valve seat was not produced at the trial.
The consensus of the testimony is that there was a gas explosion followed by an explosion of the boiler itself. The opinion testimony is that the first explosion resulted from the ignition of a surplus of gas within the combustion chamber, which gas was somehow ignited. Paul Wilson, an employe of Erie in charge of their service department, testified that he did not believe it possible to find the actual cause of the majority of explosion cases, and George Harper, a professor of engineering at the University of Illinois, testified that in such an explosion things are so disrupted that it cannot be ascertained with certainty what happened, but that it was necessary to draw deductions.
From the record it appears that a variety of factors inducing the explosion may have existed. There is, of course, the contradictory nature of the testimony as to whether or not the motorized main valve was closed or open, whether or not slag from welding had lodged in the main valve so that it was not completely closed, and whether such slag would be sufficient to hold the valve open with the pressures concerned without distorting the valve stem, which apparently was in normal condition.
There is testimony by Ted Brinkoetter that the control system, upon being tested, did not always work, but there is also testimony that it functioned correctly upon tests. Harry Reynolds, an investigating engineer retained by the plaintiff, testified that it would take a very small amount of gas to cause an explosion in this boiler, and that it was particularly hazardous to operate the boiler on a "no load" basis as the mixture of air and gas gets out of balance and becomes explosive. He also testified that upon initial examination, the oil burning switch was on instead of the gas burning switch. A witness, testifying in behalf of Brinkoetter, stated that shortly before the explosion, *151 Enders flipped a switch and that the flame in the boiler went out and did not come on again.
It is one of defendant's arguments that by this contract it was to furnish a package boiler but had no responsibility for its installation. This position was taken in its first motion to the complaint and is argued here.
The nature of defendant's disclaimer seems to be based upon its Exhibit #1 contained in a foreword to the instruction manual which Erie shipped with the boiler. A relevant part includes the following:
"When the service of an Erie City Iron Works Engineer is provided for the customer, it is for the purpose of aiding in the training of the customer's personnel and not to replace them or assume any of their duties. It should be understood that the responsibility for operation rests solely with the customer's operators and the Erie City Iron Works assumes no responsibility for the customer's operators' failure to properly perform their respective duties, and the presence of an Erie City Iron Works Engineer at the customer's plant in no way relieves the customer's personnel of any of their responsibilities."
The following also appears in slightly varying form in several places in the contract for the purchase of the boiler:
"With respect to all preliminary operations, initial start-up, demonstration of capacity and performance guarantees, representatives of the Company are authorized only to advise and consult with the Purchaser or its representatives and no representative of the Company is licensed to operate the equipment. In the event the Purchaser shall operate the equipment specified hereunder prior to final acceptance, the Purchaser shall indemnify and save harmless the Company against any loss or expense and against any liability imposed upon the Company, resulting *152 from the operation of such equipment by the Purchaser prior to final acceptance, except any such loss, expense or liability for injury or damage resulting from the negligent acts or omissions of the Company or its agents or employees." (Emphasis supplied).
It appears from the testimony that the package boiler is not operational upon delivery but requires adjustment to make it perform properly. Paul Wilson, who is in charge of field service for defendant, testified that the linkage of the butterfly valve regulating the ratio of air and gas must be adjusted and that the damper linkage must be "positioned." He testified that the service engineer never operates the boiler but that it is the obligation of the purchaser to make such adjustments according to the engineer's instructions. He testified that it was the service engineer's duty to make a visual check of the gas line installed, check the controls and firing equipment, consult and assist placing the boiler in service, instruct in operating the boiler and its controls and assist in making the final adjustments.
Brewster, a witness for Brinkoetter, testified that Enders examined the pipeline but made no suggestions for changes in the work as installed, and the record is that Enders did, in fact, start-up and fire the boiler, make adjustments, and made or had arranged to make the tests, including the testing of its capacity on the high fire. Binns, an employe of the hospital, testified that no one other than Enders handled or adjusted the controls. The manual submitted by Erie contains a section A designated "Preparing the boiler for service Inspection of unit." Section A-1 states that prior to placing equipment in service a complete inspection should be made to determine its condition and continues:
"In case of newly constructed power equipment, this inspection should insure that the unit has been correctly completed."
*153 Section A-2 is as follows:
"Responsibility for the completion of construction normally rests with the customer's construction engineer working in conjunction with the manufacturer's erection or service engineer. At completion of construction work, an inspection should be made in the presence of the customer's construction engineer, operating engineer, the construction superintendent and the manufacturer's engineer (if one is present) and agreement reached that the equipment is in a satisfactory condition for placing into service."
There is no evidence that such inspection or agreement was reached or called for by defendant's service engineer.
As to the contention that by contract Erie had no responsibility, claimed under its Exhibit #1, the "foreword" to the instruction manual and the several provisions set out in the contract should not control under these circumstances. The effect of these documents might be that Erie could not be required to perform the tests and effect the start-up of the boiler, but they should not control liability where under the evidence it might be reasonable to conclude that they did, in fact, undertake and perform the work. The contract provision quoted does not attempt to exclude negligence of Erie employes.
Erie discusses Count I of the complaint as involving the principles of res ipsa loquitur under a pleading of general negligence. These principles are thoroughly discussed in Metz v. Central Illinois Electric & Gas Co., 32 Ill.2d 446, 207 NE2d 305, and need not be reiterated.
[1] Erie urges that the inference of negligence under Count I should not be allowed because the boiler was not under its exclusive control. The defendant points out that the evidence discloses that Enders, Brewster, an employe of Brinkoetter, Binns, an employe of the hospital, and Robert Brinkoetter were all present at the time of the explosion. The evidence has been examined to determine *154 what, if anything, these individuals were doing to exercise control of the unit. We cannot say that it is contrary to the manifest weight of the evidence for the jury to conclude that Erie's man Enders was, in fact, in control of the proceedings incident to the start-up and testing of the boiler. There is no evidence that any person other than Enders participated in any phase of the work.
In May v. Columbian Rope Co., 40 Ill. App.2d 264, 189 NE2d 394, the complaint alleged the purchase and delivery of a new rope which broke shortly after placing the rope into use. There was judgment n.o.v. entered by the trial court. The Appellate Court reversed, holding that the inference of negligence under the theory of res ipsa loquitur was properly applicable. As to that defendant's contention that it was not in control of the rope at the time of the injury, the court said:
"Decisions from other states and recent cases here reject this inflexible application of a rule of control and hold that a defendant in a res ipsa loquitur case cannot automatically defeat an allegation of negligence with a bare showing that, before harm struck, it had parted with control of the harmful instrumentality. (Prosser, Torts 206 (2d ed 1955).)
"The demonstrable trend of these authorities is to determine from the nature of the defective instrumentality and the surrounding circumstances whether the inference of the defendant's negligence is strong enough to survive the fact that, between the defendant's control and the plaintiff's injury, another possession intervened."
The court continued to say that it was for the determination of the jury as to whether the permissive inference of negligence arising from the facts was to prevail over defendant's countervailing proof of due care.
As stated in Prosser, Law of Torts, 2d ed 1955, p 206, chap 7, § 42, the word "control" may be the wrong word. It is said:
*155 "Some courts have said that it is enough that the defendant was in exclusive control at the time of the indicated negligence. It would be far better, and much confusion would be avoided, if the idea of `control' were discarded altogether, and we were to say merely that the apparent cause of the accident must be such that the defendant would be responsible for any negligence connected with it."
In Schroeder v. City & County Sav. Bank of Albany, 293 NY 370, 57 NE2d 57, the defendants were several contractors and the owner of a building under repair. The court noted:
"It is not necessary for the applicability of the res ipsa loquitur doctrine that there be but a single person in control of that which caused the damage."
Amongst other cases defendant relies upon Kirchner v. Kuhlman, 334 Ill. App. 339, 79 NE2d 628. There defendant's employes were working on plaintiff's premises but we find no evidence that these defendants had control of the trash container belonging to the plaintiff in which the fire started. Again, in Krump v. Highlander Ice Cream Co., 30 Ill. App.2d 103, 173 NE2d 822, the collision of two automobiles caused one of them to strike and damage plaintiff's building. While the court said that the doctrine of res ipsa loquitur did not apply, it did hold that there was a presumption of negligence where an accident occurred which would not ordinarily occur if due care had been taken, and that it was proper to call upon the defendants to exculpate themselves. The distinction between this conclusion and the theory of res ipsa loquitur appears slight.
[2] Defendant argues that Count I of the complaint alleged general negligence stating a cause of action upon the theory of res ipsa loquitur, while Count II alleges certain acts of specific negligence, and that under the authorities in this State the inference of negligence which *156 arises under res ipsa loquitur, "vanishes" upon the introduction of evidence of specific negligence. Amongst the authorities cited are Bollenbach v. Bloomenthal, 341 Ill. 539, 173 NE 670. This rule has been categorically overruled by our Supreme Court in Metz v. Central Illinois Electric & Gas Co., 32 Ill.2d 446, 207 NE2d 305. In that case the complaint charged general negligence in one count employing the theory of res ipsa loquitur, and in a second count alleged specific negligence. At the close of the evidence plaintiff was required to, or did elect, to rely upon the charge of negligence and the theory of res ipsa loquitur. The verdict for the plaintiff was reversed in the Appellate Court on the theory that res ipsa loquitur did not apply as other parties had access to the area of the gas main. In reversing the Appellate Court, the Supreme Court remarked upon the conflict amongst the Illinois decisions. We may note that many of these decisions are in broad language open to a variety of interpretations, and frequently they do not indicate the reason for the decision. In Metz the Supreme Court concluded that the more studied, more just view is that the inference of negligence does not vanish when contrary evidence appears, but that it remains to be considered and weighed by the jury against the direct evidence offered by the party charged, citing Cobb v. Marshall Field & Co., 22 Ill. App.2d 143, 159 NE2d 520; Illinois Pattern Jury Instruction, 22.01 with comment on pages 128, 129; Prosser, 20 Minn L Rev, 241. See also O'Hara v. Central Illinois Light Co., 319 Ill. App. 336, 49 NE2d 274; May v. Columbian Rope Co., 40 Ill. App.2d 264, 189 NE2d 394.
[3] Defendant's contention that plaintiff should have been required to elect as between the counts is controlled by the rule of Metz. Defendant's authorities are Wm. Wrigley, Jr. Co. v. Standard Roofing Co., 325 Ill. App. 210, 59 NE2d 510; and Simmons v. South Shore Hospital, 340 Ill. App. 153, 91 NE2d 135. In the former case the Appellate Court undertook to specify what may be described *157 as the requirements that plaintiff elect between the general negligence count and the count for specific negligence. The only cited authority for such procedure was Bollenbach v. Bloomenthal and its rule that the inference of negligence vanished upon the introduction of evidence of specific negligence. By reason of the Metz decision, this reason for such rule no longer exists. Simmons v. South Shore Hospital, as well as Jackson v. 919 Corp., 344 Ill. App. 519, 101 NE2d 594, simply relied upon the rule of Wrigley as authority without discussing it.
There is, in fact, persuasive opinion contrary to the contention of Erie regarding the theory of election in Erckman v. Northern Illinois Gas Co., 61 Ill. App.2d 137, 210 NE2d 42. There premises were damaged by an explosion of gas leaking from the company lines. The complaint alleged only specific negligence and there was some evidence of a failure of periodic inspection. The trial court gave an instruction authorizing the jury to apply, or employ, the inference of negligence under res ipsa loquitur. The Appellate Court reversed since there was no pleading of general negligence, but stated that upon a new trial the complaint should be amended to include such an allegation. The court there said:
"An inference of general negligence arising from the doctrine of res ipsa loquitur is not necessarily inconsistent with proof of specific negligence. To hold that proof of specific negligence precludes the application of the res ipsa doctrine could lead to the absurd result of weak proof of specific negligence voiding a strong inference of general negligence.... If there is an inference of general negligence and proof of specific negligence, but reasonable men may differ as to the effect of this evidence, it should then be for a jury to determine under which theory, if any, the plaintiff should prevail. McCormick v. Kopmann, 23 Ill.2d 189, 205, 161 NE2d 720 (3rd Dist 1959)."
*158 [4] The Illinois courts recognize that the doctrine of res ipsa loquitur is but one form of circumstantial evidence. May v. Columbian Rope Co., 40 Ill. App.2d 264, 189 NE2d 394.
It has been suggested that the doctrine that requires election assumes that the inference arising through res ipsa loquitur must be an alternative to direct proof rather than a type of circumstantial evidence to be weighed with other evidence, and it has been criticised as an assumption that the pleader must be totally ignorant of the facts. 2 ALR3d 1335, at 1340. There is reason in the hypothesis that there should not be a penalty imposed upon the pleader for placing before the court all facts known to him. 27 Fordham L Rev, 411-415; Foster v. Union Starch & Refining Co., 11 Ill. App.2d 346, 137 NE2d 499. This is particularly true when an allegation notifies the defendant of the intent to rely upon the inference of negligence arising under the doctrine of res ipsa loquitur. It is the policy under the rule of Metz v. Central Illinois Electric & Gas Co., 32 Ill.2d 446, 207 NE2d 305, that once the inference of negligence arises through allegations of general negligence, it remains for the consideration of the jury, unless and until the precise cause of the injury is established. 27 Fordham L Rev 411. In Prosser, Law of Torts, 2d ed, chap 7, § 43, p 214, it is suggested:
"It is quite generally agreed that the introduction of evidence which does not purport to furnish a complete explanation of the occurrence does not deprive the plaintiff of res ipsa loquitur."
In Cassady v. Old Colony St. Ry. Co., 184 Mass. 156, 68 NE 10, at p 12, the court said:
"The defendant also contends that, even if originally the doctrine would have been applicable, the plaintiff had lost or waived her rights under that doctrine, because, instead of resting her case solely upon it, she undertook to go further, and show particularly *159 the cause of the accident. This position is not tenable. It is true that, where the evidence shows the precise cause of the accident, (citing authorities), there is, of course, no room for the application of the doctrine of presumption. The real cause being shown, there is no occasion to inquire as to what the presumption would have been as to it if it had not been shown. But if, at the close of the evidence, the cause does not clearly appear, or if there is a dispute as to what it is, then it is open to the plaintiff to argue upon the whole evidence, and the jury are justified in relying upon presumptions, unless they are satisfied that the cause has been shown to be inconsistent with it. An unsuccessful attempt to prove by direct evidence the precise cause does not estop the plaintiff from relying upon the presumptions applicable to it."
We believe that this position was approached in Krueger v. Richardson, 326 Ill. App. 205, 61 NE2d 399, when the court noted that the plaintiff was not required to prove the specific acts of negligence as alleged, but they had a right to rely upon the proof and its reasonable inferences to establish a prima facie case of general negligence.
In this case it seems proper to say that reasonable men might differ as to the effect of the evidence heard by the jury. Expert witnesses would not even undertake to announce an hypothesis, but rather advised of the virtual impossibility of reaching a specific determination of what caused the explosion. This situation here appears to be precisely that contemplated in the language of Erckman v. Northern Illinois Gas Company.
[5] In its reply brief Erie contends that the doctrine cannot be followed because there are multiple defendants. No Illinois cases seem applicable as precedent. In Schroeder v. City & County Sav. Bank of Albany, 293 NY 370, 57 NE2d 57, it was held error to dismiss a complaint seeking to apply res ipsa loquitur as against three defendants. *160 See also Burr v. Sherwin-Williams Co. (Cal App), 258 P.2d 58, 38 ALR2d 905 et seq. Again in Zichler v. St. Louis Public Service Co., 332 Mo 902, 59 S.W.2d 654, general negligence was pleaded against the service company while specific negligence was pleaded as to another defendant who was found not guilty by the jury. It was contended that it was improper to permit the res ipsa loquitur inference to be applied to one joint tort feasor, but not the other. Pointing out that the rule was one of evidence rather than pleading, the court said:
"A plaintiff should not be compelled to confine his action to one joint-feasor only in order to be accorded the rights which the law gives to him."
It being the policy under the rule of Metz that the inference of negligence is to be weighed by the jury with other evidence, we see no reason why the benefit of such rule should be denied to the plaintiff where under the events at issue, more than one party may be the source of injury to the plaintiff for otherwise he would be limited in the use of, or be completely denied the benefit of the rule. In Metz the Supreme Court said that whether the doctrine applies in a given case is a question of law for the trial court. We believe that these conclusions dispose of the contentions of Erie that the court erred in refusing to strike par 8 to Count I.
Defendant contends that the case must be remanded for error in the giving of instructions. His objection to plaintiff's instruction #20 is that it permits the jury to consider the case upon the theory of res ipsa loquitur, as well as upon the allegations of specific negligence. The matters hereinabove discussed dispose of this contention.
[6] There is objection to Brinkoetter's instruction #6 which may be summarized as an issues instruction relating to negligence alleged as to Erie and as to the defendant Brinkoetter. It is contended that as to Erie there is no evidence in the record as to certain matters *161 stated in the instruction to be alleged in the complaint. The Abstract discloses that at the conference on instructions Erie simply made the objection that the evidence did not support all of the charges. This does not meet the rule that specific objections to instructions must be made at the conference on instructions. Vasic v. Chicago Transit Authority, 33 Ill. App.2d 11, 180 NE2d 347. The court's comment indicates that he believed that those matters not supported by the evidence had been omitted from the instruction. Under such circumstances we do not believe that there is reversible error.
[7] Erie urges that the cause must be reversed and remanded by reason of the fact that a juror on voir dire indicated that he was not interested in any lawsuits then pending in court, but that subsequent to the trial, counsel discovered that he had been a defendant in a lawsuit and was, at the time of trial, a plaintiff in a pending cause. Erie does not contend that it was, in fact, prejudiced by the juror sitting upon the panel, but says that the prejudicial effect cannot be calculated. It indicates that it could have challenged the juror, though it is not claimed that it would have done so. In Department of Public Works & Buildings v. Christensen, 25 Ill.2d 273, 184 NE2d 884, it was alleged that the party would not have accepted the juror if a true answer had been given. The Supreme Court there held that the motion for a new trial would be denied unless it was shown not only that the juror answered falsely, but also that prejudice resulted. Erie cites the case of People v. Ortiz, 320 Ill. 205, 150 NE 708, which may be distinguished because in that case the juror had actually expressed hostility to the defendant which he had concealed.
[8] Erie urges that the judgment must be reversed because of a reference to insurance introduced during cross-examination in behalf of the defendant Brinkoetter. One George Harper testified in behalf of the plaintiff as an expert witness who had examined the boiler following *162 the explosion. It appears that he had originally been requested to make the examination by a representative of the company insuring Erie. The name of the insurance company was given in answer to a question to whom he had delivered his report. The trial court sustained an objection to a question as to what party was covered and an objection as to whether the insurance company represented Erie. The trial court, while indicating disapproval of counsel's action, denied the motion for a mistrial.
It is clear that plaintiff did not, in any way, precipitate this issue. Under the circumstances of this case, the proceedings clearly indicated to the jury that certain insurance companies were to be the beneficiaries of a judgment for plaintiff. This fact would seem to indicate little probability of prejudice as between insurance companies upon the issue of liability. Edwards v. Hill-Thomas Lime Co., 378 Ill. 180, 37 NE2d 801.
Upon the possibility of prejudice regarding the issue of damages, the amount of the verdict is slightly less than the amount paid by plaintiff to Erie for the boiler. Insofar as counsel may have attempted to create prejudice as between the parties defendant, the verdict of the jury is joint and they seem to make no distinction. Under the circumstances of this case, we conclude that there was no abuse of discretion by the trial court in refusing to grant a mistrial. Isenhart v. Seibert, 6 Ill. App.2d 220, 127 NE2d 469.
[9] Upon consideration of the issues of law, we conclude that the trial court did not err in refusing to direct a verdict or enter a judgment n.o.v. upon the several motions made by Erie, and that, from an examination of the evidence, the verdict of the jury is not contrary to the manifest weight of the evidence.
Taken with the case was plaintiff's motion to dismiss as a "use plaintiff" the Niagara Fire Insurance Company. The effect of such dismissal is to reduce the amount of *163 the judgment in the sum of $4,873.05. The motion is allowed and the judgment ordered reduced in said amount.
The judgment of the trial court is affirmed, but the cause is remanded with directions to enter judgment in the amount due by reason of the dismissal of the party plaintiff pursuant to motion.
Affirmed as modified.
SMITH and CRAVEN, JJ., concur.
|
|
Comparison of patient satisfaction with acrylic and flexible partial dentures.
Restoration of partial edentulous mouth may be done using a variety of treatment options. Removable partial denture (RPD) is commonly used because of its availability. RPDs from flexible resins unlike those from acrylic engage hard and soft tissue undercuts and feel more comfortable in the mouth. The aim of the study was to compare satisfaction with dentures made from these two types of materials. It was a quasi-experimental study among thirty patients at the Prosthetics Clinic, University College Hospital, Ibadan. Patients aged 16 years or more, requiring RPDs with one to three missing teeth in the anterior region of either the upper or lower arch participated. A modified semi-structured interviewer-administered questionnaire was used to collect data on sociodemographics and oral health variables. The level of satisfaction was assessed using a visual analogue scale. Data were analysed using descriptive and multivariate statistics at a significance level of P < 0.05. The participants' ages ranged between 16 and 51 years, mean age was 33.8 ± 10.01 years. Male: female ratio was 1:1 and mean duration of edentulousness was 11.37 ± 10.52 years (median - 9.50). Most 28 (93.3%) subjects delayed replacement of their missing teeth; reasons were indifference 13 (43.4%), financial constraint 10 (33.3%), ignorance 4 (13.3%) and fear of aspiration 1 (3.3%). Overall, 21 (70.0%) participants were more satisfied with the flexible dentures, 6 (20.0%) with acrylic dentures while 3 (10.0%) were equally satisfied with both types of dentures (P = 0.04). Subjects were more satisfied with the flexible RPD than the acrylic resin RPD.
|
|
ZURB Tavern - jacobwg
http://zurb.com/tavern
======
pepsi
By the name, I thought that this was going to be a MUD.
|
|
Rigid stretchers for transporting injured patients are well known. Certain known rigid stretchers are partially collapsible. These stretchers include one or more rigid support panels or beams. Because of the rigid panels or beams, these stretchers can be relatively heavy and cumbersome when handled by emergency personnel during rescue operations, and these stretchers can occupy a relatively significant amount of space in vehicles and other storage areas. Also, these known stretchers do not include a patient covering which aids in the protection of emergency personnel from hazardous body fluids from the patient and which guards the front of the patient's body during transport.
One known rescue bag has been developed for keeping injured people warm while they are lying on stretchers. Though this rescue bag covers part of the patient's body, it is merely an accessory to a stretcher. Accordingly, one of the disadvantages of this rescue bag is that it does not function as a patient carrier. The emergency personnel must use a stretcher in conjunction with this rescue bag in order to pick-up, carry and transport an injured person to a desired location. In addition, such a rescue bag does not have medical treatment openings which provide emergency personnel with relatively quick access to select portions of the person's body, for example, to deliver essential treatments, such as IV solutions, heart defibrillation and the like.
Therefore, there is a need to overcome the foregoing disadvantages and to provide improvements to patient transporters.
|
|
Randomised trial comparing forced-air warming to the upper or lower body to prevent hypothermia during thoracoscopic surgery in the lateral decubitus position.
In the supine position, forced-air warming is more effective on the lower body than on the upper body to prevent intraoperative hypothermia. However, it is unknown in the lateral decubitus position. We thus compared forced-air warming on the upper and lower bodies in the lateral position. Patients (n=123) were randomised to receive forced-air warming on the upper body or lower body during thoracoscopic surgery in the lateral position. We measured the nasopharyngeal temperature at 0, 30, 60, 90, and 120 min after lateral positioning during surgery and the infrared tympanic membrane temperature at 0, 30, 60, 90, and 120 min after surgery. Patients received both upper and lower body warming at a temperature of <35.5°C. The primary outcome was the incidence of intraoperative hypothermia with a temperature of <36.0°C. Intraoperative hypothermia was less frequent with the upper body warming than with the lower body warming {21/62 vs 35/61, risk ratio [95% confidence interval (CI)] 0.6 (0.4-0.9), P=0.011}. The intraoperative temperature was higher with the upper body warming than with the lower body warming at 30 (P=0.002), 60 (P<0.001), and 90 (P<0.001) min after lateral positioning, and the postoperative temperature was higher at 0 (P<0.001) and 30 (P=0.001) min after surgery. Fewer patients received both upper and lower body warming in the upper body warming group than in the lower body warming group during surgery (1 vs 7, P=0.032). Forced-air warming was more effective on the upper body than on the lower body to prevent hypothermia during thoracoscopic surgery in the lateral decubitus position. NCT02993666.
|
|
+ 73 = -0*m - 7*m + 5*m for m.
4
Solve 0 = -6*x - 27*p + 276, 73*p - 72*p = 4*x - 146 for x.
37
Solve 6*n + 2828 = 5*z + 3287, -2*z - n = 187 for z.
-93
Solve 14*b = -b + h - 58, 5*b + 40 = -5*h + 10 for b.
-4
Solve 9*d - 537 = 3*l, -d = 5*l - 1560 + 1511 for l.
-2
Solve 4*h + 2*t + 0*t - 4*t = -0*t - 38, -43*t = -46*t - 3 for h.
-10
Solve 5*l - 10*f - 2976 + 3071 = 0, -2*l - 3*f = -11 for l.
-5
Solve -3*s - 217*i - 22 = -215*i - 4, -10*s - 49*i - 60 = -51*i for s.
-6
Solve 18*a + 5*q = 23*a + 30, 0 = -7*a + q - 36 - 18 for a.
-8
Solve 3*i - 6*i - q = -6, 6*i + 52*q = 10*q + 38 + 94 for i.
1
Solve -3*h - 7001 + 7102 = -o, 3*o - 5*h + 194 = -89 for o.
-86
Solve 4*r = 7*r - 132, -355*q - 5*r + 98 = -351*q - 122 for q.
0
Solve -4*n - 195 = -3*n + 5*i, -129 = 697*n - 694*n + 3*i for n.
-5
Solve 5*d - 27 = 4*r, r - 10*r + 10*d - 39 = 6*d for r.
-3
Solve -3*s - 29*a + 10*a = -17*a + 28, 0 = -2*s - 5*a - 26 for s.
-8
Solve 5*g + 0*g + j - 2 = -17, -70*g + 68*g = 2*j + 6 for g.
-3
Solve -o + 4402*r + 6 = 4401*r, -30 = -5*o + 6*r for o.
6
Solve -93 = -u + 3404*t - 3402*t, 2*u + 10*t + 302 = -142 for u.
3
Solve -2*j = -9*j + 2*i + 302 - 19, 2*j + 4*i - 22*i = 168 for j.
39
Solve -5*v + 0*v - 230 = 0, -17*m = v - 3 - 2 for m.
3
Solve 35 = 3*g - k, 20 = 4*g + 133*k - 129*k for g.
10
Solve -3*r - 5*z - 4 = 0, 50891*z - 50894*z + 8 = -r + 2 for r.
-3
Solve -124*l + u = -127*l + 15, -5 = 2*l - u for l.
2
Solve -3*q - 419 = -5*x - 0*q - 352, 5*q = -126*x - 246 for x.
-1
Solve -5089032*l = 5*t - 5089027*l + 60, 2*t - 5*l + 6 - 40 - 19 = 0 for t.
-1
Solve -40*q - 42 = 3*y - 46*q, 6*y - 8*y - 4*q = -12 for y.
-4
Solve y + 3 = 8196*b - 8194*b, b - 2*y = 6 for b.
0
Solve 5*t = 154*z - 123*z - 377, -4*z - 4*t = -44 for z.
12
Solve s = -2*t - 3*t - 56, 5*t + 456*s + 274*s = -785 for t.
-11
Solve f - 2 = -2*u, -5*u = -5*f - 37080 + 37150 for f.
10
Solve 0*z + 5*t = -3*z + 50, -4*z - 3*t + 16 = -20 - 5 for z.
5
Solve -375 + 527 = 4*l - 4*y, -5*y + 190 = 5*l for l.
38
Solve 2*d - 802 = -6*t - 1180, -t - 63 = -5*d for t.
-63
Solve -2*c + 7 = i, 4*i = 4*c - 2715 + 2707 for i.
1
Solve q - 2*q + 57*o - 60*o + 91 = 70, -5*q + 40 = 2*o for q.
6
Solve 40 = -12*u - 8, 0 = 183*r - 186*r + 2*u + 2 for r.
-2
Solve -1828*z + 1812*z - 298 = -5*f, 0 = 3*f - 6 - 0 for z.
-18
Solve -2*p + 16*s - 3 + 9 = 0, 2*s = 3*p - 7*p - 2*s - 60 for p.
-13
Solve 0 = -3*f - 3*p + 9, -1916*p = 4*f - 1920*p - 20 for f.
4
Solve 4*f + 2*s + 22479 = 22575, 3*f - 21*s - 27 = 0 for f.
23
Solve n - 4 = -8*x + 5*x, 5*x - 103*n + 101*n + 5 = -3 for x.
0
Solve -13*r + 28616*n - 28612*n - 176 = 0, -43 = -298*r + 301*r - n for r.
-4
Solve 4*b + 35*m - 1229 = 0, 104*b - 212*b = -105*b + 4*m - 143 for b.
1
Solve j - 12 - 363 = 5*i - 37, j = 3*i + 202 for i.
-68
Solve -3*d + 32 = -4*r, 55*r - 26*r + 3*d = 28*r - 1 + 8 for r.
-5
Solve 12*q - 214 = 5*u, -39*u - 17 - 647 = 2*q + 814 for q.
2
Solve -6 = -h + i, -4*i = -13*h - 1982125 + 1982131 for h.
-2
Solve 0 = 123291*b - 123286*b + 7*o - 5, -4*o = 0 for b.
1
Solve -13*c = 3*d - 9*c - 9*c + 137, -d + 5*d + 186 = 5*c for d.
-49
Solve -2*i + 226 - 83 = -l, -3*i - 2*l = -163 - 62 for i.
73
Solve 0 = -3*a + d - 22, -10*a + 38 = 2*a - 102*d + 35*d for a.
-8
Solve d + 4*d = l + 32, 0 = 4*l - 26*d - 1095 + 1229 for l.
-27
Solve 64*s + 286 = -2*w - 141 + 37, 3*w = -5*s - 39 for w.
-3
Solve -86*v + 82*v + j - 6 = -0*j, -2*v = -5*j + 102 for v.
4
Solve -w + 7*i + i - 7*i = -5*w, 27*w + 2*i - 76 = 0 for w.
4
Solve 19 + 68 = 25*a - 2*p, 391*a - 387*a + 4*p - 18 = 5*p for a.
3
Solve 0 = -11*i - 44, -3*i + 0*i + 22 = 516*o - 514*o - 0*i for o.
17
Solve 0 = 3*r + 34*x - 31*x - 18 - 12, 95*r - 562 = 2*x for r.
6
Solve -5*z = v + 7, -3*z + 5*v + 110 = 2*z + 19*v + 26*v for z.
-2
Solve 3*q - 3*a = 48, -29854*q - 8*a - 29 = -29853*q for q.
11
Solve -54737*u + 54733*u = 45*s - 37, -s - 3 = 2*u for s.
1
Solve -5*u + 15*r = 19*r - 22, -7*u + 6*r = -4*u + 3*r + 3 for u.
2
Solve 2*w + 4*m = -22, 0 = -520*w + 530*w - 2*m + 88 for w.
-9
Solve 111*q + 5 = 104*q + 2*f, -3*f + 45 = 2*q for q.
3
Solve -12889 + 12916 = 12*j - 15*j, -5*j - 25 = 5*b for b.
4
Solve 0 = -5*s - 100*c + 745, -12*s - 2*c + c = -16*s + 191 for s.
49
Solve -l - 4*b = 3*l - 2128 + 2060, -10*l + 275 = 5*l + 5*b for l.
19
Solve 161 + 185 = 46*d - 7*d - 15*s + 16*s, s = 5*s + 20 for d.
9
Solve t + z = -8, 26*t + 43*z = -14*t + 39*z - 248 for t.
-6
Solve 3*p - 6*t = 66, 0 = -2*p + 2654*t - 2652*t + 34 for p.
12
Solve 67*y - 44*y - 19*y + 4*f - 76 = 0, 3*y + 4*f - 76 = 0 for y.
0
Solve -745454*h + 20 = -745459*h, -4*t = -3*h - 4 for t.
-2
Solve 5*y - 9*j = -12*j + 32, 16*j - 20*j - 4 = -5*y for y.
4
Solve 403*r - 400*r + 46 = a, -5*a = -2*r - 355 + 99 for r.
2
Solve 43*f = 45*f - 2*k - 24, f + 53 = -4*k for f.
-1
Solve 2*h - 79 = 14*z - 13 + 88, -4*h - 33 = 3*z for z.
-11
Solve -14*c + 2032*n = 2034*n + 50, 0 = -5*c - 2*n - 14 for c.
-4
Solve -x = 6*t + 13, -3*t = 5*x + 2042747 - 2042682 for t.
0
Solve -11*h = 4*i - 14*h + 70, h - 25 = 3*i for i.
-1
Solve 41*g = 53*g + 48, -3*v = -v - 5*g + 6 - 32 for v.
3
Solve -2886 = 207*w - 4*k, 28553*k - 28550*k = 4*w + 47 for w.
-14
Solve 0 = -2*n - o - 21, 960*o + 18 = -3*n + 963*o for n.
-9
Solve -3*z = 0, 1 - 37 = -5*c - 8*z - 10*z + 19 for c.
11
Solve 2*v - a + 9 + 34 = -0, 163 = 3*v + 5*a for v.
-4
Solve a + 6*y = 11*y - 45659 + 45672, 3*a - 4*y + 5 = 0 for a.
-7
Solve 245*l - 496*l = -246*l - 3*d - 18, 17 = 4*l - 5*d for l.
3
Solve 2*z - 13*v = -159, 254*z - 250*z = 4*v - 208 for z.
-47
Solve -25 = -3*b + 9737*z - 9733*z, -5*b + 3*z + 60 = 0 for b.
15
Solve 3*j + 3*d = 4 - 19, 0 = 9*j - 3*d + 93 for j.
-9
Solve -4 = 6*d - 2*x, 3*d - 1595*x - 8 = -1596*x for d.
1
Solve 2*z - 4*d - 50 = 5*z, -38*z - 355 = d - 6*d for z.
-10
Solve -35*b + 78*b = 8*j + 42*b + 7, b = 3*j + 2 for j.
-1
Solve -17*q = i - 55, 4*i + 2489 = 5*q + 2490 for i.
4
Solve -5*y - 7*p - 110 + 12 = 0, -8*y - 51 = -6*y - 9*p for y.
-21
Solve 9081 = -p - 5*h + 9066, 2*p + 4*h = -18 for p.
-5
Solve 3*s + 4*v + 19 = 3*v, -56*v - 1464 = 37*v + 458 - 155 for s.
0
Solve 14*h + 90 = 4*f + 24, 0 = 6*h - 3*h + f + 16 for h.
-5
Solve 4*b = -8, -3*p - 121835 = 8*b - 121873 for p.
18
Solve 0*h - 3*h - 4*m + 10 = -10*m - 17, -5*m = -9*h + 68 for h.
7
Solve 4*t - 1 = -3*s - 2, 6*s - 11 = 5*s + 10*t for s.
1
Solve -5*k = 28*w + 158, -57*w + 52*w + k = 2*k + 28 for w.
-6
Solve -2*u - 10 = -2*o, -3*o - 22*o = -u + 2*u - 27*o + 9 for u.
-1
Solve -f - 4*f + 27*r = 25*r + 13 + 24, 0 = 3*f - 5*f + 13*r - 27 for f.
-7
Solve 4*m + 55 = -67*a - 134, 3*m = -36*a + 31*a - 6 for a.
-3
Solve -4*f + 5581*w - 40 = 5577*w, -47*w + 60 = -14*f - 49*w for f.
-5
Solve 2*z + 1045 = -61*a, 3*z - 40*a = -41*a - 29 for z.
-4
Solve 0 = -5*h + 5, 10677*h - 13 = -2*i + 10676*h for i.
6
Solve -k + 19*a - 8 - 28 = 0, -4 - 4 = -5*k + a for k.
2
Solve 11 = -5*j - 80*l + 118*l, 3*j - 2*l = -69 for j.
-25
Solve -4*k = 3*f + 11, 88404*f - 88402*f - 5*k - 8 = 0 for f.
-1
Solve -l + 3 = -3*h, -10*l - 71184*h = -71186*h - 2 for l.
0
Solve -5*r + 5*f - 137 = -f, 5452*f = 4*r + 5455*f + 133 for r.
-31
Solve 3 = -4*l - 19*c + 20*c - 5, 0 = 2*l - 15*c + 62 for l.
-1
Solve -15208*l = -15211*l - 4*f - 20, 5*f + 30 = -5*l for l.
-4
Solve -5*t - 23*x = -54*x + 27*x - 188, 4*x = -2*t - 188 for t.
0
Solve 20 = -8*u - 4*s, 94*u = 89*u - 4*s - 5 for u.
-5
Solve 8*y - 84 = 13*y + 9*b - 3 + 48 - 28, 2*y + 35 = -9*b for y.
-22
Solve 3*p + 0*p - 192*q + 386*q - 50 = 195*q, 0 = 3*p - 5*q - 46 for p.
17
Solve -15*r - g = -5*g - 73, 0*r - 3*r + 12*g + 2 = 13*g for r.
3
Solve 336*w = 337*w - j - 68, -5*w - j = 38 for w.
5
Solve 5*k - 12 = 3, 30*s - 1977 = -0*s + 2*s + k - 720 for s.
45
Solve 17*g - 13*g + 447 = 83*f, -3*f + 15 = 0 for g.
-8
Solve n - h - 211 + 243 = 0, -48*n - 1306 = -2*h for n.
-27
Solve -3*z = -4*z - 3*u + 33, -z - 44 = -211729*u + 211725*u for z.
0
Solve 4120 = 60*s + 5*t, 72*t - 31*t + 12 = 38*t for s.
69
Solve -6962 - 1622 = -65*c + 2*t, 3*c + 2*t = 4*t + 411 - 11 for c.
132
Solve -5*w = 5*j - 50, 52376*j + 2210 = 52597*j + w for j.
10
Solve i + 54 = 13*n + 267, -2*i - 14*n + 74 = -18*n for i.
5
Solve 2*w - 9 = 5*q, -w - 17*q - 45 = 4*q - 7*q for w.
-3
Solve 3*y - 6*l - 243 = 0, 6142*y - 6147*y - 197 = 4*l for y.
-5
Solve -68 = -2*m + y - 26 - 36, -4*m + 5*y = -
|
|
Expression of four growth factors in recessed extraocular muscles of rabbits.
The study was designed to determine the temporal expression of insulin-like growth factor (IGF)-I, IGF-II, basic fibroblast growth factor 2 (bFGF-2), and transforming growth factor beta 1 (TGF-beta1) in recessed extraocular muscles. Sixteen eyes of eight rabbits were subjected to conventional 4-mm recession of superior rectus muscles. Two rabbits were untreated as control. The rabbits were killed and their eyes were enucleated at 3 (group 3), 6 (group 6), 24 (group 24), and 72 (group 72) hours after the operation (two rabbits per group), and the expression of IGF-I, IGF-II, bFGF-2, and TGF-beta1 was immunohistochemically examined. The peak levels of IGF-I, IGF-II, and TGF-beta1 expression were observed in groups 24, 6, and 3, respectively. However, bFGF-2 was less expressed than the other growth factors in all groups. IGF-I, IGF-II, bFGF-2, and TGF-beta1 in regenerating muscle cells were expressed by different kinetics, suggesting a distinct role of each growth factor during wound healing after recession of extraocular muscles.
|
|
<html>
<body>
<h1>Directory listing</h1>
<hr/>
<pre>
<a href="management-core-3.0.4-javadoc.jar">management-core-3.0.4-javadoc.jar</a>
<a href="management-core-3.0.4-javadoc.jar.md5">management-core-3.0.4-javadoc.jar.md5</a>
<a href="management-core-3.0.4-javadoc.jar.sha1">management-core-3.0.4-javadoc.jar.sha1</a>
<a href="management-core-3.0.4-sources.jar">management-core-3.0.4-sources.jar</a>
<a href="management-core-3.0.4-sources.jar.md5">management-core-3.0.4-sources.jar.md5</a>
<a href="management-core-3.0.4-sources.jar.sha1">management-core-3.0.4-sources.jar.sha1</a>
<a href="management-core-3.0.4.jar">management-core-3.0.4.jar</a>
<a href="management-core-3.0.4.jar.md5">management-core-3.0.4.jar.md5</a>
<a href="management-core-3.0.4.jar.sha1">management-core-3.0.4.jar.sha1</a>
<a href="management-core-3.0.4.pom">management-core-3.0.4.pom</a>
<a href="management-core-3.0.4.pom.md5">management-core-3.0.4.pom.md5</a>
<a href="management-core-3.0.4.pom.sha1">management-core-3.0.4.pom.sha1</a>
</pre>
</body>
</html>
|
|
Susy and Geno, Inseparable!
Susy and Geno’s long-awaited reunion finally took place on March 11 at Market-Market Mall in Taguig!
A few weeks ago, Susy started a massive search for her missing friend Geno . Susy even put up a Facebook page where all info, photos and videos in relation to the search was posted.
Finally after weeks of anticipation, Susy and Geno reunited again where the two met up not only with each other but with their loyal and very enthusiastic supporters, waving banners and placards expressing their unwavering support.
Geno arrived at the activity center holding a fresh bouquet for Susy. It was a wonderful day for Susy and Geno and for their solid fans club. After long years of waiting, the two best friends shared a long and warm embrace.
Check out this YouTube video dance performance from Susy and Geno!
The two gladly gave a dance number people requested for. Afterwards, the pair mingled with the crowd where the latter grab the chance to take photos with them.
The reunion was also the first public appearance in many years for the faces of Sustagen Milk in the 80’s and 90’s, who disappeared from the public eye, only to re-emerge two decades later, starting with Susy’s return last February. Only then would we find out that she and Geno had actually lost touch through the years.
Meanwhile, Susy and Geno’s friends from Sustagen also did their part, providing free milk for all guests and fans.
It was a lovely day for Susy and Geno and for their loyal supporters. I’m sure happy memories came to you as you watched them reunited.
|
|
Mark,
I discussed this issue Friday with Paul, however, since it is an issue I am
not entirely knowledgeable about, I think Paul should run this by you...
---------------------- Forwarded by Tana Jones/HOU/ECT on 03/20/2000 04:44 PM
---------------------------
Paul Radous@ENRON
03/20/2000 03:12 PM
To: Tana Jones/HOU/ECT@ECT
cc:
Subject: Commodities Exchanges
Tana,
As a follow up to Friday's discoveries, on the commodities side, it appears
as though the agencies which regulate the commodity exchanges drive the rules
regarding safeguarding of client accounts. Other than 1) the US exchanges
(which are all governed by the CFTC), and 2) the OMLX, and the IPE (which we
have already addressed), what are the other exchanges whose contracts we
trade?
Thanks
Paul
|
|
Steroid hormone modulation of olfactory processing in the context of socio-sexual behaviors in rodents and humans.
Primer pheromones and other chemosensory cues are important factors governing social interactions and reproductive physiology in many species of mammals. Responses to these chemosignals can vary substantially within and between individuals. This variability can stem, at least in part, from the modulating effects steroid and non-steroid hormones exert on olfactory processing. Such modulation frequently augments or facilitates the effects that prevailing social and environmental conditions have on the reproductive axis. The mechanisms underlying the hormonal regulation of responses to chemosensory cues are diverse. They are in part behavioral, achieved through the modulation of chemoinvestigative behaviors, and in part a product of the modulation of the intrinsic responsiveness of the main and accessory olfactory systems to conspecific, as well as other classes, of chemosignals. The behavioral and non-behavioral effects complement one another to ensure that mating and other reproductive processes are confined to reproductively favorable conditions.
|
|
short party dresses 2017
(119)
it's luxurious style as well as high quality will definitely meet your needs are.by a massive lower price, you may be the particular fortunate someone to receive top selling short party dresses 2017along cheap. therefore, that inexpensive and awe-inspiring ware has to be an ideal giving to your pal.will to acquire the modern short party dresses 2017now? in addition, it is possible to browsing our site and buying various other great points on your own.your sophisticated short party dresses 2017with some other color along with dimensions will certainly suit most of the people’ohydrates flavor. most of us list all of those goods available on the online store.you could pick any one you prefer and buy this today.
|
|
VIOLENT/NON-CONSENSUAL SEX WARNING/DISCLAIMER: It is a story portraying a Conqueror/slave relationship, so it would appear non-consensual at first. As for sexual violence, there are scenes (In parts 3 and 4) which are detailed and graphic, and may not suit some readers.
Lord Conqueror of the Realm
Written by WarriorJudge
Part 19
In northern Greece , in the tavern on the border between Philippi and Macedonia , Nobleman Verosus and Nobleman Marton met with Domitia, in a room they had rented. The two Noblemen could not afford being overheard or even being seen in public with the lass.
"I don't understand. What did you do wrong?" the frustrated Nobleman Marton shouted at poor Domitia, who of no fault of her own found herself in this impossible and dangerous position. It was all Nobleman Marton could do not to resort to physical violence.
"I did exactly as I'd been told…" the young woman tried to defend herself.
Nobleman Verosus sent his fist through the wall. "Then the Conqueror should have been all over you… in and out of you!" he yelled and his eyebrows nearly touched together.
"The Conqueror wouldn't touch me," said Domitia.
Both Noblemen were still waiting for a reasonable explanation for this brilliant failure.
"Perhaps the Conqueror loves the Queen," she suggested quietly and shrugged.
Both men burst into laughter.
"Young women… All soft in the head… some of them never learn…" said Nobleman Verosus .
"Silly child," said Nobleman Marton, "the Conqueror doesn't love. The Conqueror lusts, lusts after power, lusts after blood and lusts after women, that is all. That is the source of her power. That's what sets her ever so highly above the rest of her sex. She feels no emotions and so she isn't governed by them."
"Well, the Lord Conqueror did marry the Queen," argued Domitia.
"She only married her concubine to spite us, to show us who truly rules the Empire. It is common knowledge even amongst complete idiots!"
Nobleman Marton turned to Nobleman Verosus and said, "We must consider the possibility that the Conqueror didn't take this silly girl over here because she realized it was all a ploy."
"By the Gods… what shall we do? Should we run?" Terror began to tighten its grip over Nobleman Verosus and he began fidgeting like a skittish horse.
"We are governors, we can't just disappear. Besides, there is no escaping the Conqueror. There is no place to hide, no place out of the Conqueror's reach. If we run now, the Conqueror will know we're guilty. Let me think…" Nobleman Marton said.
After some time had elapsed in silence with both men pacing restlessly from wall to wall, Nobleman Marton continued: "Lady Messalina won't say anything. She's neck deep in this and she has too much to lose."
"The Lord Conqueror knows nothing more than my name, and I am hardly the only Domitia in the Realm," she said. "And I wore nothing that would imply my station."
"That's very good. We might just come out of it alive," he said.
***
Two days had gone by. The Conqueror and the Queen were taking a stroll in the Imperial gardens, near the lily pond that the Queen adored so much. As they walked together side by side, enjoying the morning sun, the odor of blossoms and the light exercise, Gabrielle recalled the days when she had been a slave. How she used to walk in these magnificent gardens, trying to understand her Lord's moods and actions. It felt like a lifetime ago. As if to remind herself that she was in a different place now, that those days were over, Gabrielle reached for her Lord and interlaced her arm with the Conqueror's.
"They are all waiting for us in the Great Hall," Gabrielle said.
"Let them wait," the Conqueror smiled and looked at her Queen, while pressing a gentle hand over the pregnant Queen's back for support.
"There is one thing that isn't clear to me, why didn't Lady Messalina wait until after nightfall to tell me about the girl?"
"Whoever set this entire subterfuge didn't take two things into account. I wasn't familiar with the informant that disclosed Perous' whereabouts. I wasn't sure whether I could trust him or not, and I wasn't about to march blindly into a trap on the 'say so' of an informant I knew nothing about. First, I sent a scout to check the area and to confirm that Perous was indeed there and that he was alone. That took time," explained the Conqueror.
"And the second thing?"
"That I would return from Cyra alone and leave my forces behind… My desire to see you was too great. I couldn't wait."
The Queen rose to stand on her toes and placed a warm heartfelt kiss on the Conqueror's jaw, the highest place she could reach.
"You know, my Lady, you are the Realm's Sovereign."
"I know, my Lord," the Queen said and wondered why her Lord chose this time to remind her of that fact.
"And Lady Messalina is one of your ladies in waiting. She is your responsibility," the Conqueror said.
The reason for the Conqueror's words began to become apparent and clear to her. "I assume treason is punishable by death, my Lord?"
"It is, my Lady."
As they were nearing the gates of the palace, the Queen turned to the Conqueror, "My Lord?"
"Hmmm…?"
"Death is the most severe penalty for treason, is it not?" the Queen asked.
The Conqueror smiled for she understood the meaning and the reason for the Queen's question.
"It is, my Lady."
***
"The Lord Conqueror and her Majesty the Queen," the ceremony master announced as the Conqueror and the Queen entered the Great Hall.
As the Conqueror and the Queen made their way to their thrones, all present in the Great Hall bowed before them until they reached their destination and seated themselves.
"Noblemen and Ladies of the Realm," the Conqueror exclaimed, "We have summoned you all here due to a grave matter which has come to our attention and requires further investigation."
The noblemen and the ladies of the Realm began to look at one another agitatedly to see if anyone had any idea as to what the Conqueror was referring to.
"Lady Messalina," the Queen called.
Lady Messalina approached the thrones. "Your Majesties," she said and bowed before them.
As she stood before them, the Conqueror leaned over and whispered something in the Queen's ear.
"Lady Messalina, is it not true that just before noon on the day of my Lord's return from Cyra, you informed me that a young lass had been seen entering the Imperial tent?"
Lady Messalina's blood drained from her face and she grew as pale as a sheet. "It is true, your Majesty," she admitted.
"And how did you come by this bit of information?" the Queen inquired further.
"I… I can't remember, your Majesty," replied the nervous lady.
"Is it not true, that the lass in question is your very own daughter?"
Lady Messalina nearly fainted. The crowd around her gasped in surprise and walked backwards away from her, as if trying to disassociate themselves from her.
"It is, your Majesty." At this stage, lady Messalina had already realized there was no point in lying.
"Was it not your intention to cause dispute between my Lord and myself?"
Lady Messalina threw herself at the Queen's feet and began kissing them.
"You will stand up," the Queen ordered and her assertiveness gave pause to her subjects.
Lady Messalina rose back to her feet.
"You will answer the question."
"I will your Majesty," Lady Messalina replied.
"Did you act on your own volition?"
"No, your Majesty."
"Who put you up to this?" asked the Queen.
"Please, your gracious Majesty, I beg you please don't make me…"
"Nobleman Verosus and Nobleman Marton!" the Conqueror exclaimed.
Both men made their way through the crowd, mortified, joined their accomplice and bowed before the thrones.
"What have you got to say for yourselves?" the Conqueror's voice was ominous.
"Indeed not, but when her Majesty the Queen asked the question, Lady Messalina threw a glance at the two of you," said the Conqueror. "That confirmed my suspicions."
Noblemen Marton and Verosus confessed to the specifics of their scheme for all to hear by orders of the Conqueror, without trying to cast responsibility at one another and minimizing their own involvement in the traitorous conspiracy.
"Is my Lady prepared to render her verdict in the matter of Lady Messalina?" the Conqueror asked.
"I am, my Lord," the Queen replied.
"Lady Messalina, you have handled yourself poorly and reprehensibly. Being a Queen's lady in waiting is a sacred duty. It has been proven to my satisfaction that you have betrayed that duty and my trust. You have been disloyal to me and disloyal to my Lord and to the Realm. You've tried by despicable means to come between my Lord and myself. This offense I cannot and will not pardon. However, I am satisfied that there are mitigating circumstances since you were extorted. Desperation deprives some of rational thought and behooves them to take desperate measures. Therefore, it is my verdict that you should be stripped of your station and be banished from the Realm forthwith for my Lord's pleasure." The Queen voice was steady, firm and confident.
"Noblemen Marton and Verosus, greed and malice are no defense against treason. Your actions solicited, financed and facilitated an act of rebellion against us and against this Realm, which resulted in the death of several subjects and warriors of the Realm. Moreover, you have extorted her Majesty the Queen's lady in waiting and exploited her innocent daughter. You and your families will be stripped of your station and possessions. Marton and Verosus, you shall suffer a quick death in three days time. As for Macedonia , I hereby appoint Lila of Potidaea as the new governor to Macedonia and a Lady of this Realm. As for Philippi, I hereby appoint her Majesty the Queen's lady in waiting, Satrina, as the new governor to Philippi, if it pleases you, your Majesty," the Conqueror asked the Queen.
"It does, my gracious Lord," smiled the Queen.
As the guards came to remove the condemned men from the Great Hall, Lady Satrina scurried to bow before the Conqueror and the Queen.
"Your Majesties, I cannot thank you enough for your infinite kindness, honor and generosity your Majesties have shown me, and I am grateful with all my heart and soul for the great trust you place in me, but I pray you, if I may," she said and her excitement was evident in her voice.
"You may," granted the Queen.
"With your Majesties' permission, and if it pleases you, I wish to remain in her Majesty the Queen's presence and service for I am so very contented and happy with my life here in the palace," she said. "I could not have hoped to serve a kinder, nobler Sovereign than our benevolent Queen."
The Queen glanced over at the Conqueror with questioning eyes and the Conqueror, who was the one who first granted the honor, nodded her consent. Their subjects could not help but notice the silent exchange between them.
"As you wish, Lady Satrina and thank you," the Queen said and did her best to remain formal and regal and not let her own excitement be known in the forum.
"Captain Cornelius of the Imperial Guard," announced the ceremony master.
The Queen wasn't familiar with the name.
With wide determined strides, fitting a military man, Captain Cornelius approached the thrones and bowed before his Rulers.
"Your Majesties," he greeted.
It was then that the Queen recognized whom he was and fought an urge to move uncomfortably on her throne.
"With your permission, your Majesty," he humbly said and turned his attention to the Queen.
"Granted," said the Queen.
"I come before your gracious Majesty, a humble servant, to beg for forgiveness. In the past your Majesty showed me great kindness and granted excellent remedy, which I, I am ashamed to say, repaid with gross disrespect."
He chose this grand forum to offer his genuine remorse, rather than offer his apologies in private. In his mind, since he disrespected the Queen in the presence of the healer and others in the infirmary, it was only just that he should surrender his pride to the Queen in public.
He was also careful not to divulge any specifics of his transgression, including the fact that he was referring to the times back when the Queen had been a slave, so as not to cause the Queen either discomfort or embarrassment.
"I am sorry to say, I was foolish and a proud brute and I know in my heart I am not worthy of your Majesty's pardon. I assure your Majesty that as a result of your Majesty's dignity, generosity and supreme conduct towards me, which I didn't deserve I have mended my ways. I submit myself before you, your Majesty to punish as your Majesty deems fit," he said and knelt before the Queen.
"Stand up, Captain," she ordered and he obeyed.
"Your past misdeeds towards me are pardoned," the Queen said, then covered her mouth and whispered a private question in her Lord's ear, to which the latter nodded her agreement.
"You have exhibited candor and great honor, which leads me to believe your repentance is true and sincere. I hereby appoint you a nobleman to the Realm and a governor to Philippi ," the Queen said.
He lowered his head in humility and thanked his Queen for the bounty she had bestowed upon him.
"That concludes our business here today, Nobleman and Ladies of the Realm," the Conqueror stated, stood up and offered her arm to assist her pregnant Queen to her feet.
Standing in front of their subjects, the Conqueror went on to say, "As I trust you all know, today I have shown great leniency towards Marton and Verosus for their appalling treachery. By no means must you perceive it as any form of a precedent. I shall see no further division in this Realm."
As the Queen and her Lord made their way out of the Great Hall, their subjects bowed before them then began clapping their hands and chanting, " Hail to the Queen. "
Whilst strolling along the corridor that led to the Imperial chambers, the curious Queen asked, "How did my Lord know that the lass in Cyra was Messalina's daughter?"
"They have the same shade of hair color and the shape of their eyes and chins are exactly alike," the Conqueror explicated.
Alone in the privacy of their chambers, the Conqueror turned to her Queen took her small hands in hers and said with bright eyes, "I am so very proud of you, my Lady," and adorned the thin fingers with tender kisses.
***
After three days had passed, Marton and Verosus were brought to the gallows upon a wagon, which resembled one that was fit to carry small livestock. In the square stood a large crowd, as with any execution. The Conqueror always believed that even regular people, non-warriors were fascinated by death and were curious to see life as it was leaving the body. If someone else did the actual killing, then all the better.
Heavily guarded, the two men were escorted up the podium to face their Ruler and executioner. Verosus's neck was first to be stretched out and presented before the Conqueror.
As he was waiting, trembling on his knees and mumbling unintelligible words, the Conqueror unsheathed her sword, which was resting over her chiseled thigh in a leather scabbard. The polished, long and well-whetted blade caught the sun's rays.
The crowed cursed at the condemned men and cheered for their Sovereign, goading her on. It wasn't a novelty. The Conqueror knew that once she would lay the deadly strike, the cheers and the cursing would halt.
With one strike, the Conqueror put an end to his mumbling, and his severed head rolled over the floor of the podium, which was covered with sawdust to absorb the spilt blood, and his headless corpse slumped to the ground next to it.
Then came Marton's turn. Before he was shoved down to his knees by the guards and into his accomplice's pool of freshly spilt blood, the Conqueror leaned slightly towards him and whispered into his ear: “You do realize this is not retribution for some silly, inconsequential rebellion, which could have been handled quickly by a single battalion of my forces. This is mainly for trying to come between me and my Queen.”
His shocked expression was still frozen on his face when the Conqueror removed his head from his shoulders.
As the Conqueror wiped the blood off her sword and looked at Marton's head next to her boots, her mind strayed back to another execution which she had performed, the one of the British Captain, who had raped and killed some body slave, whose name the Conqueror couldn't even remember now.
Before she had sent him to his death, the Conqueror had desired to make it perfectly clear to the Captain the true and exact reason for his chastisement. When he had extended his head forward before her, whilst on his knees, she'd hissed at him, “This is for putting your filthy hands on what's mine. The slave you've raped and killed was just an excuse.”
|
|
I welcome comments and constructive criticism of my images so that I may improve my photography
Please click on image to enlarge.
Friday, 7 October 2011
Caterpillar and Fungi.
IDs required for this caterpillar and fungi please. The caterpillar was found on the back garden path, so no idea what plant it came from. The fungi was found under a tall bank next to a stream in the Trough of Bowland.
Christian, thanks for your comments. I put the caterpillar on the stick and held it up with one hand and took the photo with the other. Cliff, thanks for your comments. The ID is spot on, thank you very much.
|
|
Poly(ADP-ribose) polymerase (PARP) 1, whose primary role is initiation of DNA repair, is activated by damaged DNA and uses NAD+ to automodify itself and recruit other proteins involved in DNA repair. Due to its role in DNA repair, PARP-1 inhibition has been long targeted for treatment of different cancer types. By now there are already several different clinical PARP-1 inhibitors used in treatment of ovarian and breast cancers, and many others are under clinical trials for other types of cancer, such as prostate cancer, pancreatic cancer, blood cancer and others. PARP-1 inhibition has also been demonstrated to have a promising effect for treatment of some cardiovascular conditions. Extensive DNA damage caused by a number of cardiovascular conditions, such as a stroke or heart attack, can result in PARP-1's hyper-activation, leading to depletion of cellular NAD+ and subsequent cell death. It has been demonstrated that inhibition of PARP-1's activity using small molecules can prevent apoptosis and necrosis in such cells. Studies in animal models have indeed shown that inhibition of PARP-1 can have beneficiary effects for treatment of various cardiovascular conditions, such as ischemic stroke, cerebral ischemia, diabetic cardiomyopathy and others. Despite a growing number of PARP-1 inhibitors, their molecular mechanism of action is not well understood. The overall objective of my project is to define the molecular mechanisms of activation and silencing of PARP-1. My central hypothesis is that the structural and dynamic changes occurring in PARP-1 upon DNA binding play key roles in the regulation of protein activation and dictate relative efficiency of PARP-1 inhibitors. Three specific aims are pursued in this project: 1. To define how PARP-1 is silenced through auto-modification and released from single-strand break (SSB) DNA, 2. To measure the effect of inhibitors on PARP1 structural dynamics for those that trap it at a SSB versus those that don't, 3. 
To define the organization and dynamics of the PARP- 1/nucleosome complex in conjunction with the housekeeping role of PARP-1 in transcriptional regulation. My proposed experiments will reveal key insights on the precise molecular mechanisms of PARP-1 activation and inhibition, aiding in the design of new PARP-1 inhibitors to improve outcomes in patients with various diseases.
|
|
Brown Man of the Muirs
In the folklore of the Anglo-Scottish border the Brown Man of the Muirs is a dwarf who serves as a guardian spirit of wild animals.
Folklore
William Henderson provides an account of the Brown Man and a pair of hunters in Folklore of the Northern Counties (1879), taken from a letter sent by the historian Robert Surtees to Sir Walter Scott:
In the year before the Great Rebellion two young men from Newcastle were sporting on the high moors above Elsdon, and at last sat down to refresh themselves in a green glen near a mountain stream. The younger lad went to drink at the brook, and raising his head again saw the "Brown man of the Muirs", a dwarf very strong and stoutly built, his dress brown like withered bracken, his head covered with
frizzled red hair, his countenance ferocious, and his eyes glowing like those of a bull. After some parley, in which the stranger reproved the hunter for trespassing on his demesnes and slaying the creatures who were his subjects, and informed him how he himself lived only on whortleberries, nuts, and apples, he invited him home. The youth was on the point of accepting the invitation and springing
across the brook, when he was arrested by the voice of his companion, who thought he had tarried long, and looking round again "the wee brown man was fled." It was thought that had the young man crossed the water the dwarf would have torn him to pieces. As it was he died within the year, in consequence, it was supposed, of his slighting the dwarf's admonition, and continuing his sport on the way home.Taylor, George and Raine, James (1852). A Memoir of Robert Surtees. Durham: George Andrews. pp. 81–2.
Walter Scott in a return letter to Surtees suggested that the Brown Man may be related to the duergar (dwarfs) of Northumberland.
Fairy tales
In folklore the Brown Man appears as a solitary fairy, but in fairy tale literature he is a member of a tribe of similar beings. They once lived all over England and Scotland, but in the wake of human progress they dwindled in number and now live in a cave in Cumberland. Known as the Brown Men of the Moors and Mountains, they have great strength that allows them to hurl small boulders. By day they mine the mountains for gold and diamonds, and by night they feast in their underground hall or dance on the moors. They kidnap human children and kill any man they catch alone in the wilderness. However, they can be made subservient by repeating the incantation, "Munko tiggle snobart tolwol dixy crambo".
See also
Brownie (folklore)
Redcap
References
Category:Dwarves (mythology)
Category:English folklore
Category:Scottish folklore
|
|
{#sp1 .466}
{#sp2 .467}
{#sp3 .468}
|
|
package tk.woppo.sunday.model;
import android.database.Cursor;
import com.google.gson.Gson;
import com.google.gson.annotations.SerializedName;
import java.util.HashMap;
import tk.woppo.sunday.dao.WeatherDataHelper;
import tk.woppo.sunday.dao.WeatherTodayDataHelper;
/**
* Created by Ho on 2014/7/4.
*/
/**
 * Model object for "today's weather" data.
 *
 * Instances are produced either directly from a JSON payload or from a
 * database {@link Cursor}, and are memoized in a process-wide map keyed
 * by city id so repeated cursor reads avoid re-parsing the stored JSON.
 */
public class WeatherTodayModel extends BaseModel {

    /** In-memory cache of parsed models, keyed by {@link #id}. */
    private static final HashMap<String, WeatherTodayModel> CACHE = new HashMap<>();

    /** City id. */
    @SerializedName("cityid")
    public String id;

    /** City name. */
    @SerializedName("city")
    public String cityName;

    /** Temperature. */
    public String temp;

    /** Weather description. */
    public String weather;

    /** Wind direction. */
    @SerializedName("WD")
    public String wind;

    /** Wind strength. */
    @SerializedName("WS")
    public String ws;

    /** Humidity. */
    @SerializedName("SD")
    public String sd;

    /** Publication time. */
    public String time;

    private static void addToCache(WeatherTodayModel entry) {
        CACHE.put(entry.id, entry);
    }

    private static WeatherTodayModel getFromCache(String cityId) {
        return CACHE.get(cityId);
    }

    /** Deserializes a model straight from its JSON representation. */
    public static WeatherTodayModel fromJson(String json) {
        Gson gson = new Gson();
        return gson.fromJson(json, WeatherTodayModel.class);
    }

    /**
     * Reads a model from a database row, serving it from the cache when the
     * row's city id has been seen before.
     *
     * NOTE(review): the id column constant comes from WeatherDataHelper while
     * the JSON column constant comes from WeatherTodayDataHelper — presumably
     * both tables share the id column name; confirm against the DAO schemas.
     */
    public static WeatherTodayModel fromCursor(Cursor cursor) {
        final String cityId = cursor.getString(cursor.getColumnIndex(WeatherDataHelper.WeatherDBInfo.ID));
        WeatherTodayModel cached = getFromCache(cityId);
        if (cached != null) {
            return cached;
        }
        final String json = cursor.getString(cursor.getColumnIndex(WeatherTodayDataHelper.WeatherTodayDBInfo.JSON));
        WeatherTodayModel parsed = new Gson().fromJson(json, WeatherTodayModel.class);
        addToCache(parsed);
        return parsed;
    }

    /** Envelope matching the remote API response: {"weatherinfo": {...}}. */
    public static class WeatherTodayRequestData {
        public WeatherTodayModel weatherinfo;
    }
}
|
|
The news of the 2015 remastering of Air Jordan retros has resulted in a load of early photos featuring next year’s Jordans. Normally at this time we’d be stuck pondering what was to come based off early product sheets and such, but this time around we’ve got high res previews of everything for your viewing pleasure. This time around: the Air Jordan 7 “French Blue”. So far the group of Spring 2015 Air Jordans has been a newer leaning group, and this retro+ colorway sticks with that trend. See the 2015 Air Jordan 7 “French Blue” below and watch for extended previews right here on Sneaker News.
|
|
A Blog on India
Menu
Connect The Dots
In her first book Stay Hungry Stay Foolish Rashmi Bansal profiled twenty five entrepreneurs who were alumni of IIM – Ahmedabad. Many had then wondered, including yours truly, how important an MBA degree is to become an entrepreneur. Rashmi claims this inspired her to write Connect The Dots, the story of twenty one entrepreneurs who don't have an MBA degree. The format of the book is the same as her last book. There are twenty chapters, one on each entrepreneur (Gaurav Rathore & Saurabh Vyas, who co-founded PoliticalEDGE, are covered in one chapter) and the entire chapter is based on one single interview.
The book is divided into three sections: Jugaad, Junoon & Zubaan. Jugaadis are those who didn't get any formal training in business but learned by observing, experimenting and applying their mind. It includes someone like Kunwer Sachdev of Su-Kam who created a Rs 500 crore company from scratch; Ganesh Ram, who started what is today India's largest English training academy, VETA, when there were no BPOs and no one knew that English coaching would be as big a market as it is now.
Junoonis as the name suggests, are passionate about something that is ahead of its time. This was my favorite section in the book. Gaurav Rathore and Saurabh Vyas envisioned a consulting and research firm exclusively for politics and founded PoliticalEDGE; Satyajit Singh, founder of Shakti Sudha not only created a new industry but also benefited thousands of farmers in rural Bihar; Chetan Maini, founder of Reva, designed a solar car and has been producing electric cars since the time when global warming was not so well known and creating electric cars seemed to make little sense.
The third section Zubaan is about creative people like Paresh Mokashi, creator of Harishchandrachi Factory, India’s official entry to Oscar last year or Krishna Reddy, whose Prince Dance Group, consisting of daily wage laborers won India’s Got Talent last year.
I had great hopes for the book as I loved Stay Hungry Stay Foolish. The first chapter on Prem Ganpathy is literally a rags to riches story of someone who came to Mumbai with no money and now owns Dosa Plaza, a fast food chain with 26 outlets in the country. The rest of the stories too are very encouraging. The book is replete with inspiring anecdotes and quotes. When I read the synopsis of the third section, i.e. Zubaan, I thought it would probably be the weak link in this book, as stories on creatives who had made it big in the field of art would be a misfit in this book about entrepreneurs. However, all these artists achieved commercial success by following their passion and this justifies their inclusion in this book about entrepreneurs. Entrepreneurship after all is about following your heart.
Generally when the first book is good and successful, authors fail to recreate the magic in their subsequent books, and that too in the same genre, as people have high expectations. In this case Rashmi Bansal definitely exceeded my expectations. A very good book and a must-read for someone who aspires to be an entrepreneur.
|
|
Sprint International
Sprint International may refer to:
Sprint Corporation, telecommunications company
The International (golf), golf tournament
|
|
I soon realised that Kathy and I had settled at the periphery of the rules and the order, separated categorically from the mystics and their task; we existed like stray animals sheltered in a monastery.
|
|
Ancient toolmaking site discovered near Niagara Falls
Archaeologists have found arrowheads and drills, indicating that the camps were occupied for extended periods of time.
DIGGING FOR TOOLS: Students at work in 2006 excavating a feature at the site on Grand Island that was most likely a hearth. (Photo: L.M. Anselmi)
An ancient campsite where people were manufacturing tools has been discovered near the Niagara Falls.
This find, combined with other archaeological discoveries in the area over the past few decades, suggests that such campsites lined the Niagara River as far back as 4,000 years ago.
So far, the team has unearthed more than 20,000 artifacts, mostly bits of rock broken off when people were creating stone tools, on the southeastern tip of Grand Island New York, about 12 miles (20 km) upstream from Niagara Falls. The earliest artifacts at the site date back at least 4,000 years, opening a window on a time when people were living a nomadic lifestyle based on hunting, fishing and gathering plants. [In Photos: Digging Up Niagara's History]
"I would anticipate that there would have been, back in the day, these kinds of campsites all along the Niagara River on both sides and on both sides of the island," team leader Lisa Anselmi, of Buffalo State University of New York, told LiveScience.
The archaeologists found that people at the Grand Island site were making a wide variety of tools, including spear points, arrowheads and even a few stone drills. Anselmi said that the drills "would be sharp enough to go through a piece of leather... or go through shell or some bone to create a bead."
The team also found bits of yellow and red ochre at the site; in ancient times it was common, for religious reasons, for ochre to be applied on the skin of someone who was being buried. No evidence of burials has been found so far at the site.
Stretching across time
The south tip of Grand Island appears to have been occupied for an extended time.
Fragments of pottery dating between 2,900 and 1,500 years ago found by Anselmi and her colleagues suggest inhabitants experimented with ceramic production, using pots to collect nuts and plant remains.
The team also found spear points that date back around 500 years, to a period shortly before Europeans started arriving in the area. More recent artifacts included nails from houses built in the 19th century and bullets that appear to date to the 1930s or 40s.
Anselmi said that the site probably would have been used mainly between the spring and fall, when food would have been plentiful. "The island would have had the advantage of being close to the river (with) lots of freshwater fish and other kinds of resources from the river," she said. Also, "in all likelihood there would have been a very strong deer population on the island."
Crossing the Niagara River
To get to Grand Island people in antiquity would have had to cross the Niagara River. Today, the fast-flowing waterway moves at a rate of about 2-3 feet per second near the island.
Curiously, rather than making use of rock found on the island, the ancient people imported a type of Onondaga chert — a tough limestone that they would have had to carry across the river from the mainland.
Anselmi explained that they would have brought over small bits of this rock that could then be molded into tools. "It's not necessarily that they're filling a canoe up with boulders," she said.
By using Onondaga chert the people of Grand Island were continuing a toolmaking tradition that goes back to when people were first entering New York State.
For instance, at a site called Emanon Pond, located in western New York, people were using the material almost exclusively nearly 11,000 years ago.
"With the exception of a single projectile point made from glacially derived drusy quartz, all of the artifacts are manufactured using local Onondaga chert," write Peter Neal Peregrine and Melvin Ember in the North America edition of the "Encyclopedia of Prehistory," published in 2001.
The findings were presented in May at a meeting of the Toronto chapter of the Ontario Archaeological Society.
|
|
Commonwealth Bank and the Australian Chamber Orchestra kick off the 2009 Great Romantics national tour
Sydney, 11 June 2009: The Commonwealth Bank today congratulated the Australian Chamber Orchestra (ACO) on the commencement of its Great Romantics Tour.
Commonwealth Bank Group Executive Human Resources and Group Services, Ms Barbara Chapman, said the Group was committed to supporting the Arts in Australia and helping its customers, staff and the Australian community engage with music at the highest level.
“As a partner of the ACO since 1988, we have been privileged to watch it grow into the world class orchestra that it is today,” she said.
“We are proud of our ongoing support and commitment to the ACO and excited to be the 2009 National Tour Partner for the Great Romantics.”
Ms Chapman said the Commonwealth Bank was especially proud to loan its rare Guadagnini violin – crafted in 1759 in Parma, Italy, and purchased by the Bank in 1996 – to ACO’s Principal Second Violin and leader of the ACO’s Emerging Artists Program, Helena Rathbone.
“We are delighted that on the violin’s 250th birthday, it is played by such an exquisite violinist for the enjoyment and appreciation of thousands of Australians,” she said.
Ms Chapman said the Bank’s partnership with the ACO was one of three national Arts partnerships for the Group, which included Opera Australia and Bangarra Dance Theatre.
The Australian Chamber Orchestra’s Artistic Director, Mr Richard Tognetti, said he was proud of the Orchestra’s long association with the Bank.
“When I started at the ACO in 1989, the Orchestra only had a handful of corporate supporters and we were in desperate need of committed companies who would be prepared to inject cash and help fuel some new ideas,” he said.
“My dream was to create a first-rate Australian Orchestra that could hold its own anywhere in the world. The Commonwealth Bank took a risk on my dreams and, 21 years on, we have one of the most fruitful corporate relationships I’ve ever seen.”
To find out more about the Bank’s support for the Arts, visit commbank.com.au
|
|
/*
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#import "SimpleDBMissingParameterException.h"
@implementation SimpleDBMissingParameterException

@synthesize boxUsage;

// Designated initializer: defers entirely to the superclass; this
// exception type adds no extra state at init time.
-(id)initWithMessage:(NSString *)theMessage
{
    self = [super initWithMessage:theMessage];
    return self;
}

// Copies service-reported fields from a generic AmazonServiceException,
// extracting the SimpleDB-specific BoxUsage value when present.
-(void)setPropertiesWithException:(AmazonServiceException *)theException
{
    [super setPropertiesWithException:theException];

    NSString *boxUsageString = [theException.additionalFields valueForKey:@"BoxUsage"];
    if (boxUsageString != nil) {
        self.boxUsage = [AmazonSDKUtil convertStringToNumber:boxUsageString];
    }
}

// Human-readable dump: this class's fields followed by the superclass's,
// wrapped in braces. Returns an autoreleased string (manual ref counting).
-(NSString *)description
{
    NSMutableString *desc = [NSMutableString stringWithCapacity:256];

    [desc appendString:@"{"];
    [desc appendFormat:@"BoxUsage: %@,", boxUsage];
    [desc appendString:[super description]];
    [desc appendString:@"}"];

    return desc;
}

// Pre-ARC memory management: release owned ivars, then super.
-(void)dealloc
{
    [boxUsage release];

    [super dealloc];
}

@end
|
|
Various non-informational, non-programmable nanoparticles have been known in the art, such as those disclosed in Zhang, et al., Science 272:1777-1779, 1996; LaRue et al., Macromolecules 39:309-314, 2006; Ishihara et al., Chem. Eur. J. 13:4560-4570, 2007; Kim et al., Angew. Chem., Int. Ed 46:5779-5782, 2007; Li et al., Macromolecules 41:6605-6607, 2008; Roy et al., Chem. Commun. 2106-2108, 2009; and Fernyhough et al., Soft Matter 5:1674-1682, 2009. There is a need in the art for micelles that are capable of changing morphology in a predictable or programmable way. Provided herein are solutions to these and other problems in the art.
|
|
Kaltbrunn railway station
Kaltbrunn railway station is a railway station situated in the municipality of Kaltbrunn in the Swiss canton of St. Gallen. It is located on the Uznach to Wattwil line, close to the western portal of the long Ricken Tunnel.
The station is served by hourly St. Gallen S-Bahn service S4, which operates in both directions around a loop via Wattwil, St. Gallen, Sargans, Ziegelbrücke and Uznach.
References
Category:Railway stations in the canton of St. Gallen
Category:Swiss Federal Railways stations
|
|
Frederick Lohden
Frederick Charles Lohden OBE (13 June 1871 – 13 April 1954) was an English sportsman who played rugby union as a forward at international level for England in a single game during the 1893 Home Nations Championship. After retiring from playing sport he became a sports administrator, most notably as the chairman of the Lawn Tennis Association.
Personal history
Lohden was born in Hartlepool in the north of England on 13 June 1871 to Jacob and Mary Lohden, and christened at Christ Church, Hartlepool on 12 July of that year. He attended Durham School as a youth, completing his education in France and Germany. In 1898 he was married to Margaret Emily Marshall of Broadwater, Sussex.
With the outbreak of the First World War, Lohden, who already had military experience, was promoted to Lieutenant in the 4th Durham Volunteer Artillery. He later joined the East Surrey Regiment. In 1917 he was transferred to the Ministry of Shipping and was placed in charge of Standard Steamers, Russian Steamers and Oilers. He was awarded the Order of the British Empire in the 1919 New Year Honours for his work for the Ministry of Shipping. He later moved to Cheam on the border between London and Surrey where he worked as a shipping broker. Lohden later became the mayor of Sutton and Cheam, and was also made a Justice of the Peace.
Sporting history
Lohden showed promise as a sportsman while a youth, making the Durham School rugby XV while still a 15-year-old, the biggest forward in his team. On his return from education in mainland Europe he joined Hartlepool Rovers, and by the age of 19 he was selected to play at county level for Durham. By the 1892/93 season he was playing for one of England's premier clubs, Blackheath. While representing Blackheath he came to the attention of the English selectors and was chosen for the South of England team in the trials of the England squad. He was given his first and only cap in the opening game of the 1893 Home Nations Championship against Wales at the Cardiff Arms Park. The game started well for the English side, opening a 7–0 lead in the first half, one of the two tries scored by Lohden. A further England try at the start of the second half appeared to give England an overwhelming lead only to see an historic Welsh comeback, led by their talismanic captain Arthur Gould, which snatched victory from England in the final minutes. Although Lohden never played for England again, a series of minor injuries ending his career by 1896, he was selected for invitational tourists the Barbarians in 1893, and also represented Surrey county. After retiring from playing he kept up his connection with the sport of rugby by being elected onto the Durham County Rugby Union committee, serving them from 1896 to 1902.
As well as rugby, Lohden was a keen sports shooter, and won the Baltic Exchange 'miniature' Championship for three years running. On returning to civilian life after the war, Lohden became increasingly active in the world of racket sports. A skillful badminton player he represented Surrey County playing in four consecutive London Badminton doubles finals in 1920. This was followed by the title of Veteran's Doubles Champion of England in 1921. That year Lohden also set up the Surrey Badminton Association, becoming their first honorary secretary.
In 1907 Lohden put his sporting administrative abilities to further use when he was elected to the Surrey branch of the Lawn Tennis Association. He progressed to becoming the organisation's chairman, and then in 1911 he joined the Council of the LTA. In 1933 he became chairman of the LTA and the following year its vice-president.
References
Bibliography
Category:1871 births
Category:1954 deaths
Category:Rugby union forwards
Category:English rugby union players
Category:England international rugby union players
Category:Barbarian F.C. players
Category:Blackheath F.C. players
Category:Sportspeople from Hartlepool
Category:Officers of the Order of the British Empire
Category:People educated at Durham School
Category:British Army personnel of World War I
Category:East Surrey Regiment officers
Category:Tennis in the United Kingdom
|
|
Association of Chief Police Officers
The Association of Chief Police Officers (ACPO), officially The Association of Chief Police Officers of England, Wales and Northern Ireland, was a not-for-profit private limited company that for many years led the development of policing practices in England, Wales, and Northern Ireland. Established in 1948, ACPO provided a forum for chief police officers to share ideas and coordinate their strategic operational responses, and advised government in matters such as terrorist attacks and civil emergencies. ACPO coordinated national police operations, major investigations, cross-border policing, and joint law enforcement. ACPO designated Senior Investigative Officers for major investigations and appointed officers to head ACPO units specialising in various areas of policing and crime reduction.
ACPO was led by Chief Constable Sir Hugh Orde, QPM, who was, until 2009, the Chief Constable of the Police Service of Northern Ireland. He was elected as president by fellow members of ACPO in April 2009.
ACPO was funded by Home Office grants, profits from commercial activities and contributions from the 44 police authorities in England, Wales, and Northern Ireland.
Following the Parker Review into ACPO, it was replaced in 2015 by a new body, the National Police Chiefs' Council, set up under a police collaboration agreement under Section 22A of the Police Act 1996.
Background
UK policing sprang from local communities in the 1800s. Since the origins of policing, chief officers have regularly associated to discuss and share policing issues. Although ACPO as now recognised was formed in 1948, records of prior bodies go back to the early 1900s. The UK retains a decentralised model of policing based around the settlement which emerged from the Royal Commission on the work of the Police in 1962.
ACPO continued to provide a forum for chief officers across 44 local police forces and 13 national areas across England, Wales and Northern Ireland, and provided local forces with agreed national policies and guidelines.
ACPO failed to convince its sponsors to contribute to its survival and in May 2011 the BBC reported that ACPO would run out of money in February 2012 without extra funding. ACPO was half-funded by the Home Office and half by 44 police authorities. A third of police authorities refused to pay in 2010 and another third were undecided. The Association of Police Authorities said the withdrawal of funding by police authorities was "partly due to a squeeze on their income". ACPO was due to wind up formally in April 2015.
Constitutional status
Over time, demands for coordination across the police service increased as society changed, for example to take account of new developments in international terrorism and organised crime, or roles such as monitoring offenders on release from prison or working with young people to divert them from crime.
In 1997 ACPO was incorporated as a private company limited by guarantee. As a private company, ACPO was not subject to freedom of information legislation. It was not a staff association; the staff association for senior police officers was a separate body, the Chief Police Officers Staff Association (CPOSA).
The change in structure from a "band of volunteers" to a limited company allowed the organisation to employ staff, enter into contracts for accommodation and publish accounts.
A number of options were considered for the status of ACPO, including charitable status, but all were discounted.
Chief Constables and Commissioners are responsible for the direction and control of policing in their force areas. Although a national body and recognized by the government for consultation, ACPO had no powers of its own, nor any mandate to instruct chief officers. However, the organisation allowed chief officers to form a national policy rather than replicate the work in each of their forces. For example, after the 1980–81 riots in 27 British cities including in St. Pauls and Brixton ACPO began to prepare the Public Order Manual of Tactical Operations and Related Matters. Police forces began training in its tactics late in 1983.
Membership
ACPO was not a staff association. It acted for the police service, not its members. The separate Chief Police Officers Staff Association acts for chief officers.
ACPO was composed of the chief police officers of the 44 police forces in England & Wales and Northern Ireland, the Deputy Chief Constable and Assistant Chief Constable of 42 of those forces and the Deputy Commissioner, Assistant Commissioner, Deputy Assistant Commissioner and Commanders of the remaining two - the Metropolitan Police and City of London Police. Certain senior non-police staff and senior members of national police agencies and certain other specialised and non-geographical forces in the UK, the Isle of Man and the Channel Islands were also members.
As of March 2010 there were 349 members of ACPO. The membership elected a full-time President, who held the office of Chief Constable under the Police Reform Act 2002.
ACPO bodies
ACPO was responsible for several ancillary bodies, which it either funded or which received Home Office funding but which reported to ACPO:
ACPO Criminal Records Office
The ACPO Criminal Records Office (ACRO) was set up in 2006 in response to a perceived gap in the police service's ability to manage criminal records and in particular to improve links to biometric data. The initial aim of ACRO was to provide operational support relating to criminal records and associated biometric data, including DNA and fingerprint recognition.
It also issues police certificates, for a fee, needed to obtain immigration visas for countries including Australia, Belgium, Canada, Cayman Islands, New Zealand, South Africa and the United States.
The organization continues under the style "ACRO Criminal Records Office" under the control of Hampshire Constabulary.
ACPO Vehicle Crime Intelligence Service
The Association of Chief Police Officers Vehicle Crime Intelligence Service (AVCIS), later the National Vehicle Crime Intelligence Service (NAVCIS), was managed by ACPO, and was responsible for combating organised vehicle crime and the use of vehicles in crime.
National Community Tension Team
The National Community Tension Team (NCTT) was an ACPO body which monitored religious, racial, or other tensions within communities, and provided liaison between police forces and community organisations.
National Counter Terrorism Security Office
The National Counter Terrorism Security Office was funded by, and reported to, ACPO and advised the British government on its counter terrorism strategy.
Police National Information and Co-ordination Centre
ACPO was responsible for coordinating the national mobilisation of police resources at times of national need through the Police National Information and Co-ordination Centre (PNICC), which it set up in 2003. This included ensuring policing resilience during major events such as emergency response to serious flooding or the investigation of a terrorist attack. PNICC sat alongside the government in COBR (Cabinet Office Briefing Room) to advise on national issues. PNICC also handled support to overseas crises involving UK nationals.
It employed three full-time staff, with other staff seconded to it as needed and is funded by contributions from each of the police forces.
Counter Terrorism Internet Referral Unit
The Counter Terrorism Internet Referral Unit (CTIRU) was set up in 2010 by ACPO (and run by the Metropolitan Police) to remove unlawful terrorist material content from the Internet with a focus on UK based material.
The December 2013 report of the Prime Minister's Extremism task force said that it would "work with internet companies to restrict access to terrorist material online which is hosted overseas but illegal under UK law" and "work with the internet industry to help them in their continuing efforts to identify extremist content to include in family-friendly filters" which would likely involve lobbying ISPs to add the CTIRU list to their filters without the need for additional legislation.
National Wildlife Crime Unit
The National Wildlife Crime Unit is a national police unit that gathers intelligence on wildlife crime and provides analytical and investigative support to law enforcement agencies.
Controversies
Freedom of information
ACPO had been criticised as being unaccountable to Parliament or the public by virtue of its limited company status. In October 2009 Sir Hugh Orde stated that ACPO would be "more than happy" to be subject to the Freedom of Information Act. On 30 March 2010, the Ministry of Justice announced that ACPO would be included under the FOI Act from October 2011. In its response, the organisation stated that "Although organisations cannot voluntarily comply with the Act, a large proportion of ACPO's work is public already or available under FOI through any police force". In January 2011 its website still said it: "is unable to do is to respond to requests for information under the Act. The organisation is too small and there are too few members of staff to be able to conduct the necessary research and to compile the responses". From November 2011, however, FOI requests could be made to ACPO.
Confidential Intelligence Unit
In February 2009, the Mail on Sunday highlighted the involvement of ACPO in setting up the "Confidential Intelligence Unit" as a specialised unit to monitor left-wing and right-wing political groups throughout the UK.
Commercial activities
The February 2009 Mail on Sunday investigation also highlighted other activities of the ACPO including selling information from the Police National Computer for £70 despite it costing them only 60p to access it, marketing "police approval" logos to firms selling anti-theft devices and operating a separate private firm offering training to speed camera operators.
Apartments
The organisation was criticised in February 2010 for allegedly spending £1.6 million per year from government anti-terrorist funding grants on renting up to 80 apartments in the centre of London which were reported as being empty most of the time. The organisation responded that it had reviewed this policy and would reduce the number of apartments.
Undercover activities
As a result of The Guardian articles with regards to the activities and accusations of PC Mark Kennedy of the National Public Order Intelligence Unit within the National Extremism Tactical Co-ordination Unit, and the collapse of the subsequent trial of six activists, a number of initiatives and changes were announced:
Acknowledging that "something had gone very wrong" in the Kennedy case to the Home Affairs Select Committee, Home Office minister Nick Herbert stated that ACPO would lose control of three teams involved in tackling domestic extremism. Herbert announced that the units would be transferred to the Metropolitan Police, with acting commissioner Tim Godwin confirming that this would occur at the earliest possible timescale.
Her Majesty's Inspectorate of Constabulary announced that Bernard Hogan-Howe would lead an investigation into ACPO, to assess whether undercover operations had been "authorised in accordance with law" and "proportionate".
The Association of Police Authorities said it was ending its annual £850,000 grant to ACPO.
DNA database
ACPO has supervised the creation of one of the world's largest per-capita DNA databases, containing the DNA profiles of more than one million innocent people. ACPO's guidelines that these profiles should only be deleted in "exceptional circumstances" were found to be unlawful by the UK Supreme Court in May 2011. They were found to be incompatible with the European Convention on Human Rights, following the ruling by the European Court of Human Rights in S and Marper v United Kingdom. On 1 May 2012, the Protection of Freedoms Act 2012 completed its passage through Parliament and received Royal Assent. To date, ACPO has not reissued revised guidelines to replace its unlawful DNA exceptional procedure. Big Brother Watch, in a report of June 2012, concludes that despite the Protection of Freedoms Act, the retention of DNA in England and Wales remains an uncertain and illiberal regime.
Fake uniforms
During the summer of 2011, Hugh Orde, then president of the ACPO, was seen wearing a dark blue police-style uniform with ACPO insignia, and was accused of wearing a fake uniform. Senior police officers claimed that the uniform was not that of any police force in the country but "closely resembled" the uniform worn by former Metropolitan Police Commissioner, Paul Stephenson. Sam Leith, an author, journalist and literary editor of The Spectator, mocked Orde's decision "to wear this Gadaffi-style pretend uniform on television", and suggested it was "a subliminal pitch for the Met Commissioner's job." Brian Paddick, at the time the Police Commander for the London Borough of Lambeth, said: "It's unusual for the president of ACPO to appear in all these interviews in uniform. He is sending a clear signal: how would I look in the commissioner's uniform?" One officer noted: "If anything, Hugh should be wearing the uniform of the Police Service of Northern Ireland because that's where he served. But their uniform is green, not the dark blue he currently wears." An ACPO spokesperson stated that the "Police Reform Act 2002 states that the President of the Association of Chief Police Officers holds the rank of chief constable. Not being a member of a particular force, the President wears a generic police uniform".
Parker Review
In 2013, an independent review of ACPO by General Sir Nick Parker was published. It recommended that ACPO be replaced by a new body, in the interests of greater transparency and cost effectiveness. On the basis of these recommendations, a new organization, the National Police Chiefs' Council, was set up to replace ACPO, which it did on 1 April 2015.
Notable members
Commander Christine Jones (Metropolitan Police), lead on mental health issues
References
External links
Association of Chief Police Officers website (archived link from March 2015)
Category:Law enforcement in England and Wales
Category:Law enforcement in Northern Ireland
Category:Organizations established in 1948
Category:British intelligence agencies
Category:Privately held companies of the United Kingdom
Category:Counter-intelligence agencies
Category:1948 establishments in the United Kingdom
Category:2015 disestablishments in the United Kingdom
Category:Law enforcement-related professional associations
|
|
Visual attention to features by associative learning.
Expecting a particular stimulus can facilitate processing of that stimulus over others, but what is the fate of other stimuli that are known to co-occur with the expected stimulus? This study examined the impact of learned association on feature-based attention. The findings show that the effectiveness of an uninformative color transient in orienting attention can change by learned associations between colors and the expected target shape. In an initial acquisition phase, participants learned two distinct sequences of stimulus-response-outcome, where stimuli were defined by shape ('S' vs. 'H'), responses were localized key-presses (left vs. right), and outcomes were colors (red vs. green). Next, in a test phase, while expecting a target shape (80% probable), participants showed reliable attentional orienting to the color transient associated with the target shape, and showed no attentional orienting with the color associated with the alternative target shape. This bias seemed to be driven by learned association between shapes and colors, and not modulated by the response. In addition, the bias seemed to depend on observing target-color conjunctions, since encountering the two features disjunctively (without spatiotemporal overlap) did not replicate the findings. We conclude that associative learning - likely mediated by mechanisms underlying visual object representation - can extend the impact of goal-driven attention to features associated with a target stimulus.
|
|
ABC News’ Good Morning America outstripped NBC News’ Today by 761,000 viewers and 279,000 news demo viewers the week of April 7. It’s GMA‘s seventh consecutive week on top of the morning infotainment show race in both metrics, and its largest demo margin in three months. GMA has ranked No. 1 in overall audience for 89 of the past 93 weeks, and No. 1 in the news demo for 25 of this season’s 29 weeks to date.
Today meanwhile, boasted it finished first with the younger, 18-49 year old age bracket, for the 42nd consecutive week. Today is on top of the ratings in the daypart with men 25-54 this season, NBC noted — as well as adults, men and women 18-49. Today has posted seven consecutive months of ratings growth in total viewers, and both the 25-54 and 18-49 demos which NBC says is the show’s biggest ratings uptick since ’97.
For the week, GMA clocked 5.617 million viewers — 2.212 million in the demo. Today logged 4.856 million viewers — 1.933 million in the demo. GMA bested CBS This Morning‘s 3.041 million viewers — 956,000 in the news demo.
8 Comments
now if they would only get rid of Roker and Daly, maybe I would watch again. Also replace Hall in the 9 o’clock hour. She is awful. GIVE ME MY GEIST BACK
B stock • on Apr 17, 2014 8:54 am
I love GMA but they really need to get rid of the music that you have to listen to even when the anchors are talking…. So annoying….off today and excited to watch but had to turn channel because the music is too loud and so annoying… George even asked for the music to be turned down!
Bill • on Apr 17, 2014 8:54 am
who cares
Carol Dehart • on Apr 17, 2014 8:54 am
I miss Sam and josh very much. Congrats over you numbers. Please have Sarah on more
edna • on Apr 17, 2014 8:54 am
I love GMA, but I miss Sam and Josh.
Carla • on Apr 17, 2014 8:54 am
Format is fantastic – notice Today ditched their ugly sofa for the “round table.” Nothing like GMA camaraderie! Little late Today producers! Greatly miss Josh and Sam. Not so keen w/Ginger – maybe trying too hard, not found her “niche.” Only complaint? Too much Estrogen on the show! Enjoy success GMA!
Barrack • on Apr 17, 2014 8:54 am
I love, The new Weather Person…….Sam was great, but it was good that he moved on. Ginger is fresh and of course the storm chaser!
Lara, has done well in her position. I did not think anyone could
take Dianne’s place she has done very well. Now as for Josh, well he did not stay long enough to matter. Easy to replace. Robin is a fixture, so is George. The rest just compliment them. Ohhhhhh and
Stahan wow that will be awesome!! Go GMA!!
Sixto • on Apr 17, 2014 8:54 am
Thanks God that people are discarding Lauer and in the future Al Roker as hosts of Today. People are being conscientious that Lauer is pucking and that Al is passe with the same phrase over and over and over “now lets see whats happening in your neck of the woods” .
|
|
Irinotecan (CPT-11, Campto®) -- a semisynthetic, water-soluble derivative of the plant alkaloid camptothecin -- is the standard of care in the treatment of advanced colorectal cancer when 5-fluorouracil (5-FU)-based therapy has failed ([Cunningham *et al*, 2001](#bib5){ref-type="other"}). Phase II trials have demonstrated objective response rates of 16--27% in pretreated patients, with stabilisation of disease in a further 40--60% of patients ([Rougier *et al*, 1997](#bib13){ref-type="other"}; [Van Cutsem *et al*, 1999](#bib17){ref-type="other"}). Median overall survival rates of up to 10 months are achievable when irinotecan is used in relapsed/refractory colorectal cancer ([Shimada *et al*, 1993](#bib15){ref-type="other"}; [Rothenberg *et al*, 1996](#bib12){ref-type="other"}, [1999](#bib11){ref-type="other"}; [Pitot *et al*, 1997](#bib10){ref-type="other"}; [Rougier *et al*, 1997](#bib13){ref-type="other"}; [Van Cutsem *et al*, 1999](#bib17){ref-type="other"}). Two European phase III trials investigating the efficacy and safety of irinotecan, following 5-FU failure in advanced colorectal cancer, have demonstrated significant improvements in survival compared with best supportive care and 5-FU ([Cunningham *et al*, 1998](#bib6){ref-type="other"}; [Rougier *et al*, 1998](#bib14){ref-type="other"}). The main adverse events accompanying treatment with irinotecan in these trials were diarrhoea, neutropenia, fatigue, nausea and vomiting.
Although 350 mg m^−2^ as an intravenous infusion every 3 weeks is the standard recommended dosage of irinotecan, pharmacokinetic parameters of irinotecan-lactone and the active metabolite SN-38-lactone vary between individuals ([Xie *et al*, 2002](#bib18){ref-type="other"}). This may be attributed to differences in the levels of the enzymes that metabolise irinotecan, notably carboxylesterase for SN-38. Furthermore, the variable interindividual patient exposure to SN-38 has been identified as an important determinant of toxicity ([Mathijssen *et al*, 2002](#bib8){ref-type="other"}).
At the same time, there is convincing evidence of a dose--response relationship, and therefore a rationale for increasing doses when possible. In a phase I trial by [Abigerges *et al* (1995)](#bib1){ref-type="other"}, there were two recommended doses: 350 mg m^−2^ without high-dose loperamide and 600 mg m^−2^ with high-dose loperamide. With the exception of one responder treated at 260 mg m^−2^, all objective responses were observed at dose levels above 350 mg m^−2^. [Merrouche *et al* (1997)](#bib9){ref-type="other"} provided further support for this from a phase I trial in which an increased tumour response was seen at an irinotecan dose level of 500 mg m^−2^.
Thus, these data suggest that a fixed-dose strategy for administration of irinotecan may not be optimal for all patients, thereby compromising treatment. The interindividual variability in pharmacokinetic parameters and dose--response relationship provided the rationale for investigating a dose optimisation strategy for irinotecan ([Chabot *et al*, 1995](#bib4){ref-type="other"}). The present study investigated different strategies, using doses of irinotecan up to 500 mg m^−2^, as single-agent therapy in the treatment of patients with metastatic colorectal cancer resistant to 5-FU.
METHODS
=======
Patients
--------
Eligibility criteria included metastatic, histologically proven adenocarcinoma of the colon or rectum progressing on 5-FU-based chemotherapy (adjuvant and/or palliative); administration of ⩽2 5-FU-based regimens in the adjuvant setting or ⩽1 in the palliative setting; World Health Organization (WHO) performance status (PS) of ⩽2; adequate haematological, renal and hepatic function. Exclusion criteria included prior treatment with topoisomerase-I inhibitors; evidence of central nervous system metastases; prior history of chronic diarrhoea; current infection; or any other serious illness or medical condition.
Study design and conduct
------------------------
This was a prospective, randomised, multicentre, open-label, phase II study. The study was conducted in accordance with the Declaration of Helsinki (Hong Kong revision, 1989) and with the approval of the Ethics Committee (Institutional Review Board) at each participating centre. Written informed consent was obtained from each patient prior to his or her enrolment into the trial. An independent Monitoring Committee regularly assessed the safety and efficacy issues and reviewed the conduct of the study if needed. An External Response Review Committee (ERRC) assessed tumour responses without knowledge of the randomisation arm. The aim of the study was to determine the optimal dosing strategy in terms of efficacy and safety of single-agent irinotecan (by individual dose optimisation based on patient tolerance to treatment, or optimisation based on specific baseline risk factors) in the treatment of 5-FU-resistant patients with metastatic colorectal cancer. The primary efficacy endpoint was the overall response rate.
### Dosing scenarios
Patients were randomised to one of three groups (A, B and C (outlined below)), each group receiving irinotecan as a 30 min intravenous infusion scheduled every 21 days. This dosing interval could be extended to a maximum of 35 days in the event of persistent toxicity to allow satisfactory recovery from the previous cycle. Doses \<250 mg m^−2^ or \>500 mg m^−2^ were not used in this study; patients who exhibited significant toxicity at 250 mg m^−2^ were withdrawn from the study.
Group A was the reference group in which a fixed dose of 350 mg m^−2^ of irinotecan was administered on Day 1. In subsequent cycles, the dose of irinotecan could be decreased (but not increased) according to the presence of significant toxicity at this dose.
Groups B and C investigated dosing scenarios to select patients for whom the higher dose of irinotecan (500 mg m^−2^) could be optimally used. Patients randomised to Group B received irinotecan at a starting dose of 250 mg m^−2^ followed by increasing doses (350 and 500 mg m^−2^) depending on the tolerance observed in the preceding cycle. In the event of significant toxicity, dose reductions were implemented.
In Group C, the irinotecan dose was based on protocol-defined toxicity risk factors identified at baseline: grade 3--4 neutropenia (bilirubin \>70% upper limit of normal (UNL), haemoglobin \<12 g dl^−1^, \>3 organs involved) and/or grade 3--4 diarrhoea (PS⩾1, creatinine \>70% UNL ([Freyer *et al*, 2000](#bib7){ref-type="other"})). Patients could be started at an irinotecan dose of 500 mg m^−2^ in the absence of toxicity risk factors. The starting dose of irinotecan was 350 mg m^−2^ in patients with one risk factor or one factor from each group, and 250 mg m^−2^ for patients with \>2 risk factors or two factors from the same group. The dose was not escalated, but could be reduced to 250 mg m^−2^ in the event of significant treatment-emergent toxicity.
Concomitant treatments and follow-up
------------------------------------
Antiemetic drugs were administered as premedication to irinotecan infusions. Atropine was permitted for acute anticholinergic symptoms and loperamide (or similar) for delayed diarrhoea. In addition, preventative oral antibiotic therapy (e.g. an oral fluoroquinolone) was administered to patients with persistent (\>48 h) grade 4 diarrhoea or for diarrhoea associated with grade 3--4 neutropenia or fever. No granulocyte-colony-stimulating factor (G-CSF) support was allowed. All patients were followed until disease progression, unacceptable toxicity or death occurred, or the patient chose to withdraw from the trial. In all cases, in each group where toxicity necessitated a dose reduction, delay or study treatment termination, the patient was followed up until the event had resolved.
Efficacy, safety and pharmacokinetic evaluations
------------------------------------------------
Tumour response rate, the primary efficacy end point, was measured according to WHO criteria and evaluated by the ERRC. Response was defined as complete (CR) plus partial (PR) response and as tumour growth control in terms of stabilisation of disease (PR plus no change/stable disease). Secondary efficacy variables were the duration of response and disease stabilisation, time to progression (TTP), time to treatment failure (TTF) and overall survival. The duration of response was measured from the first day of infusion of irinotecan to the first date that disease progression was noted or to the date of death for any reason. Time to progression was calculated from the date of randomisation to the first documented date of progression or the date of death for any reason. Time to treatment failure was the period between the date of randomisation and the date of tumour progression or treatment discontinuation for any reason. Survival was defined as the time between randomisation and death. Efficacy evaluations were performed using intent-to-treat (ITT) and per-protocol (eligible and evaluable) patient populations.
The safety population comprised all patients who had started at least one infusion of study treatment. Safety was assessed according to the National Cancer Institute Common Toxicity Criteria or, if this was not applicable, graded as mild, moderate, severe or life threatening. The safety analysis was based on the worst grade by patient and by cycle. Deaths during the trial and up to 30 days from the last infusion were recorded.
Pharmacokinetic evaluations were performed using a population approach ([Chabot *et al*, 1995](#bib4){ref-type="other"}; [Canal *et al*, 1996](#bib3){ref-type="other"}). At 30 min prior to infusion, and at 5 min and 3--4 h postinfusion (an additional sample was collected at 24 h postinfusion in some cases), three 5 ml blood samples (plus one predrug sample) were taken for analysis at the first cycle of chemotherapy for Groups A and C, and at the first, second and third cycles for Group B. Plasma levels of irinotecan and SN-38 were measured using reverse-phase high-performance liquid chromatography with camptothecin as an internal standard. Peak plasma concentration (*C*~max~) and the area under the plasma concentration--time curve (AUC) were calculated for both irinotecan and SN-38. In addition, total body clearance was calculated for irinotecan, and the time to reach *C*~max~ (*t*~max~) as well as the AUC normalised to 1 mg of irinotecan (AUC~N~) were calculated for SN-38. A three- and two-compartment model was used for irinotecan and SN-38, respectively.
Statistical considerations
--------------------------
Using the hypothesis that the response rate in Groups B and C would be 20%, a total of 64 patients in each of these groups were required to yield a confidence interval (CI) band of ⩽20%. For the reference group (Group A), the number of subjects randomised was half that of Groups B and C. The 95% CIs were estimated for response, using the exact method. Confidence intervals on median values were estimated using the method described by Brookmeyer and Crowley ([Simon *et al*, 1985](#bib16){ref-type="other"}). Descriptive statistics only were used for the pharmacokinetic parameters in each group.
RESULTS
=======
Patients
--------
A total of 164 patients entered the study: 36 in Group A, 62 in Group B and 66 in Group C ([Table 1](#tbl1){ref-type="table"}). The majority of patients (⩾97%) had received surgery and 20--30% had received radiotherapy and/or prior adjuvant chemotherapy. Based on the assessment of baseline risk factors previously described, 23 (35%) patients in Group C were assigned to receive a starting dose of 250 mg m^−2^ irinotecan, 37 (56%) patients to 350 mg m^−2^ and six (9%) patients to 500 mg m^−2^.
A total of 144 (88%) patients (31, 51 and 62 in Groups A, B and C, respectively) were eligible and evaluable for the efficacy analyses. Nine patients were ineligible due to major protocol violations (\>1 line of palliative chemotherapy, and past or concurrent history of neoplasm other than colorectal adenocarcinoma in one patient) and 12 patients (not mutually exclusive) were nonevaluable for response. Early discontinuation because of adverse events rendered eight patients nonevaluable.
Extent of exposure to irinotecan
--------------------------------
The median dose intensity of irinotecan was similar in the three arms: 114.21 mg m^−2^ week^−1^ (95% CI 76.14--119.21) in Group A, 101.36 mg m^−2^ week^−1^ (95% CI 68.22--158.17) in Group B and 106.69 mg m^−2^ week^−1^ (95% CI 67.11--170.93) in Group C. However, the median cumulative dose was higher in Group A (1948.80 mg m^−2^) than in Groups B (1564.26 mg m^−2^) and C (1326.77 mg m^−2^), possibly due to the longer median treatment time in this group (18 weeks, compared with 16 and 13 weeks in Groups B and C, respectively).
The percentage of cycles delivered at doses of 250, 350 and 500 mg m^−2^ were as follows: 3, 92 and 0% (as this was not an option) in Group A; 41, 30 and 27% in Group B; and 33, 51 and 8% in Group C. A few cycles in each group were given at intermediate doses or at doses above 525 mg m^−2^.
In Group B, the only dose escalation option, 63% of patients had at least one dose escalation from the 250 mg m^−2^ start dose.
More than 80% of patients in each group did not require dose reduction. A total of 36--40% of patients experienced a cycle delay ([Table 2](#tbl2){ref-type="table"}). Although the majority of dose reductions in each group were made for treatment-related reasons (mostly nonhaematological adverse events across all arms), the majority of cycle delays occurred for reasons unrelated to treatment.
Efficacy
--------
### Response rate
In the total (ITT) patient population (*n*=164), the overall response rates were 8, 13 and 9% in Groups A, B and C, respectively ([Table 3](#tbl3){ref-type="table"}). There were no CRs. Tumour growth control rates were higher in Groups A and B and the rates of progressive disease were lower, compared with Group C ([Table 3](#tbl3){ref-type="table"}). The pattern of response across the groups was maintained in the per-protocol (eligible and evaluable) patient population (*n*=144), with overall response rates (no CR) of 10, 16 and 10% in Groups A, B and C, respectively. Corresponding tumour growth control rates were 61, 65 and 53%.
Responses occurred at all dose levels ([Table 3](#tbl3){ref-type="table"}). However, there were only two responses at the 250 mg m^−2^ dose of irinotecan, both in Group C. Although it is difficult to interpret the data based on the small patient numbers in this study, they suggest that starting patients on a dose of 250 mg m^−2^ was not beneficial.
The median duration of response and TTP were significantly longer in Groups A and B compared with Group C (*P*=0.030) ([Table 3](#tbl3){ref-type="table"}). Despite a trend towards a shorter TTF and median overall survival in Group C, there were no significant differences across the arms for these parameters.
Safety and tolerability
-----------------------
All patients were evaluable for safety. At least one adverse event was reported in all patients. However, grade 3--4 adverse events possibly or probably related to the study treatment were reported in less than half of the patients in each group ([Table 4](#tbl4){ref-type="table"}). Most of these were related to haematological or gastrointestinal (GI) events ([Table 4](#tbl4){ref-type="table"}). Grade 3--4 neutropenia with fever or infection was infrequent. Although anaemia was common, it was infrequently reported at grade 3--4 level of severity ([Table 4](#tbl4){ref-type="table"}). Diarrhoea was the most common GI event, occurring in 85% of patients, although grade 3--4 diarrhoea was less frequent (31, 21 and 27% in Groups A, B and C, respectively) ([Table 4](#tbl4){ref-type="table"}). There were no significant between-group differences for any of the adverse events reported. In addition, analysis of adverse events at the different dose levels showed no consistent evidence that toxicity increased with increasing dosage. There was no difference between the three treatment groups for the number of patients reporting ⩾1 grade 3--4 adverse event considered to be possibly or probably treatment-related (Group A, 42%; Group B, 48%; Group C, 49%). Overall, 74 serious adverse events considered possibly or probably related to study medication occurred in 39 patients.
Treatment discontinuations
--------------------------
At the designated study end date, 159 (96.95%) patients had discontinued treatment (Group A, 97%; Group B, 95%; Group C, 99%) ([Table 5](#tbl5){ref-type="table"}). Disease progression resulted in proportionately fewer discontinuations in Group B (57%) than in Groups A (72%) and C (80%), and included fatalities arising from progressive disease (one patient in each of Groups A and B, and two patients in Group C). There was also one treatment-related fatality: a case of aspiration pneumonia secondary to vomiting in a patient in Group B receiving the first cycle of irinotecan 250 mg m^−2^. Five (42%) of the patients who discontinued treatment from Group B were receiving the 250 mg m^−2^ dose option during cycle 1 at the time of withdrawal. Adverse events leading to discontinuations are listed in [Table 5](#tbl5){ref-type="table"}.
Pharmacokinetic parameters
--------------------------
The principal pharmacokinetic parameters for irinotecan and SN-38 measured in 29 assessable patients are presented in [Table 6](#tbl6){ref-type="table"}. The mean total body clearance values of irinotecan were similar across all three groups and no relevant differences in dose-normalised exposure were seen. Exposure to irinotecan and SN-38 increased proportionally over the 250--500 mg m^−2^ irinotecan dose range. In the population pharmacokinetic analysis, exposure to irinotecan appeared to be increased in patients with PS 1 or 2, and in patients with high alkaline phosphatase levels.
DISCUSSION
==========
The results of this phase II study confirm the activity of single-agent irinotecan in patients with metastatic colorectal cancer who have failed previous therapy with 5-FU. All three treatment strategies investigated were active and demonstrated acceptable tolerability patterns. Although almost all patients in the study had ⩾1 adverse event, less than half of the patients in each treatment strategy had grade 3--4 toxicity.
The main aim of this study was to determine the optimal irinotecan dosing regimen for the treatment of this population, with the primary end point being response rate. The highest overall response rate was seen in patients in Group B (13%). In this group, four (21%) of the 19 patients receiving irinotecan 500 mg m^−2^ achieved a response. There was little difference in the overall response rates in Groups A and C (8 and 9%, respectively). An interesting observation in this study was the relatively higher rate of progressive disease in Group C (44%) compared with Groups A and B (36 and 31%). None of the differences in response rate between the groups were statistically significant. It is worth mentioning that the response rate observed in Group A was unusually low, and less than that seen in published studies of similar populations of patients treated with the same schedule ([Rougier *et al*, 1997](#bib13){ref-type="other"}; [Van Cutsem *et al*, 1999](#bib17){ref-type="other"}). This may be due to changes in first-line treatment that have occurred in recent years; compared with patients treated in earlier studies, those in the present study may have been more heavily pretreated with 5-FU and oxaliplatin in the first-line setting, thus making them more chemotherapy resistant. Despite the lower response rate in Group A, it is within the CIs of previous studies and so can be considered representative.
The lack of a significant difference in overall response rates between the groups may reflect the fact that the median dose intensity of irinotecan delivered was relatively constant across the three groups, despite a proportion of patients in Groups B (34%) and C (9%) receiving an irinotecan dose of 500 mg m^−2^. This finding is probably due mainly to the fact that a disproportionate number of patients (more than one-third) in each of Groups B and C never received a dose of more than 250 mg m^−2^, and so were possibly underdosed. The likelihood of underdosing in Groups B and C is supported by the observation that only 6% of patients in Group A required dose reduction from 350 to 250 mg m^−2^.
There were no significant differences between Groups A and B in TTP or overall survival. This may be due to an insufficient powering of the study and/or too small a difference in response rates. A previous meta-analysis conducted in patients with advanced colorectal cancer reported that only large differences in response rate correspond to a significant difference in TTP ([Buyse *et al*, 2000](#bib2){ref-type="other"}). Both TTP and duration of response were significantly shorter in Group C than in Groups A and B, and there was also a trend for a shorter overall survival in this group. The relatively poor efficacy seen in Group C may have been due to a combination of underdosing (i.e. a significant number of patients receiving irinotecan 250 mg m^−2^) and the small number of patients who received the high dose of irinotecan (500 mg m^−2^) (six patients or 9%).
There was a trend towards a better safety profile in Group B. Grade 3--4 neutropenia was 31% in Group B, 47% in Group A and 44% in Group C. Similarly, there was less grade 3--4 diarrhoea in Group B as compared with Groups A and C (21 *vs* 31 and 27%, respectively), despite 34% of patients receiving the highest irinotecan dose. We cannot exclude the contribution to this difference of imbalances in gender ratio (more male patients in Group B) and PS (more patients with PS=0 in Group B). However, it is possible that the results reflect the aim of the strategy adopted in Group B, which was to avoid subjecting patients to higher doses than they were able to tolerate. Indeed 10 out of 12 patients in Group B who withdrew from the study due to treatment-related adverse events received the lowest dose of irinotecan (250 mg m^−2^) and therefore would not have tolerated an increased dose of irinotecan. However, it should also be noted that severe toxicity leading to treatment discontinuation occurred more frequently in Group B despite the low dose given to all patients in the first cycle. In Group C, despite the strategy of basing the initial irinotecan dose on predetermined risk factors, patients administered the 250 mg m^−2^ dose demonstrated higher rates of grade 3--4 anaemia and diarrhoea compared with those receiving the 350 and 500 mg m^−2^ doses.
This study demonstrates that intrapatient dose escalation based on toxicity in the preceding cycle dose, as practised in Group B, is feasible. Although the increase in the response rate over the whole group was modest compared with the standard irinotecan dose, clinical benefit may be seen in those patients who are able to receive 350 and 500 mg m^−2^, which, in this study, was associated with a response rate of 25 and 21%, respectively. The findings of our study in pretreated patients are in agreement with those of a nonrandomised study in previously untreated patients ([Ychou *et al*, 2002](#bib19){ref-type="other"}): the greater proportion of patients who are able to receive the higher dose and the higher response rate achieved in the latter study compared with our study is probably a reflection of interstudy differences in the starting dose, dose escalation guidelines and in the study population (previous treatment compared with no previous treatment).
In contrast with the feasibility of the strategy in Group B, the use of dose optimisation according to the baseline risk characteristics identified in our study protocol (as practised in Group C) appeared not to be an appropriate approach. This may be because the risk characteristics identified were not valid in this setting or that the algorithm for dose assignment was not relevant. Further investigation is required to clarify this.
In conclusion, the data from our randomised phase II study suggest that individual dose optimisation based on toxicity in the preceding cycle is feasible and merits further investigation. Increasing the dose of irinotecan to 500 mg m^−2^ can be of benefit in selected patients. It will be necessary to identify the most appropriate starting dose, as the dose of 250 mg m^−2^ used in this study was possibly too conservative. Data from pharmacogenomic research are likely to be useful in the future for identifying the most appropriate starting dose of irinotecan for individual patients.
The following additional investigators contributed to this study: F Cavalli (Switzerland), A Etxeberria (Spain), C Focan (Belgium), H Honegger (Switzerland), R Mathijs (Belgium), M Pestalozzi (Switzerland), M Symann (Belgium) and A Tres (Spain).
######
Patient demographics and baseline characteristics
**Treatment group**
---------------------------------------------------------------------------- --------------------------- --------------------------- ----------------------------
Number of patients (*n*); randomised (eligible and evaluable) 36 (31) 62 (51) 66 (62)
Gender; male : female (%) 50 : 50 71 : 29 62 : 38
Age in years; median (range) 60 (29--71) 59 (33--70) 60 (30--70)
Weight loss at baseline in relation to usual body weight (% of population)
⩽5% 88.9 85.5 87.9
\>5% 5.6 4.8 3.0
Unknown 5.6 9.7 9.1
Mean loss (kg) 1.1 0.9 1.0
WHO PS
Median 1 0 1
0 (%) 50.0 59.7 45.5
1 (%) 44.4 35.5 53.0
2 (%) 5.6 4.8 1.5
Primary tumour location
Colon 63.9 66.1 66.7
Rectum 36.1 33.9 33.3
Number of organs with metastatic involvement; median (range) 2 (1--3) 2 (1--3) 2 (1--4)
Synchronous metastases (%) 41.7 59.7 56.1
Sites of metastatic disease (%)
Liver 69.4 79.0 80.3
Liver alone 48.0 53.1 37.7
Liver and other organs 52.0 46.9 62.3
Lung 41.7 30.6 31.8
Peritoneum 11.1 4.8 13.6
Lymph nodes 11.1 21.0 22.7
Colon 0 6.5 1.5
All others^a^ 27.8 22.6 30.3
Median (range) time to randomisation (months) from
First diagnosis 18.1 (4.7--82.3) (*n*=35) 12.7 (3.0--76.3) (*n*=61) 12.6 (3.2--160.1) (*n*=66)
First metastasis 9.1 (0.0--54.7) (*n*=35) 9.0 (0.6--42.7) (*n*=62) 8.1 (0.1--51.6) (*n*=65)
Prior anticancer treatment (% of patients)
Surgery 97.2 98.4 97.0
Radiotherapy 30.6 21.0 22.7
Adjuvant chemotherapy 33.3 25.8 21.2
At least one symptom at baseline
(% of patients) 72.2 62.9 77.3
At least one abnormal laboratory value at baseline (% of patients) 97.2 95.2 93.9
Soft tissue, bone, adrenal, pelvis, abdomen, pleura, retroperitoneum, spleen, mediastinum, skin.
WHO, World Health Organization.
######
Extent of exposure to irinotecan
**Treatment group**
----------------------------------------------------------- --------------------------- ----------------------------- ---------------------------
Number of patients exposed 36 62 66
Number of treatment cycles 216 370 333
Median (range) number of cycles 6 (1--24) 5 (1--21) 4 (1--15)
Median (range) treatment duration (weeks) 18 (3--78) 16 (3--64) 13 (3--46)
Cycles by dose (% of cycles)^a^
250 mg m^−2^ 3 41 33
350 mg m^−2^ 92 30 51
500 mg m^−2^ --- 27 8
Median actual dose intensity (mg m^−2^ week^−1^) (95% CI) 114.21 (76.14--119.21) 101.36 (68.22--158.17) 106.69 (67.11--170.93)
Median cumulative dose (mg m^−2^) (95% CI) 1948.80 (314.65--8373.08) 1564.26 (247.52--10 100.00) 1326.77 (249.73--4899.13)
At least one dose increase (% of patients) --- 63 ---
At least one dose reduction^b^
% of patients 17 15 17
% of cycles 4 3 5
At least one cycle delayed^b^
% of patients 36 40 36
% of cycles 19 15 15
Some cycles were administered at intermediate doses.
For any reason (see text).
######
Efficacy results
**Parameter** **Group A (*n*=36)** **Group B (*n*=62)** **Group C (*n*=66)** ***P*-value^a^**
--------------------------------------------------- ---------------------- ---------------------- ---------------------- ------------------
Overall response rate, % (95% CI)^b^ 8 (1.8--22.5) 13 (5.7--23.9) 9 (3.4--18.7)
Overall response rate, % (95% CI)^b^ Per protocol 10 (2.0--25.8) 16 (7.0--28.6) 10 (3.6--19.9)
250 mg m^−2^ ^c^ --- (0/16) 0% (2/20) 10% NC
350 mg m^−2^ ^c^ (3/31) 10% (4/16) 25% (4/36) 11% NC
500 mg m^−2^ ^c^ --- (4/19) 21% (0/6) 0% NC
Tumour growth control rate (%) 58% 60% 50% NC
Progressive disease (%) 36% 31% 44% NC
Median duration of response (months) 6.4 6.6 4.3 0.03
Median TTP (months) 4.1 4.2 3.0 0.019
Median TTF (months) 3.7 3.4 2.5 NS
Median overall survival (months) 12.5 12.1 10.9 NS
Results are presented for the ITT population, unless otherwise stated.
A *vs* C and B *vs* C.
There were no CRs.
Response rate is expressed as a percentage of patients treated at that dose level as their highest dose in each group.
CI, confidence interval; NC, not calculated; NS, not significant.
######
Adverse events
**Grade 3--4 adverse events^a^** **Treatment group: *n* (% of patients)**
----------------------------------------------------------- ------------------------------------------ --------- ---------
At least one grade 3--4 adverse event^a^ 15 (42) 30 (48) 32 (48)
Haematological
Leukopenia 9 (25) 15 (24) 21 (32)
Neutropenia 17 (47) 19 (31) 29 (44)
Anaemia 3 (8) 1 (2) 5 (8)
Infection (grade 3--4 neutropenia present) 2 (6) 0 2 (3)
Fever without infection (grade 3--4 neutropenia present) 0 2 (3) 3 (5)
Gastrointestinal (GI)
Vomiting 5 (14) 10 (16) 6 (9)
Diarrhoea 11 (31) 13 (21) 18 (27)
Nausea 4 (11) 7 (11) 7 (11)
All other GI events^b^ 5 (14) 5 (8) 4 (6)
Other adverse events
Fatigue 3 (8) 7 (11) 8 (12)
Fever (grade 3--4 neutropenia absent) 0 1 (2) 3 (5)
Infection (grade 3--4 neutropenia absent) 2 (6) 1 (2) 3 (5)
Possibly or probably related to study treatment.
Anorexia, five (3%) cases; cholinergic syndrome, three (2%) cases; GI pain, two (1%) cases; dehydration, three (2%) cases; stomatitis, one (1%) case.
######
Patient discontinuations
**Treatment group: *n* (% of patients)**
--------------------------------------------------- ------------------------------------------ --------- ---------
No. of patients still on treatment at cutoff date 1 (3) 3 (5) 1 (2)
Total treatment discontinuations 35 (97) 59 (95) 65 (99)
*Nonfatal reasons*
Progressive disease 25 (69) 34 (55) 51 (77)
Treatment-related adverse event 2 (6) 12 (19) 6 (9)
Adverse events leading to discontinuation^a^
Fatigue 1 (3) 3 (5) 2 (3)
Vomiting 1 (3) 3 (5) 2 (3)
Diarrhoea --- 4 (7) 2 (3)
Nausea --- 2 (3) 2 (3)
Neutropenia --- 2 (3) 1 (2)
Febrile neutropenia --- 2 (3) ---
Neutropenic infection 1 (3) --- ---
Infection --- --- 2 (3)
Fever (infection absent) --- 1 (2) ---
All other nonfatal events^b^ --- 5 (8) 1 (2)
Patient refusal 1 (3) 4 (7) 1 (2)
Other 6 (17) 7 (11) 4 (6)
*Fatal reasons*
Death due to treatment-related adverse events --- 1 (2) ---
Death due to progressive disease 1 (3) 1 (2) 2 (3)
Cardio-respiratory failure --- --- 1 (2)
Not mutually exclusive. Patients may have discontinued treatment for more than one adverse event reason.
Group B: aggravation reaction, two (3%) cases; anorexia, one (2%) case; dehydration, one (2%) case; small bowel obstruction, one (2%) case. Group C: anorexia, one (2%) case.
######
Pharmacokinetic profiles of irinotecan and SN-38 at different doses of irinotecan
**Arm A** **Arm B** **Arm C**
------------------------ ------------------- -------------------- -------------------- ------------------- ------------------- -------------------
Cycle 1 1 2 3 1 1
No. of patients 6 13 8 5 5 5
Dose (*n*) (mg m^−2^) 350 250 350 300 (1)/500 (4) 250 350
*Irinotecan*
Infusion duration (h) 0.5--1.5 0.5--1.5 0.5--1.0 0.5--1.1 0.5--1.6 0.5--1.1
*C*~max~ (mg l^−1^) 5.88 (4.79--9.18) 4.55 (3.05--5.87) 6.12 (5.33--6.70) 8.40 (4.57--8.62) 3.61 (3.26--4.56) 7.13 (5.10--7.79)
AUC (mg h l^−1^) 32.7 (14.3--36.4) 20.5 (11.6--30.9) 27.8 (20.4--39.2) 44.7 (28.0--50.6) 19.7 (15.3--29.0) 33.4 (22.9--46.3)
Clearance (l h m^−2^) 9.3 (8.4--21.3) 10.6 (7.0--18.9) 10.9 (8.2--14.9) 9.1 (5.9--15.6) 10.6 (7.53--14.4) 9.03 (6.55--13.3)
*SN-38*
Median *t*~max~ (h) 0.7 (0.6--1.5) 0.6 (0.5--1.6) 0.6 (0.5--1.0) 0.7 (0.6--1.1) 1.5 (0.6--1.6) 1.0 (0.6--1.1)
*C*~max~ (μg l^−1^) 61.9 (33.5--86.7) 49.7 (24.0--138.0) 58.7 (38.0--168.0) 80.7 (34.9--97.3) 40.6 (33.9--962) 67.9 (50.2--135)
AUC (μg h l^−1^) 668 (362--1110) 676 (324--1140) 960 (546--1300) 1420 (609--1610) 595 (403--903) 768 (579--1395)
AUC~N~^a^ (μg h l^−1^) 1.9 (1.1--2.9) 2.4 (1.3--5.0) 2.6 (1.6--4.4) 2.6 (1.3--5.1) 2.0 (1.5--3.2) 2.5 (1.5--4.4)
Normalised to 1 mg irinotecan dose.
Data are expressed as median (95% CI) unless otherwise stated.
AUC, area under the plasma concentration--time curve; *C*~max~, maximum plasma concentration; *t*~max~, time to reach maximum plasma concentration.
|
|
This subproject is one of many research subprojects utilizing the resources provided by a Center grant funded by NIH/NCRR. The subproject and investigator (PI) may have received primary funding from another NIH source, and thus could be represented in other CRISP entries. The institution listed is for the Center, which is not necessarily the institution for the investigator. Most organisms rely on an innate immune system as their first line of defense against infection. Within the innate immune system, the Toll-like receptors (TLRs), a family of evolutionarily ancient receptors found on the surface of many cell types, are critical for pathogen recognition outside the cell. About 12 TLRs recognize structures specific to pathogens, such as bacterial cell wall components, bacterial filament proteins, or certain types of nucleic acid. This recognition event initiates a signal inside the cell, which induces the rapid secretion of antimicrobial and inflammatory proteins. Inside the cell, the NOD proteins and RNA helicases such as MDA5 recognize similar pathogen-associated structures to those recognized by TLRs. Remarkably, given the structural diversity of the structures that they recognize, all TLRs and NODs rely on a "leucine-rich repeat" (LRR) domain to recognize pathogen-associated structures. The overall goal of our research program on innate immune sensors is to understand how they recognize conserved molecular patterns in pathogens, and how this recognition is translated into an innate immune response. Our structural approach will provide unique insights into these important processes. First, we aim to determine the structure of one or more TLR-ligand complexes, by X-ray crystallography. Alternative crystallization targets are NOD-ligand or helicase-RNA complexes. We propose to use novel protein expression techniques to maximize protein yields. Our structures will likely define novel principles of molecular recognition. 
By revealing the conformational changes associated with ligand binding, the structures will provide insight on how pathogen recognition is translated into a signal in the cell that elicits an immune response. Our work will also guide efforts to design synthetic agonists or antagonists with immunomodulatory properties. Such compounds would have a wide range of medical applications, particularly as vaccine adjuvants or anti-inflammatory therapeutics.
|
|
The prior art has proposed various methods and apparatus to produce composite materials. U.S. Pat. No. 2,931,082 to Brennan discloses a casting method and apparatus wherein a composite metal article is formed by continuously casting molten metal against a longitudinally moving base such as a metal strip or the like. In Brennan, a strip is disposed between the material being cast and a rotating casting wheel.
U.S. Pat. No. 5,077,094 to McCall et al. discloses a process for applying a metal coating to a metal strip substrate. In this process, a melt pool of a metal coating material is deposited on a casting surface of the substrate material and rapidly cooled to form the coated metal strip.
U.S. Pat. No. 4,224,978 to Klein discloses a twin roll casting method and apparatus for forming a composite material. In this method, a material having a mechanical strength and melting point substantially higher than that of aluminum is plated on at least one face of a continuously cast aluminum core material. Referenced in this patent is French Patent No. 1,364,758 which describes in principle a continuous casting method in which still liquid metal is introduced between two cooled work rolls and in which a metal plating strip is interposed between the liquid metal and the work rolls. The metal plating strip is thus plated onto the continuously cast material. This French patent discloses plating an aluminum blank with a strip of aluminum.
In the prior art, it is also known to provide a brazing sheet comprising a core of an aluminum alloy and a brazing material, i.e. a coating of a lower melting point filler metal. Typically, the coatings are roll bonded to one or both sides of the core sheet during fabrication. Brazing sheet can then be formed without removing the coating, assembled, fluxed and brazed without placing additional filler metal at a joint site.
In one type of roll bonding, the brazing material is bonded to a core material at an ingot stage. The bonded ingot must then be hot rolled to brazing sheet thicknesses, typically 0.125". This hot rolling step is conducive to the formation of surface oxides which impair the quality of the brazing sheet and can adversely affect brazing performance.
Alternatively, the filler metal can be produced by casting into an ingot form and rolled to a thin gauge liner stock. After rolling, the wrought filler metal can be roll bonded to the aluminum core material using conventional techniques. This method requires numerous annealing and surface preparation steps to prepare the thin gauge liner stock for bonding. The core material may vary depending on the application. AA3003 or AA6951 aluminum alloys are typical examples of core materials. The brazing filler metals can also vary depending on the desired use, usually comprising an AA4XXX-type aluminum alloy.
Besides the drawbacks noted above concerning excessive surface oxides in hot rolled brazing sheet and the additional processing steps of annealing and surface cleaning for wrought liner stock, prior art methods of making brazing sheet lack the ability to vary the cladding or filler metal composition for a given core material.
In response to the drawbacks and disadvantages of the prior art discussed above, a need has developed to provide an improved method for making twin roll cast composite materials offering flexibility in choice of composition, cost effectiveness and energy efficiency.
In response to this need, the present invention provides a method for making a twin roll cast clad material having an acceptable structure and quality in combination with low operating and capital costs and the ability to utilize different brazing filler materials with a single core material.
|
|
Haberkip
College or work? Gap year or victory lap? And how should a young person choose among the multitude of programs offered through universities, colleges or a combination of both?Those are just some of the questions faced by today’s high school graduates.In...
College or work? Gap year or victory lap? And how should a young person choose among the multitude of programs offered through universities, colleges or a combination of both?Those are just some of the questions faced by today’s high school graduates.In...
College or work? Gap year or victory lap? And how should a young person choose among the multitude of programs offered through universities, colleges or a combination of both?
Those are just some of the questions faced by today’s high school graduates.
In an era of tough competition for jobs, the rise of precarious employment and the disappearance of a linear path from school to work, teaching kids career and life planning is more important than ever.
But a new report from People for Education says Ontario students aren’t getting what they need from the province’s careers strategy, introduced over a three-year period beginning in 2013.
Principals surveyed by the research and advocacy group cited problems implementing the plan, a shortage of guidance counsellors and lack of teacher training to help students at all levels.
“The bottom line is it’s been hard for schools to implement this policy, which is a laudable policy, it’s something we need to be doing in our schools,” said Annie Kidder, executive director of People for Education. “We need to be thinking about the now multiple paths that our kids are going to end up being on as they grow up.”
“The evidence tells us now you’re probably going to have multiple jobs in multiple different areas and also multiple paths even through your education.”
So helping them understand themselves and their interests even as young students is key to making sure they have the tools to navigate a complex path.
The Ontario strategy includes such mandatory components as: portfolios for every student from kindergarten to Grade 12 to help them reflect on their interests, strengths, learning and later career possibilities; career and life-planning committees in every school; and professional development for teachers to help them integrate career and life planning into the classroom.
It is also linked to the existing 40 hours of mandatory community volunteering for high school students and the compulsory Grade 10 careers course.
The survey of 1,100 principals found:
Mandatory career and life-planning committees were in place in only 15 per cent of elementary schools and 39 per cent of high schools. And of those, only 8 per cent of secondary schools included community members.
Thirty-four per cent of elementary and 56 per cent of secondary schools reported that every student had a career/life-planning portfolio.
Teacher training on career and life planning was available at fewer than one in four elementary schools and 40 per cent of high schools.
While high school guidance counsellors are the primary staff members responsible for student portfolios and planning, 16 per cent of secondary schools don’t have a full-time guidance counsellor.
The average ratio is one counsellor for every 380 students — in line with what provincial funding provides — but one in 10 schools struggles with a ratio of 600 students per teacher.
Principals said two years of education labour disputes interfered with the new strategy, but also blamed lack of technology support, workload issues, and a lack of overall understanding of the policy.
“While lots of them talked about how great the policy was, an equal number talked about how difficult it was to implement,” says Kidder.
She cited “initiative exhaustion” among teachers and administrators following a stream of new education strategies ranging from math to well-being to experiential learning, which can leave staff overwhelmed. And she called for better integration of the career and life lessons with all school subjects.
For Bruce Lawson of the Counselling Foundation of Canada, making the most of the provincial strategy is key. And he says despite the challenges addressed in the report, it is one of the best in the country.
By the time today’s elementary students graduate, at least one third of the occupations open to them will be jobs that don’t currently exist, says Lawson, president of the foundation, which promotes career planning and development. For kindergarten students, it amounts to more than half.
“Given how the world is changing at such a rapid pace, we really need to equip young people with the skills, competency and resilience to be able to navigate the 21st-century workplace.”
The Toronto Star and thestar.com, each property of Toronto Star Newspapers Limited, One Yonge Street, 4th Floor, Toronto, ON, M5E 1E6. You can unsubscribe at any time. Please contact us or see our privacy policy for more information.
Our editors found this article on this site using Google and regenerated it for our readers.
|
|
<?php
/*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This software consists of voluntary contributions made by many individuals
* and is licensed under the LGPL. For more information, see
* <http://www.doctrine-project.org>.
*/
namespace Doctrine\ORM\Internal\Hydration;
use Doctrine\DBAL\Connection;
/**
 * Hydrator that produces flat, rectangular results of scalar data.
 * The created result is almost the same as a regular SQL result set, except
 * that column names are mapped to field names and data type conversions take place.
 *
 * @author Roman Borschel <[email protected]>
 * @since 2.0
 */
class ScalarHydrator extends AbstractHydrator
{
    /**
     * Hydrates every row of the underlying statement into a flat array of
     * scalar rows (column aliases mapped to field names, values converted).
     *
     * @override
     */
    protected function _hydrateAll()
    {
        $rows = array();
        $columnCache = array();
        for (;;) {
            $record = $this->_stmt->fetch(\PDO::FETCH_ASSOC);
            if (!$record) {
                break;
            }
            $rows[] = $this->_gatherScalarRowData($record, $columnCache);
        }
        return $rows;
    }

    /**
     * Hydrates a single result-set row and appends it to $result.
     *
     * @override
     */
    protected function _hydrateRow(array $data, array &$cache, array &$result)
    {
        $result[] = $this->_gatherScalarRowData($data, $cache);
    }
}
|
|
package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// VpnSitesClient is the network Client
type VpnSitesClient struct {
	// BaseClient carries the shared configuration (base URI, subscription ID,
	// authorization and retry behavior) used by every operation on this client.
	BaseClient
}
// NewVpnSitesClient creates an instance of the VpnSitesClient client.
// It targets the default Azure endpoint (DefaultBaseURI).
func NewVpnSitesClient(subscriptionID string) VpnSitesClient {
	return NewVpnSitesClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewVpnSitesClientWithBaseURI creates an instance of the VpnSitesClient client
// against a custom endpoint (e.g. a sovereign cloud or Azure Stack).
func NewVpnSitesClientWithBaseURI(baseURI string, subscriptionID string) VpnSitesClient {
	return VpnSitesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates a VpnSite resource if it doesn't exist else updates the existing VpnSite.
// This is a long-running operation: only the initial request is sent here; the
// returned future must be polled/waited on to observe completion.
// Parameters:
// resourceGroupName - the resource group name of the VpnSite.
// vpnSiteName - the name of the VpnSite being created or updated.
// vpnSiteParameters - parameters supplied to create or update VpnSite.
func (client VpnSitesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteParameters VpnSite) (result VpnSitesCreateOrUpdateFuture, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.CreateOrUpdate")
		defer func() {
			sc := -1 // -1 signals that no HTTP response was received
			if result.Response() != nil {
				sc = result.Response().StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, vpnSiteName, vpnSiteParameters)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "CreateOrUpdate", nil, "Failure preparing request")
		return
	}
	// Sender issues the request and wraps the response in the pollable future.
	result, err = client.CreateOrUpdateSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
		return
	}
	return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client VpnSitesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteParameters VpnSite) (*http.Request, error) {
	const apiVersion = "2018-10-01"
	// Values substituted into the {placeholders} of the route template below.
	urlParams := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
		"vpnSiteName":       autorest.Encode("path", vpnSiteName),
	}
	queryParams := map[string]interface{}{"api-version": apiVersion}
	p := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPut(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}", urlParams),
		autorest.WithJSON(vpnSiteParameters),
		autorest.WithQueryParameters(queryParams))
	return p.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client VpnSitesClient) CreateOrUpdateSender(req *http.Request) (future VpnSitesCreateOrUpdateFuture, err error) {
	var resp *http.Response
	resp, err = autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
	if err != nil {
		return
	}
	// Wrap the initial response in a pollable future for the long-running operation.
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client VpnSitesClient) CreateOrUpdateResponder(resp *http.Response) (result VpnSite, err error) {
	// 200 (updated) and 201 (created) are the expected success codes.
	decorators := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, decorators...)
	result.Response = autorest.Response{Response: resp}
	return
}
// Delete deletes a VpnSite.
// This is a long-running operation: only the initial request is sent here; the
// returned future must be polled/waited on to observe completion.
// Parameters:
// resourceGroupName - the resource group name of the VpnSite.
// vpnSiteName - the name of the VpnSite being deleted.
func (client VpnSitesClient) Delete(ctx context.Context, resourceGroupName string, vpnSiteName string) (result VpnSitesDeleteFuture, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.Delete")
		defer func() {
			sc := -1 // -1 signals that no HTTP response was received
			if result.Response() != nil {
				sc = result.Response().StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.DeletePreparer(ctx, resourceGroupName, vpnSiteName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "Delete", nil, "Failure preparing request")
		return
	}
	result, err = client.DeleteSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "Delete", result.Response(), "Failure sending request")
		return
	}
	return
}
// DeletePreparer prepares the Delete request.
func (client VpnSitesClient) DeletePreparer(ctx context.Context, resourceGroupName string, vpnSiteName string) (*http.Request, error) {
	const apiVersion = "2018-10-01"
	// Values substituted into the {placeholders} of the route template below.
	urlParams := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
		"vpnSiteName":       autorest.Encode("path", vpnSiteName),
	}
	queryParams := map[string]interface{}{"api-version": apiVersion}
	p := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}", urlParams),
		autorest.WithQueryParameters(queryParams))
	return p.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client VpnSitesClient) DeleteSender(req *http.Request) (future VpnSitesDeleteFuture, err error) {
	var resp *http.Response
	resp, err = autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
	if err != nil {
		return
	}
	// Wrap the initial response in a pollable future for the long-running operation.
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client VpnSitesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
	// 200, 202 (accepted, still deleting) and 204 (no content) are all success codes.
	decorators := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, decorators...)
	result.Response = resp
	return
}
// Get retrieves the details of a VPNsite.
// Parameters:
// resourceGroupName - the resource group name of the VpnSite.
// vpnSiteName - the name of the VpnSite being retrieved.
func (client VpnSitesClient) Get(ctx context.Context, resourceGroupName string, vpnSiteName string) (result VpnSite, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.Get")
		defer func() {
			sc := -1 // -1 signals that no HTTP response was received
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Prepare -> send -> respond pipeline; each stage wraps its own error context.
	req, err := client.GetPreparer(ctx, resourceGroupName, vpnSiteName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "Get", nil, "Failure preparing request")
		return
	}
	resp, err := client.GetSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "Get", resp, "Failure sending request")
		return
	}
	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "Get", resp, "Failure responding to request")
	}
	return
}
// GetPreparer prepares the Get request.
func (client VpnSitesClient) GetPreparer(ctx context.Context, resourceGroupName string, vpnSiteName string) (*http.Request, error) {
	const apiVersion = "2018-10-01"
	// Values substituted into the {placeholders} of the route template below.
	urlParams := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
		"vpnSiteName":       autorest.Encode("path", vpnSiteName),
	}
	queryParams := map[string]interface{}{"api-version": apiVersion}
	p := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}", urlParams),
		autorest.WithQueryParameters(queryParams))
	return p.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client VpnSitesClient) GetSender(req *http.Request) (*http.Response, error) {
	// Retry transparently on registration-required failures.
	retry := azure.DoRetryWithRegistration(client.Client)
	return autorest.SendWithSender(client, req, retry)
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client VpnSitesClient) GetResponder(resp *http.Response) (result VpnSite, err error) {
	decorators := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, decorators...)
	result.Response = autorest.Response{Response: resp}
	return
}
// List lists all the VpnSites in a subscription.
// The result is the first page; result.fn is wired up so that advancing the
// page (or using ListComplete) fetches subsequent pages on demand.
func (client VpnSitesClient) List(ctx context.Context) (result ListVpnSitesResultPage, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.List")
		defer func() {
			sc := -1 // -1 signals that no HTTP response was received
			if result.lvsr.Response.Response != nil {
				sc = result.lvsr.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Hook up the pager's "next page" function before issuing the first request.
	result.fn = client.listNextResults
	req, err := client.ListPreparer(ctx)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "List", nil, "Failure preparing request")
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.lvsr.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "List", resp, "Failure sending request")
		return
	}
	result.lvsr, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "List", resp, "Failure responding to request")
	}
	return
}
// ListPreparer prepares the List request.
func (client VpnSitesClient) ListPreparer(ctx context.Context) (*http.Request, error) {
	const apiVersion = "2018-10-01"
	// Subscription-wide listing: only the subscription ID appears in the path.
	urlParams := map[string]interface{}{
		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
	}
	queryParams := map[string]interface{}{"api-version": apiVersion}
	p := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnSites", urlParams),
		autorest.WithQueryParameters(queryParams))
	return p.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client VpnSitesClient) ListSender(req *http.Request) (*http.Response, error) {
	// Retry transparently on registration-required failures.
	retry := azure.DoRetryWithRegistration(client.Client)
	return autorest.SendWithSender(client, req, retry)
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client VpnSitesClient) ListResponder(resp *http.Response) (result ListVpnSitesResult, err error) {
	decorators := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, decorators...)
	result.Response = autorest.Response{Response: resp}
	return
}
// listNextResults retrieves the next set of results, if any.
// It is installed as the pager function (result.fn) by List; a nil request
// from the preparer means there is no next-page link, so paging stops.
func (client VpnSitesClient) listNextResults(ctx context.Context, lastResults ListVpnSitesResult) (result ListVpnSitesResult, err error) {
	req, err := lastResults.listVpnSitesResultPreparer(ctx)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "network.VpnSitesClient", "listNextResults", nil, "Failure preparing next results request")
	}
	if req == nil {
		// No nextLink on the previous page: end of the collection.
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "network.VpnSitesClient", "listNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "listNextResults", resp, "Failure responding to next results request")
	}
	return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client VpnSitesClient) ListComplete(ctx context.Context) (result ListVpnSitesResultIterator, err error) {
	if tracing.IsEnabled() {
		// NOTE(review): the span is named ".List" (not ".ListComplete") and the
		// nil-check below uses result.Response() while the read uses
		// result.page.Response() — generator quirks; presumably equivalent, but
		// confirm against the AutoRest template before changing.
		ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.List")
		defer func() {
			sc := -1 // -1 signals that no HTTP response was received
			if result.Response().Response.Response != nil {
				sc = result.page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	result.page, err = client.List(ctx)
	return
}
// ListByResourceGroup lists all the vpnSites in a resource group.
// The result is the first page; result.fn is wired up so that advancing the
// page fetches subsequent pages on demand.
// Parameters:
// resourceGroupName - the resource group name of the VpnSite.
func (client VpnSitesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ListVpnSitesResultPage, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.ListByResourceGroup")
		defer func() {
			sc := -1 // -1 signals that no HTTP response was received
			if result.lvsr.Response.Response != nil {
				sc = result.lvsr.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Hook up the pager's "next page" function before issuing the first request.
	result.fn = client.listByResourceGroupNextResults
	req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "ListByResourceGroup", nil, "Failure preparing request")
		return
	}
	resp, err := client.ListByResourceGroupSender(req)
	if err != nil {
		result.lvsr.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "ListByResourceGroup", resp, "Failure sending request")
		return
	}
	result.lvsr, err = client.ListByResourceGroupResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "ListByResourceGroup", resp, "Failure responding to request")
	}
	return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client VpnSitesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
	const apiVersion = "2018-10-01"
	// Values substituted into the {placeholders} of the route template below.
	urlParams := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	queryParams := map[string]interface{}{"api-version": apiVersion}
	p := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites", urlParams),
		autorest.WithQueryParameters(queryParams))
	return p.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
// ListByResourceGroupSender dispatches the prepared request; the response
// body is closed by autorest if an error occurs.
func (client VpnSitesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
	// Retry-with-registration transparently registers the Network resource
	// provider for subscriptions that have not used it yet.
	retryDecorator := azure.DoRetryWithRegistration(client.Client)
	return autorest.SendWithSender(client, req, retryDecorator)
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
// ListByResourceGroupResponder validates and unmarshals the page response;
// it always closes the response body.
func (client VpnSitesClient) ListByResourceGroupResponder(resp *http.Response) (result ListVpnSitesResult, err error) {
	// Anything other than 200 OK is turned into an error before unmarshaling.
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return result, err
}
// listByResourceGroupNextResults retrieves the next set of results, if any.
// listByResourceGroupNextResults retrieves the next set of results, if any.
// Invoked via result.fn by the page type's NextWithContext.
func (client VpnSitesClient) listByResourceGroupNextResults(ctx context.Context, lastResults ListVpnSitesResult) (result ListVpnSitesResult, err error) {
	req, err := lastResults.listVpnSitesResultPreparer(ctx)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "network.VpnSitesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
	}
	// A nil request means there is no next-link: the zero result signals
	// "no more pages" to the pager.
	if req == nil {
		return
	}
	resp, err := client.ListByResourceGroupSender(req)
	if err != nil {
		// Preserve the raw response before surfacing the send error.
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "network.VpnSitesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListByResourceGroupResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
	}
	return
}
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
func (client VpnSitesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ListVpnSitesResultIterator, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.ListByResourceGroup")
		// Deferred closure reads the named results so the span records the
		// final status code / error once the call completes.
		defer func() {
			// -1 marks "no HTTP response received".
			sc := -1
			if result.Response().Response.Response != nil {
				sc = result.page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
	return
}
// UpdateTags updates VpnSite tags.
// Parameters:
// resourceGroupName - the resource group name of the VpnSite.
// vpnSiteName - the name of the VpnSite being updated.
// vpnSiteParameters - parameters supplied to update VpnSite tags.
// UpdateTags starts the long-running PATCH that updates VpnSite tags.
// It returns a future; the caller must wait on it for completion — this
// method does not block until the operation finishes.
func (client VpnSitesClient) UpdateTags(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteParameters TagsObject) (result VpnSitesUpdateTagsFuture, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.UpdateTags")
		defer func() {
			// -1 marks "no HTTP response received".
			sc := -1
			if result.Response() != nil {
				sc = result.Response().StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, vpnSiteName, vpnSiteParameters)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "UpdateTags", nil, "Failure preparing request")
		return
	}
	// Sender wraps the initial response into the returned future.
	result, err = client.UpdateTagsSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "UpdateTags", result.Response(), "Failure sending request")
		return
	}
	return
}
// UpdateTagsPreparer prepares the UpdateTags request.
// UpdateTagsPreparer builds the PATCH request that carries the tags payload
// for the named VpnSite.
func (client VpnSitesClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteParameters TagsObject) (*http.Request, error) {
	// Service API version targeted by this client.
	const APIVersion = "2018-10-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	pathParameters := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
		"vpnSiteName":       autorest.Encode("path", vpnSiteName),
	}
	// PATCH with a JSON body containing only the tags to update.
	req := (&http.Request{}).WithContext(ctx)
	return autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPatch(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}", pathParameters),
		autorest.WithJSON(vpnSiteParameters),
		autorest.WithQueryParameters(queryParameters)).Prepare(req)
}
// UpdateTagsSender sends the UpdateTags request. The method will close the
// http.Response Body if it receives an error.
// UpdateTagsSender dispatches the UpdateTags request and wraps the initial
// response in a long-running-operation future. The response body is closed
// by autorest if an error occurs.
func (client VpnSitesClient) UpdateTagsSender(req *http.Request) (future VpnSitesUpdateTagsFuture, err error) {
	resp, sendErr := autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
	if sendErr != nil {
		err = sendErr
		return
	}
	// Track the async operation via the LRO future.
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}
// UpdateTagsResponder handles the response to the UpdateTags request. The method always
// closes the http.Response Body.
// UpdateTagsResponder validates and unmarshals the terminal UpdateTags
// response; it always closes the response body.
func (client VpnSitesClient) UpdateTagsResponder(resp *http.Response) (result VpnSite, err error) {
	// Both 200 OK and 201 Created are success for this operation.
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return result, err
}
|
|
Gordhan asks for fresh thinking
Business News / 9 July 2012, 4:48pm
SAPA
Cape Town 141010
Finance Minister Pravin Gordhan briefing parliament on the annual business report. South African Finance Minister Pravin Gordhan said on Thursday the world was heading towards a "currency war" unless developed nations gave ground in negotiations at the Group of 20 (G20).
picture : neil baynes
Gordhan told the 16th World Economic History Congress in Stellenbosch there was an imbalance between the locus of production and that of growth, and between political beliefs and the predominant reality.
“The question is, is there an epochal transition, are we seeing a new configuration of political and social power?”
Gordhan said history was essential to understanding society, but the challenge was to turn these insights into practice.
“What we learnt from Karl Marx is that philosophers interpret the world. However the point is to change it.”
When Gordhan became finance minister in 2009, much was made of his early affiliation to the SA Communist Party, but he said he was no longer a member and had explored Marxism as a set of humanist values. - Sapa
|
|
Located in the Mohawk Valley of New York State just outside of Schenectady, Pathways Nursing and Rehabilitation Center is a Sentosa Care affiliated facility. Sentosa Care is an organization formed to service and assist affiliated nursing facilities in fulfilling their commitment to quality healthcare. The long-term FHA financing for Pathways carries a 30-year term at a low, fixed rate, and enables Sentosa Care to invest significant capital in upgrading the facility’s existing spaces and amenities, as well as construct a newly planned vent unit and upgrade the pediatric unit.
“It’s especially gratifying to us at Greystone that we are able to provide capital to a facility like Pathways that provides care to difficult cases in their specialty units,” said Mr. Levine. “We truly value the trust Sentosa Care has in us to provide financing for their facilities and will continually work to exceed their expectations.”
|
|
Jabal Omar
Jabal Omar (جبل عمر) is a neighbourhood located in Makkah, Saudi Arabia, south of the Al Haram district.
Description
Jabal Omar is named for the hill Mount Omar that traditionally stood on the southern outskirts of Mecca and currently consists of a group of old housing units that were built randomly over the years.
There are currently no facilities in the Jabal Omar area, especially sanitation facilities. However, in late 2006, a clearance program was begun in Jabal Omar to provide the necessary space for the establishment of the Jabal Omar project.
Jabal Omar is in the Sub Municipality of Ajyad (بلدية أجياد).
References
Category:Neighborhoods of Mecca
|
|
The Difference Between Botox and Dermal Fillers
Written by CG Cosmetic on February 19, 2015
CG Cosmetic understands the difficulties that come with aging. Everyone has days where they look in the mirror and are concerned with what they see. Whether or not you see deep lines and wrinkles, or fine lines, aging is inevitable. The question then, is what can you do about these signs of aging? Perhaps the most common solution people have heard of is Botox. However, Botox is not the only option. While CG Cosmetic offers Botox procedures, we also offer Dermal Fillers.
Botox
Most men and women have heard about celebrities using Botox to rejuvenate their skin and appear more youthful, but Botox isn’t just for celebrities. Botox has provided amazing and effective results that are also safe and convenient for many individuals. Facial lines and wrinkles often occur because of the way your muscles work underneath your skin. Over time, as muscles tense due to making repeated facial expressions, your skin creates lines and wrinkles. Botox works by gently relaxing the muscles in your face, softening the wrinkles and leaving you with long-lasting smooth skin. CG Cosmetic patients have described their Botox experience as quick and painless, with most appointments taking less than one hour. Call to set up an appointment with our expert Botox specialist, Dr. Mayra Diaz, who has been in private practice for over 25 years.
Dermal Fillers
CG Cosmetic also specializes in Dermal Fillers. Different from Botox, dermal fillers work by lifting and plumping up skin, replacing collagen lost by the natural aging process. In addition to gently filling the skin, most dermal fillers also stimulate skin to encourage it to produce more collagen on its own. Dermal fillers are a great way to fix lines and wrinkles in the face, but they are also used for lip augmentations, creating fuller, plumper lips.
It is important to fully research all of your options for cosmetic surgery before making a decision about what is right for you. Start your research with a call to CG Cosmetic and talk to a specialist by scheduling a free consultation: 305-446-7277.
|
|
'use strict';
// AngularJS locale module for Luba-Katanga (lu-cd): supplies date/number
// formats and the plural-category rule to $locale.
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
// Count the digits after the decimal point in n's string form (0 if none).
function getDecimals(n) {
  var str = n + '';
  var dotIndex = str.indexOf('.');
  if (dotIndex === -1) {
    return 0;
  }
  return str.length - dotIndex - 1;
}
// Return CLDR plural operands: v = visible fraction digit count (capped at 3
// when derived from n), f = visible fraction digits as an integer.
function getVF(n, opt_precision) {
  var v = opt_precision;
  if (v === undefined) {
    // No explicit precision: infer from n, capped at 3 digits.
    v = Math.min(getDecimals(n), 3);
  }
  var scale = Math.pow(10, v);
  var frac = ((n * scale) | 0) % scale;
  return {v: v, f: frac};
}
// Register the $locale value: locale id "lu-cd", date/time names and
// patterns, number/currency patterns, and the plural-category function.
$provide.value("$locale", {
  "DATETIME_FORMATS": {
    "AMPMS": [
      "Dinda",
      "Dilolo"
    ],
    "DAY": [
      "Lumingu",
      "Nkodya",
      "Nd\u00e0ay\u00e0",
      "Ndang\u00f9",
      "Nj\u00f2wa",
      "Ng\u00f2vya",
      "Lubingu"
    ],
    "MONTH": [
      "Ciongo",
      "L\u00f9ishi",
      "Lus\u00f2lo",
      "M\u00f9uy\u00e0",
      "Lum\u00f9ng\u00f9l\u00f9",
      "Lufuimi",
      "Kab\u00e0l\u00e0sh\u00ecp\u00f9",
      "L\u00f9sh\u00eck\u00e0",
      "Lutongolo",
      "Lung\u00f9di",
      "Kasw\u00e8k\u00e8s\u00e8",
      "Cisw\u00e0"
    ],
    "SHORTDAY": [
      "Lum",
      "Nko",
      "Ndy",
      "Ndg",
      "Njw",
      "Ngv",
      "Lub"
    ],
    "SHORTMONTH": [
      "Cio",
      "Lui",
      "Lus",
      "Muu",
      "Lum",
      "Luf",
      "Kab",
      "Lush",
      "Lut",
      "Lun",
      "Kas",
      "Cis"
    ],
    "fullDate": "EEEE d MMMM y",
    "longDate": "d MMMM y",
    "medium": "d MMM y HH:mm:ss",
    "mediumDate": "d MMM y",
    "mediumTime": "HH:mm:ss",
    "short": "d/M/y HH:mm",
    "shortDate": "d/M/y",
    "shortTime": "HH:mm"
  },
  "NUMBER_FORMATS": {
    "CURRENCY_SYM": "FrCD",
    "DECIMAL_SEP": ",",
    "GROUP_SEP": ".",
    // PATTERNS[0] is the decimal pattern, PATTERNS[1] the currency pattern
    // (\u00a4 is the currency-symbol placeholder).
    "PATTERNS": [
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 3,
        "minFrac": 0,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "",
        "posPre": "",
        "posSuf": ""
      },
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 2,
        "minFrac": 2,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "\u00a4",
        "posPre": "",
        "posSuf": "\u00a4"
      }
    ]
  },
  "id": "lu-cd",
  // ONE applies only to exactly 1 with no visible fraction digits.
  "pluralCat": function (n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
|
|
Dominik Brunner - Nordketten Check out from go-shred.com on Vimeo.
Home is where your heart is! Our heart, our office and our home is Innsbruck right now. One of the most amazing cities in the world! Probably the only SPOT where you can get to the slopes in less than 15 minutes.
Certainly you ask
Sadly this year K.O.T. Gruam was boycotted by bad weather, misunderstanding and lack of snow. But still some warriors showed up for chilling and grilling under the Dragon tent (thank you Dragon so much for keeping us dry).
Near the bonfire amazing stories were told about the past weeks here in Nor
|
|
The VC-2 video compression standard is an open free-use video-decoding standard contributed by British Broadcasting Corporation (BBC) to the Society of Motion Picture and Television Engineers (SMPTE) standard. The VC-2 standard uses discrete-wavelet-transform (DWT) and interleaved exponential-Golomb (IEG) variable-length-encoding to achieve the desired video compression. Originally designed to compete with the prevailing H.264 standard, it is expected that DWT results in fewer blocky artifacts than the prevailing discrete-cosine-transform (DCT)-based systems. To achieve the low-delay requirement in a serial data interface (SDI) transmission system, SMPTE standardized two low-delay profiles, which include the level-64 using the (2, 2) DWT, and the level-65, using the overlapped (5, 3) DWT. It has been shown that in order to fit a high definition (HD) video into a standard definition SDI (SD-SDI) payload with excellent video quality, the level-65 compression is required.
The VC-2 level-65 is a subset of the low-delay profile with the following attributes: 1. 4:2:2 10-bit sampling with supported resolutions 1920×1080i29.97, 1920×1080i25, 1280×720p59.94, 1280×720p50. 2. The codec uses only Low-Delay Profile. 3. The codec uses only the LeGall (5, 3) wavelet transform (wavelet index=1). 4. The wavelet depth is exactly 3 levels. 5. The slice size is fixed to be 16 (horizontal)×8 (vertical) in luminance and 8 (horizontal)×8 (vertical) in chrominance.
Conventionally, overlapped DWT is used in the JPEG-2000 standard which is used extensively in digital cameras and medical imaging systems. In the literature, there are many publications on how to reduce the implementation complexity of 2-D DWT. A common property of this technology is that JPEG-2000 based implementation uses an external frame-buffer memory for processing the on-chip DWT/IDWT data. Thus, such publications have primarily focused on how to: minimize the read and write access to the external memory; reduce the on-chip internal memory; speed up data processing; and choose a scan scheme to minimize the memory usage. However, an external memory typically increases costs associated with the chip package size and power consumption, as well as the overall system complexity and bill-of-material (BOM) costs.
|
|
Executive and Special Sessions Thursday July, 26 2018
Executive and Special Sessions Thursday July, 26 2018
The Devils Lake Water Improvement District Board will be holding an Executive Session immediately followed by a Special Session (per ORS 192.640) at 10:00 a.m., Thursday July 26, 2018. This meeting will be held at Oregon Coast Community College Room 108, located at 3788 SE High School Dr. in Lincoln City, Oregon.
The purpose of this meeting is to discuss and finalize the contract for installation of the lake bottom aeration system. All who are interested are encouraged to attend this public meeting.
Devils Lake in Lincoln City, Oregon is a naturally shallow, coastal lake. It is uniquely placed in the world sitting on the Pacific coast edge of the North American Continent, intersecting the 45th parallel, the mark half way between the equator and the North Pole. Devils Lake is managed by the Devils Lake Water Improvement District.
|
|
package com.android.inputmethodcommon;

// NOTE(review): this file looks like an auto-stripped skeleton of the
// Android input-method-common settings classes — only field names survive,
// with all field types reduced to `int` placeholders and no method bodies.
// Confirm against the real android framework sources before relying on it.

// Interface stub for components exposing input-method settings.
class InputMethodSettingsInterface {
}
// State backing the input-method settings UI (stub fields only).
class InputMethodSettingsImpl {
    int mContext;
    int mImi;
    int mImm;
    int mSubtypeEnablerIcon;
    int mSubtypeEnablerIconRes;
    int mSubtypeEnablerTitle;
    int mSubtypeEnablerTitleRes;
    int mInputMethodSettingsCategoryTitle;
    int mInputMethodSettingsCategoryTitleRes;
    int mSubtypeEnablerPreference;
}
// Fragment wrapper delegating to a settings implementation (stub).
class InputMethodSettingsFragment {
    int mSettings;
}
// Activity wrapper delegating to a settings implementation (stub).
class InputMethodSettingsActivity {
    int mSettings;
}
|
|
<?xml version="1.0" encoding="utf-8"?>
<!-- Row layout for a song list item: album art on the left, a vertical
     title/artist column in the middle, and an overflow-menu icon on the
     right. The row itself shows a ripple via selectableItemBackground. -->
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:background="@color/appBackground"
    android:foreground="?android:attr/selectableItemBackground"
    android:gravity="center_vertical"
    android:orientation="horizontal"
    android:paddingBottom="15dp"
    android:paddingLeft="10dp"
    android:paddingRight="10dp"
    android:paddingTop="15dp">
    <!-- Fixed-size album artwork. -->
    <ImageView
        android:id="@+id/song_item_img"
        android:layout_width="50dp"
        android:layout_height="50dp"
        android:layout_weight="0" />
    <!-- Title/artist column; weight=1 so it absorbs remaining width. -->
    <LinearLayout
        android:layout_width="match_parent"
        android:layout_height="wrap_content"
        android:layout_marginStart="15dp"
        android:layout_weight="1"
        android:orientation="vertical">
        <TextView
            android:id="@+id/song_item_name"
            android:layout_width="wrap_content"
            android:layout_height="wrap_content"
            android:singleLine="true"
            android:textColor="#000"
            android:textSize="16sp" />
        <TextView
            android:id="@+id/song_item_artist"
            android:layout_width="wrap_content"
            android:layout_height="wrap_content"
            android:singleLine="true"
            android:textColor="#989898"
            android:textSize="14sp" />
    </LinearLayout>
    <!-- Overflow menu trigger with an unbounded ripple. -->
    <ImageView
        android:id="@+id/song_item_menu"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:layout_marginRight="5dp"
        android:layout_weight="0"
        android:background="@drawable/unbounded_ripple"
        android:foregroundTint="#434343"
        android:padding="5dp"
        android:src="@drawable/abc_ic_menu_moreoverflow_mtrl_alpha"
        android:theme="@style/Theme.AppCompat.Light" />
</LinearLayout>
|
|
Olefin cyclopropanation via carbene transfer catalyzed by engineered cytochrome P450 enzymes.
Transition metal-catalyzed transfers of carbenes, nitrenes, and oxenes are powerful methods for functionalizing C=C and C-H bonds. Nature has evolved a diverse toolbox for oxene transfers, as exemplified by the myriad monooxygenation reactions catalyzed by cytochrome P450 enzymes. The isoelectronic carbene transfer to olefins, a widely used C-C bond-forming reaction in organic synthesis, has no biological counterpart. Here we report engineered variants of cytochrome P450(BM3) that catalyze highly diastereo- and enantioselective cyclopropanation of styrenes from diazoester reagents via putative carbene transfer. This work highlights the capacity to adapt existing enzymes for the catalysis of synthetically important reactions not previously observed in nature.
|
|
Maroš Ferenc
Maroš Ferenc (born 19 February 1981, in Prešov) is a Slovak football goalkeeper who currently plays for 1. FC Tatran Prešov.
References
Category:1981 births
Category:Living people
Category:Slovak footballers
Category:Association football goalkeepers
Category:1. FC Tatran Prešov players
Category:AS Trenčín players
Category:MEAP Nisou players
Category:MFK Zemplín Michalovce players
Category:FC Eindhoven players
Category:Slovak Super Liga players
Category:Sportspeople from Prešov
|
|
Introduction {#sec1}
============
Acute aortic dissection (AAD) is a relatively uncommon medical emergency with a high mortality after symptom onset. The mortality of acute type A aortic dissection increases by 1--2% per hour during the first 48 h if no treatment is received \[[@cit0001]\]. Meanwhile, other common causes of acute chest pain, such as acute myocardial infarction (AMI) and pulmonary embolism (PE), also require rapid differentiation from AAD due to their critical and lethal characteristics \[[@cit0002]\]. However, the misdiagnosis rate of AAD has been reported to be approximately 30% on initial evaluation \[[@cit0003], [@cit0004]\]. Currently, noninvasive imaging modalities, including enhanced computed tomography (CT), transesophageal echocardiography (TEE) and magnetic resonance imaging (MRI), have been developed to improve the diagnosis of AAD, but these imaging modalities are expensive, time-consuming and unavailable at the bedside. Therefore, a rapid, cheap, reliable and sensitive laboratory test is urgently needed to diagnose AAD.
D-dimer, the degradation product of cross linked fibrin, is significantly elevated in AAD patients \[[@cit0005]--[@cit0008]\] and has been suggested for use as a complementary marker to rule out AAD \[[@cit0005]--[@cit0007], [@cit0009]--[@cit0011]\]. However, in real-world clinical practice, AAD, PE and AMI are all thrombogenic diseases with high mortality, and whether the D-dimer level is helpful for differentiating these diseases remains to be elucidated. We therefore conducted a prospective cohort study to evaluate the validity and reliability of D-dimer level for differentiating AAD from other types of acute chest pain, including PE, AMI, unstable angina (UA), and other uncertain diagnoses of chest pain.
Material and methods {#sec2}
====================
Study population {#sec2.1}
----------------
A single-center, prospective cohort study was conducted in Fuwai Hospital (the National Center for Cardiovascular Diseases in China) from January 2009 to January 2010. A series of consecutive patients with acute chest pain who presented to the emergency department (ED) of Fuwai Hospital within 24 h of symptom onset were enrolled in a prospective manner. Baseline clinical characteristics such as sex, age, Stanford types of AAD, intervals from onset of symptoms to hospital admission, medical histories, baseline parameters of physical examinations and laboratory tests including C-reactive protein (CRP), imaging examinations, in-hospital managements, ED diagnosis and discharge diagnosis were recorded according to pre-designed case report forms. The study protocols were approved by the appropriate institutional review boards of Fuwai Hospital and complied with the Declaration of Helsinki. All subjects provided written informed consent.
D-dimer test and diagnosis {#sec2.2}
--------------------------
Plasma D-dimer levels were measured using a stago-evolution device (France) in patients with chest pain immediately following admission. The results collected are expressed in micrograms per milliliter. The effective detection range of the assay is 0.22--20 µg/ml. Diagnoses of AAD and PE were confirmed by aorta or pulmonary angiography with multi-detector CT scan. Acute myocardial infarction was confirmed by acute chest pain, elevated cardiac-enzyme levels (cardiac troponin I or T, or the MB fraction of creatine kinase exceeded the 99^th^ percentile upper reference limit), documented findings of a new ST segment elevation/depression or a new T wave inversion on electrocardiography, and/or with evidence of obstructive coronary artery on angiography. Unstable angina was confirmed by chest pain, ST segment depression or T wave changes with evidence of obstructive coronary artery on angiography, but without the elevation of cardiac enzymes.
Statistical analysis {#sec2.3}
--------------------
Continuous variables are presented as mean ± SD or median and interquartile range according to whether they follow Gaussian distributions. Categorical data are presented as numbers and proportions. Baseline characteristics between groups were compared using Student's *t* test or the nonparametric Mann-Whitney test for continuous data and the χ^2^ test for categorical data. Receiver-operating characteristic (ROC) curves were constructed to calculate the sensitivity for AAD. The area under the curve (AUC) was calculated. A *p-*value \< 0.05 was considered statistically significant. The statistical calculations were performed with SPSS 19.0 (SPSS Inc., Chicago, Illinois, USA).
Results {#sec3}
=======
A total of 790 patients were enrolled, including 202 AAD, 43 PE, 315 AMI, 136 UA, and 94 cases with other uncertain diagnoses. Of the 202 AAD patients confirmed by CT angiography, 119 (58.9%) were Stanford type A AAD cases and 83 (41.0%) were Stanford type B AAD cases.
Patient demographics and baseline characteristics are shown in [Table I](#t0001){ref-type="table"}. Compared to the patients with other causes of chest pain, AAD patients were more likely to be younger and male and tended to have concomitant hypertension but rarely have diabetes mellitus (all *p* \< 0.001).
######
Baseline characteristics of AAD patients and non-AAD (PE, UA, AMI, and uncertain diagnosis)
Parameter AAD (*n* = 202) Non-AAD *P*-value
------------------------------------ ----------------- ----------- ------------ ------------ ----------- ----------
Age \[years\] 51 ±12 55 ±17 61 ±12 60 ±12 54 ±17 \< 0.001
Male, *n* (%) 169 (83.7) 21 (48.8) 102 (75.0) 254 (80.6) 65 (69.1) \< 0.001
Systolic blood pressure \[mm Hg\] 141 ±31 129 ±21 138 ±23 128 ±23 133 ±23 \< 0.001
Diastolic blood pressure \[mm Hg\] 80 ±21 81 ±10 87 ±57 79 ±14 81 ±14 0.535
Heart rate \[beats per minute\] 81 ±19 87 ±17 72 ±13 76 ±18 80 ±28 \< 0.001
Body mass index \[kg/m^2^\] 24.6 ±3.2 25.7 ±3.7 26.7 ±4.2 25.5 ±3.4 26.2 ±4.9 0.450
Creatinine kinases \[U/l\] 269 ±544 85 ±61 97 ±84 497 ±688 109 ±105 \< 0.001
Fasting blood glucose \[mmol/l\] 7.5 ±1.9 6.3 ±1.6 7.4 ±3.1 8.4 ±3.4 7.1 ±2.7 \< 0.001
Hypertension, *n* (%) 133 (65.8) 13 (31.0) 86 (63.2) 161 (51.3) 42 (46.2) \< 0.001
Diabetes mellitus, *n* (%) 5 (2.5) 2 (4.8) 31 (22.8) 68 (21.7) 13 (14.3) \< 0.001
Hypercholesterolemia, *n* (%) 18 (8.9) 3 (7.1) 34 (25.0) 75 (24.0) 13 (14.3) \< 0.001
Stroke, *n* (%) 10 (5.0) 2 (4.8) 13 (9.6) 33 (10.5) 7 (7.7) 0.471
Smoker, n (%) 64 (31.7) 7 (16.7) 31 (22.8) 105 (33.5) 18 (19.8) 0.060
Drinker, *n* (%) 21 (10.4) 0 (0.0) 6 (4.4) 14 (4.5) 6 (6.6) 0.110
AAD -- acute aortic dissection, PE -- pulmonary embolism, UA -- unstable angina, AMI -- acute myocardial infarction.
The D-dimer level was elevated (\> 0.50 µg/ml) in 190 (94.1%) AAD patients. The D-dimer level in AAD patients was approximately 9-fold higher than that in non-AAD patients (median: 4.19 vs. 0.45 µg/ml, *p* \< 0.05). [Figure 1](#f0001){ref-type="fig"} shows the D-dimer level in patients with different causes of chest pain. The D-dimer level was significantly higher in patients with AAD than in patients with UA (median: 0.38 µg/ml, *p* \< 0.001), AMI (median: 0.45 µg/ml, *p* \< 0.001) and other uncertain diagnoses (median: 0.44 µg/ml, *p* \< 0.001), but it was comparable with that of PE patients (median: 2.72 µg/ml, *p* = 0.065). Similarly, the D-dimer level in PE patients was significantly higher than that in patients with UA, AMI, or other uncertain diagnoses (all *p* \< 0.001). Moreover, patients with type A AAD had higher D-dimer levels than those with type B AAD (median: 4.64 vs. 4.0 µg/ml, *p* = 0.022).
{#f0001}
[Figure 2](#f0002){ref-type="fig"} shows the ROC for patients with AAD versus non-AAD patients. The AUC value was 0.90 (95% CI: 0.87--0.93) for patients with AAD vs. all non-AAD patients. The AUC value was 0.59 (95% CI: 0.5--0.68) vs. PE, 0.91 (95% CI: 0.88--0.94) vs. AMI, 0.95 (95% CI: 0.93--0.97) vs. UA, and 0.93 (95% CI: 0.91--0.96) vs. patients with other uncertain diagnoses. Moreover, the best cut-off value of D-dimer for predicting PE was 1.14 µg/ml by ROC analysis with an AUC of 0.79 (95% CI: 0.74--0.84). The sensitivity and specificity were 88.4% and 71.2%, respectively.
{#f0002}
The diagnostic performance at the cutoff level of 0.5 µg/ml was analyzed. At this cutoff level, the sensitivity was 94.0% and the specificity was 56.8% for AAD compared to non-AAD patients; the negative and positive likelihood ratio were 0.10 and 2.18, respectively with a positive predictive value of 42.6% and a negative predictive value of 96.6%. The specificity was 4% for PE, 56% for AMI, 72.9% for UA, and 65.1% for uncertain diagnostic cases ([Table II](#t0002){ref-type="table"}).
######
Diagnostic performance of D-dimer at the cutoff level of 0.5 µg/ml
Variable Sensitivity (%) Specificity (%) Youden's index PPV (%) NPV (%) PLR NLR
----------- ----------------- ----------------- ---------------- --------- --------- ------ ------
AAD 94.0
Non-AAD: 56.8 0.51 42.6 96.6 2.18 0.10
PE 4.0 --0.02 81.1 14.2 0.97 1.25
AMI 56.0 0.49 57.5 93.5 2.11 0.12
UA 72.9 0.67 83.7 89.2 3.48 0.08
Uncertain 65.1 0.56 86.3 83.3 1.44 0.09
PLR -- positive likelihood ratio, NLR -- negative likelihood ratio, PPV -- positive predictive value, NPV -- negative predictive value, AAD -- acute aortic dissection, PE -- pulmonary embolism, UA -- unstable angina, AMI -- acute myocardial infarction.
Discussion {#sec4}
==========
The present study demonstrated a significantly higher admission D-dimer level in patients with AAD within 24 h after symptom onset than those with AMI, UA, and other uncertain diagnoses. At the widely used cutoff level of 0.5 µg/ml, a favorable negative likelihood ratio of 0.10 and negative predictive value of 96.6% were found in patients with AAD. However, the D-dimer level was not significantly different between patients with AAD and PE. Our study suggests that a plasma D-dimer test within 24 h of symptom onset may be helpful for differentiating AAD and PE from other causes of acute chest pain.
Acute aortic dissection is a catastrophic medical emergency, which requires early and accurate diagnosis and treatment. Imaging modalities, including enhanced CT and MRI, can facilitate an accurate diagnosis. However, these methods are limited due to unavailability at the bedside and their time-consuming nature, and they are not cost effective for routine screening. Thus, a rapid and reliable biomarker is urgently needed. Previous studies have evaluated several biomarkers for AAD, such as the smooth muscle myosin heavy chain \[[@cit0012]--[@cit0014]\], the BB-isozyme of creatine kinase \[[@cit0015]\], and calponin \[[@cit0016]\]. However, none of these markers have been adopted into routine clinical practice due to their inability to meet the requirements of a 'gold standard' biomarker including having adequate sensitivity and specificity in addition to a favorable time course of release that covers a time window necessary for nonambiguity in the clinical setting \[[@cit0017]\]. D-dimer is a fibrin fragment seen in coagulopathic disorders, and measurements are routinely used for the exclusion of venous thromboembolic diseases and PE \[[@cit0018]--[@cit0020]\]. In recent years, multiple studies have confirmed that D-dimer is elevated in AAD, and several studies have assessed its diagnostic value for AAD. However, at a defined cutoff value, the sensitivity and specificity of D-dimer for the diagnosis of AAD have been reported to vary, possibly due to different assay methods used in different studies. Generally, when a cutoff value of 0.5 µg/ml is used, the sensitivity and negative predictive value can reach almost 100% with a specificity of 54--68.6% \[[@cit0005], [@cit0009]\], and the specificity can be increased to 73% when the cutoff value is 0.626 µg/ml \[[@cit0006]\]. 
Shimony *et al.* \[[@cit0021]\] recently performed a meta-analysis of D-dimer to diagnose AAD and found that at a cutoff value of 0.5 µg/ml, the sensitivity and negative predictive value were 0.97 and 0.96, respectively. However, the specificity and positive predictive value were low, 0.56 and 0.60, respectively. Moreover, the negative likelihood ratio showed an excellent discriminative ability (0.06), whereas the positive likelihood ratio did not (2.43). They concluded that a plasma D-dimer level \< 0.5 µg/ml was a useful screening tool to identify patients who do not have AAD. Therefore, the plasma D-dimer level may thus be used to identify subjects who are unlikely to benefit from further aortic imaging. Our results were consistent with this study, suggesting that the cutoff value of D-dimer \< 0.5 µg/ml, which is widely used for excluding PE \[[@cit0022]\], is also applicable for the exclusion of AAD. However, the D-dimer level in patients with AAD is not always elevated, and several studies \[[@cit0023], [@cit0024]\], including ours, have observed this phenomenon. Hazui *et al.* \[[@cit0025]\] proposed that younger patients with a short dissection length and a thrombosed false lumen without ulcer-like projections may have false-negative D-dimer results. Therefore, patients who present classic characteristics of AAD but have a negative D-dimer test should receive further aortic imaging.
Due to its non-specific characteristics, an elevated D-dimer level is also seen in patients with other morbidities such as PE, AMI, UA, and other diseases. Therefore, further investigation is necessary to clarify whether D-dimer tests can differentiate AAD from other diseases that present with elevated D-dimer levels. Suzuki *et al.* \[[@cit0026]\] reported that when the cutoff level was 1.6 µg/ml, D-dimer was a useful tool for differentiating AAD from AMI, angina or other ischemic heart diseases within the first 6 h, and when the cutoff value was 0.8 or 0.9 µg/ml, the D-dimer level could differentiate AAD from AMI \[[@cit0027]\]. Sakamoto *et al.* \[[@cit0028]\] also found that a cutoff value of 0.5 µg/ml was effective for distinguishing AAD and PE from AMI, with a sensitivity of 68% and a specificity of 90%. Although their results were mostly consistent with ours, the cutoff values used in these studies were different and the obtained D-dimer levels in various causes of acute chest pain varied greatly. One possible explanation for this variation is the different measurement equipment and the test strip used. Therefore, a standard and unified detection protocol may improve the heterogeneity of measurement, making the detection value more reliable.
Additionally, the D-dimer level was elevated in both AAD and PE patients, with no significant difference in our study, consistent with the findings of Sakamoto *et al.* \[[@cit0028]\] and Eggebrecht *et al.* \[[@cit0006]\]. Given the high mortality of the two morbidities, immediate contrast CT imaging or tissue Doppler imaging \[[@cit0029]\] may be good choices to differentiate AAD from PE.
In the setting of AMI/UA, rupture of atherosclerotic plaques causes thrombopoiesis and activates fibrin degradation, leading to D-dimer formation. Therefore, D-dimer is elevated in patients with AMI/UA but not in patients with stable angina and healthy controls \[[@cit0030], [@cit0031]\]. Although the D-dimer level does not directly reflect the degree of myocardial damage, it has been confirmed that an elevated D-dimer level is a strong predictor of mortality in patients with AMI/UA \[[@cit0032], [@cit0033]\]. Therefore, the D-dimer level is not only a useful tool for the differentiation of diagnoses, but it also plays an important role in the prognostic evaluation for some cardiovascular diseases.
Some limitations of the present study need to be addressed. First, although our study shows good prediction for AAD with the D-dimer level at the cutoff of 0.5 µg/ml, the specificity is low (56.8%). Indeed, D-dimer as a diagnostic biomarker of AAD does have some limitations due to the relatively high false-positive rate. Therefore, for patients with a D-dimer level \> 0.5 µg/ml, the D-dimer level should be combined with other diagnostic tests, especially imaging tests, for an accurate diagnosis of AAD. Second, the small sample size of PE patients may affect the statistical power. Furthermore, the difference in D-dimer levels was not evaluated between patients with ST-segment elevation AMI and non-ST-segment elevation AMI. Therefore, further large, prospective, multi-center studies are needed.
In conclusion, the D-dimer level within 24 h after symptom onset might be helpful for differentiating patients with suspected AAD from other causes of chest pain.
The first two authors contributed equally to this study.
We wish to thank the patients for their participation in our study, and we are also grateful to the other clinical doctors and nurses for their help with the study. This work was supported by a grant (81170286) from the National Natural Science Foundation of China to Dr. Fan Xiaohan.
Conflict of interest
====================
The authors declare no conflict of interest.
|
|
---
abstract: 'In state space models, smoothing refers to the task of estimating a latent stochastic process given noisy measurements related to the process. We propose an unbiased estimator of smoothing expectations. The lack-of-bias property has methodological benefits: independent estimators can be generated in parallel, and confidence intervals can be constructed from the central limit theorem to quantify the approximation error. To design unbiased estimators, we combine a generic debiasing technique for Markov chains with a Markov chain Monte Carlo algorithm for smoothing. The resulting procedure is widely applicable and we show in numerical experiments that the removal of the bias comes at a manageable increase in variance. We establish the validity of the proposed estimators under mild assumptions. Numerical experiments are provided on toy models, including a setting of highly-informative observations, and a realistic Lotka-Volterra model with an intractable transition density.'
author:
- |
Pierre E. Jacob[^1]\
Department of Statistics, Harvard University\
Fredrik Lindsten and Thomas B. Schön\
Department of Information Technology, Uppsala University
bibliography:
- 'Biblio.bib'
title: '**Smoothing with Couplings of Conditional Particle Filters**'
---
\#1
[*Keywords:*]{} couplings, particle filtering, particle smoothing, debiasing techniques, parallel computation.
Introduction\[sec:introduction\]
================================
Goal and content
----------------
In state space models, the observations are treated as noisy measurements related to an underlying latent stochastic process. The problem of smoothing refers to the estimation of trajectories of the underlying process given the observations [@cappe:ryden:2004]. For finite state spaces and linear Gaussian models, smoothing can be performed exactly. In general models, numerical approximations are required, and many state-of-the-art methods are based on particle methods [@douc:moulines:2014; @kantas2015particle]. Following this line of work, we propose a new method for smoothing in general state space models. Unlike existing methods, the proposed estimators are unbiased, which has direct benefits for parallelization and for the construction of confidence intervals.
The proposed method combines recently proposed conditional particle filters [@andrieu:doucet:holenstein:2010] with debiasing techniques for Markov chains [@glynn2014exact]. Specifically, we show in Section \[sec:unbiasedsmoothing\] how to remove the bias of estimators constructed with conditional particle filters, in exchange for an increase of variance; this variance can then be controlled with tuning parameters, and arbitrarily reduced by averaging over independent replicates. The validity of the proposed approach relies on the finiteness of the computational cost and of the variance of the proposed estimators, which we establish under mild conditions in Section \[sec:newsmoother:theory\]. Methodological improvements are presented in Section \[sec:newsmoother:practical\], and comparisons with other smoothers in Section \[sec:comparison\]. Numerical experiments are provided in Section \[sec:numerics\], and Section \[sec:discussion\] concludes.
Smoothing in state space models \[sec:intro:smoothing\]
-------------------------------------------------------
The latent stochastic process $(x_{t})_{t\geq 0}$ takes values in $\mathbb{X}\subset
\mathbb{R}^{d_x}$, and the observations $(y_t)_{t\geq 1}$ are in $\mathbb{Y}\subset
\mathbb{R}^{d_y}$ for some $d_x,d_y \in\mathbb{N}$. A model specifies an initial distribution $m_0(dx_{0}|\theta)$ and a transition kernel $f(dx_{t}| x_{t-1},\theta)$ for the latent process. We will assume that we have access to deterministic functions $M$ and $F$, and random variables $U_t$ for $t\geq 0$, such that $M(U_0,\theta)$ follows $m_0(dx_0|\theta)$ and $F(x_{t-1},U_t,\theta)$ follows $f(dx_t|x_{t-1},\theta)$; we refer to these as random function representations of the process [see @diaconis1999iterated]. Conditionally upon the latent process, the observations are independent and their distribution is given by a measurement kernel $g(dy_{t}| x_{t},\theta)$. The model is parameterized by $\theta\in\Theta\subset \mathbb{R}^{d_\theta}$, for $d_\theta\in\mathbb{N}$. Filtering consists in approximating the distribution $p(dx_{t}|
y_{1:t},\theta)$ for all times $t\geq 1$, whereas smoothing refers to the approximation of $p(dx_{0:T}|y_{1:T},\theta)$ for a fixed time horizon $T$, where for $s,t\in\mathbb{N}$, we write $s:t$ for the set $\{s,\ldots,t\}$, and $v_{s:t}$ for the vector $(v_s,\ldots,v_t)$. The parameter $\theta$ is hereafter fixed and removed from the notation, as is usually done in the smoothing literature [see Section 4 in @kantas2015particle]; we discuss unknown parameters in Section \[sec:discussion\]. Denote by $h$ a test function from $\mathbb{X}^{T+1}$ to $\mathbb{R}$, of which we want to compute the expectation with respect to the smoothing distribution $\pi(dx_{0:T})=p(dx_{0:T}|y_{1:T})$; we write $\pi(h)$ for $\int_{\mathbb{X}^{T+1}} h(x_{0:T}) \pi(dx_{0:T})$. For instance, with $h:x_{0:T}\mapsto x_t$ where $t\in 0:T$, $\pi(h)$ is the smoothing expectation $\mathbb{E}[x_t|y_{1:T}]$.
Postponing a discussion on existing smoothing methods to Section \[sec:comparison\], we first describe the conditional particle filter [CPF, @andrieu:doucet:holenstein:2010], which is a variant of the particle filter [@doucet:defreitas:gordon:2001]. Given a “reference” trajectory $X = x_{0:T}$, a CPF generates a new trajectory $X^\prime = x_{0:T}^\prime$ as described in Algorithm \[alg:conditional-particle-filter\], which defines a Markov kernel on the space of trajectories; we will write $x^\prime_{0:T} \sim \text{CPF}(x_{0:T},\cdot)$. This Markov kernel leaves $\pi$ invariant and ergodic averages of the resulting chains consistently estimate integrals with respect to $\pi$, under mild conditions [@andrieu:doucet:holenstein:2010; @ChopinS:2015; @LindstenDM:2015; @andrieuvihola2013uniform; @kuhlenschmidt2018stability; @Lee2018ccbpf]. We denote by $(X^{(n)})_{n\geq 0}$ a chain starting from a path $X^{(0)}$, and iterating through $X^{(n)}\sim\text{CPF}(X^{(n-1)},\cdot)$ for $n\geq 1$.
1. 2.
<!-- -->
1. 2. 3.
<!-- -->
1. 2.
In step 2.1. of Algorithm \[alg:conditional-particle-filter\], the resampling distribution $r(da^{1:N-1}|w^{1:N})$ refers to a distribution on $\{1,\ldots,N\}^{N-1}$ from which “ancestors” are drawn according to particle weights. The resampling distribution is an algorithmic choice; specific schemes for the conditional particle filter are described in @ChopinS:2015. Here we will use multinomial resampling throughout. In step 2.3., “normalize the weights” means dividing them by their sum. Instead of bootstrap particle filters [@gordon:salmon:smith:1993], where particles are propagated from the model transition, more sophisticated filters can readily be used in the CPF procedure. For instance, performance gains can be obtained with auxiliary particle filters [@pitt1999filtering; @johansen2008note], as illustrated in Section \[sec:numerics:hiddenar\]. In presenting algorithms we focus on bootstrap particle filters for simplicity. When the transition density is tractable, extensions of the CPF include backward sampling [@whiteleycommentonpmcmc; @LindstenS:2013] and ancestor sampling [@LindstenJS:2014], which is beneficial in the proposed approach as illustrated in Section \[sec:numerics:hiddenar\]. The complexity of a standard CPF update is of order $NT$, and the memory requirements are of order $T + N\log N$ [@jacob2015path].
The proposed method relies on CPF kernels but is different from Markov chain Monte Carlo (MCMC) estimators: it involves independent copies of unbiased estimators of $\pi(h)$. Thus it will be amenable to parallel computation and confidence intervals will be constructed in a different way than with standard MCMC output [e.g. Chapter 7 in @gelman2010handbook]; see Section \[sec:comparison\] for a comparison with existing smoothers.
Debiasing Markov chains \[sec:debiasing\]
-----------------------------------------
We briefly recall the debiasing technique of @glynn2014exact, see also @McLeish:2011 [@Rhee:Glynn:2012; @vihola2015unbiased] and references therein. Denote by $(X^{(n)})_{n\geq 0}$ and $({\tilde{X}}^{(n)})_{n\geq 0}$ two Markov chains with invariant distribution $\pi$, initialized from a distribution $\pi_0$. Assume that, for all $n\geq 0$, $X^{(n)}$ and ${\tilde{X}}^{(n)}$ have the same marginal distribution, and that $\lim_{n\to\infty} \mathbb{E}[h(X^{(n)})] = \pi(h)$. Writing limit as a telescopic sum, and swapping infinite sum and expectation, which will be justified later on, we obtain $$\begin{aligned}
\pi(h)
&= \mathbb{E}[h(X^{(0)})] + \sum_{n=1}^\infty \mathbb{E}[h(X^{(n)}) - h(\tilde{X}^{(n-1)})]
= \mathbb{E}[h(X^{(0)}) + \sum_{n=1}^\infty (h(X^{(n)}) - h(\tilde{X}^{(n-1)}))].\end{aligned}$$ Then, if it exists, the random variable $H_0 = h(X^{(0)}) + \sum_{n=1}^\infty (h(X^{(n)}) - h(\tilde{X}^{(n-1)}))$, is an unbiased estimator of $\pi(h)$. Furthermore, if the chains are coupled in such a way that there exists a time $\tau$, termed the *meeting time*, such that $X^{(n)}={\tilde{X}}^{(n-1)}$ almost surely for all $n\geq \tau$, then $H_0$ can be computed as $$H_0 = h(X^{(0)}) + \sum_{n=1}^{\tau - 1} (h(X^{(n)}) - h(\tilde{X}^{(n-1)})). \label{eq:RGestimator}$$ We refer to $H_0$ as a Rhee–Glynn estimator. Given that the cost of producing $H_0$ increases with $\tau$, it will be worth keeping in mind that we would prefer $\tau$ to take small values with large probability. The main contribution of the present article is to couple CPF chains and to use them in a Rhee–Glynn estimation procedure. Section \[sec:newsmoother:theory\] provides guarantees on the cost and the variance of $H_0$ under mild conditions, and Section \[sec:newsmoother:practical\] contains alternative estimators with reduced variance and practical considerations.
Unbiased smoothing \[sec:unbiasedsmoothing\]
============================================
Coupled conditional particle filters \[sec:ccpf\]
-------------------------------------------------
Our goal is to couple CPF chains $(X^{(n)})_{n\geq 0}$ and $({\tilde{X}}^{(n)})_{n\geq 0}$ such that the meeting time has finite expectation, in order to enable a Rhee–Glynn estimator for smoothing. A coupled conditional particle filter (CCPF) is a Markov kernel on the space of pairs of trajectories, such that $(X^\prime,{\tilde{X}}^\prime)\sim \text{CCPF}((X,{\tilde{X}}), \cdot)$ implies that $X^\prime\sim \text{CPF}(X, \cdot)$ and ${\tilde{X}}^\prime \sim \text{CPF}({\tilde{X}}, \cdot)$.
Algorithm \[alg:coupled-conditional-particle-filter\] describes CCPF in pseudo-code, conditional upon $X = x_{0:T}$ and ${\tilde{X}}= {\tilde{x}}_{0:T}$. Two particle systems are initialized and propagated using common random numbers. The resampling steps and the selection of trajectories at the final step are performed jointly using couplings of discrete distributions. To complete the description of the CCPF procedure, we thus need to specify these couplings (for steps 2.1. and 3.1. in Algorithm \[alg:coupled-conditional-particle-filter\]). With the Rhee–Glynn estimation procedure in mind, we aim at achieving large meeting probabilities $\mathbb{P}(X^\prime = {\tilde{X}}^\prime | X,{\tilde{X}})$, so as to incur short meeting times on average.
1. 2. 3.
<!-- -->
1. 2. 3.
<!-- -->
1. 2.
Coupled resampling \[sec:couplingparticlesystems\]
--------------------------------------------------
The temporal index $t$ is momentarily removed from the notation: the task is that of sampling pairs $(a,{\tilde{a}})$ such that $\mathbb{P}(a=j)=w^{j}$ and $\mathbb{P}({\tilde{a}}=j)={\tilde{w}}^{j}$ for all $j\in 1:N$; this is a sufficient condition for CPF kernels to leave $\pi$ invariant [@andrieu:doucet:holenstein:2010].
A joint distribution on $\{1,\ldots,N\}^{2}$ is characterized by a matrix $P$ with non-negative entries $P^{ij}$, for $i,j\in\{ 1,\ldots,N\}$, that sum to one. The value $P^{ij}$ represents the probability of the event $(a,{\tilde{a}}) = (i,j)$. We consider the set $\mathcal{J}(w,{\tilde{w}})$ of matrices $P$ such that $P\mathds{1}=w$ and $P^{\mathsf{T}}\mathds{1}={\tilde{w}}$, where $\mathds{1}$ denotes a column vector of $N$ ones, $w = w^{1:N}$ and ${\tilde{w}}= {\tilde{w}}^{1:N}$. Matrices $P\in \mathcal{J}(w,{\tilde{w}})$ are such that $\mathbb{P}(a=j)=w^{j}$ and $\mathbb{P}({\tilde{a}}=j)={\tilde{w}}^{j}$ for $j\in 1:N$.
Any choice of probability matrix $P\in\mathcal{J}(w,{\tilde{w}})$, and of a way of sampling $(a,{\tilde{a}})\sim P$, leads to a *coupled* resampling scheme. In order to keep the complexity of sampling $N$ pairs from $P$ linear in $N$, we focus on a particular choice. Other choices of coupled resampling schemes are given in @deligiannidis2015correlated [@jacob2016coupling; @sen2018coupling], following earlier works such as @pitt2002smooth [@lee2008towards].
We consider the *index-coupled* resampling scheme, used by @ChopinS:2015 in their theoretical analysis of the CPF, and by @jasra2015multilevel in a multilevel Monte Carlo context, see also Section 2.4 in @jacob2016coupling. The scheme amounts to a maximal coupling of discrete distributions on $\{1,\ldots,N\}$ with probabilities $w^{1:N}$ and ${\tilde{w}}^{1:N}$, respectively. This coupling maximizes the probability of the event $\{a = \tilde{a}\}$ under the marginal constraints. How to sample from a maximal coupling of discrete distributions is described e.g. in @lindvall2002lectures. The scheme is intuitive at the initial step of the CCPF, when $x_0^j = {\tilde{x}}_0^j$ for all $j=1,\ldots,N-1$: one would want pairs of ancestors $(a_0,{\tilde{a}}_0)$ to be such that $a_0 = {\tilde{a}}_0$, so that pairs of resampled particles remain identical. At later steps, the number of identical pairs across both particle systems might be small, or even null. In any case, at step 2.2. of Algorithm \[alg:coupled-conditional-particle-filter\], the same random number $U_{t}^j$ is used to compute $x^j_{t}$ and ${\tilde{x}}^j_{t}$ from their ancestors. If $a_{t-1}^j = {\tilde{a}}_{t-1}^j$, we select ancestor particles that were, themselves, computed with common random numbers at the previous step, and we give them common random numbers again. Thus this scheme maximizes the number of consecutive steps at which common random numbers are used to propagate each pair of particles.
We now discuss why propagating pairs of particles with common random numbers might be desirable. Under assumptions on the random function representation of the latent process, using common random numbers to propagate pairs of particles results in the particles contracting. For instance, in an auto-regressive model where $F(x,U,\theta) = \theta x + U$, where $\theta \in (-1,1)$ and $U$ is the innovation term, we have $|F(x,U,\theta) - F({\tilde{x}},U,\theta)| = |\theta| |x-{\tilde{x}}|$, thus a pair of particles propagated with common variables $U$ contracts at a geometric rate. We can formulate assumptions directly on the function $x\mapsto \mathbb{E}_U[F(x,U,\theta)]$, such as Lipschitz conditions with respect to $x$, after having integrated $U$ out, for fixed $\theta$. Discussions on these assumptions can be found in @diaconis1999iterated, and an alternative method that would not require them is mentioned in Section \[sec:discussion\].
Rhee–Glynn smoothing estimator \[sec:rgsmoothing\]
--------------------------------------------------
We now put together the Rhee–Glynn estimator of Section \[sec:debiasing\] with the CCPF algorithm of Section \[sec:ccpf\]. In passing we generalize the Rhee–Glynn estimator slightly by starting the telescopic sum at index $k\geq 0$ instead of zero, and denote it by $H_k$; $k$ becomes a tuning parameter, discussed in Section \[sec:newsmoother:practical\]. The procedure is fully described in Algorithm \[alg:rheeglynnsmoother\]; CPF and CCPF refer to Algorithms \[alg:conditional-particle-filter\] and \[alg:coupled-conditional-particle-filter\] respectively.
By convention the sum from $k+1$ to $\tau-1$ in the definition of $H_k$ is set to zero whenever $k+1>\tau-1$. Thus the estimator $H_k$ is equal to $h(X^{(k)})$ on the event $\{k+1>\tau-1\}$. Recall that $h(X^{(k)})$ is in general a biased estimator of $\pi(h)$, since there is no guarantee that a CPF chain reaches stationarity within $k$ iterations. Thus the term $\sum_{n=k+1}^{\tau - 1}(h(X^{(n)}) - h({\tilde{X}}^{(n-1)}))$ acts as a bias correction.
1. 2. 1. 2.
3.
At step 1. of Algorithm \[alg:rheeglynnsmoother\], the paths $X^{(0)}$ and ${\tilde{X}}^{(0)}$ can be sampled independently or not from $\pi_0$. In the experiments we will initialize chains independently and $\pi_0$ will refer to the distribution of a path randomly chosen among the trajectories of a particle filter.
Theoretical properties\[sec:newsmoother:theory\]
================================================
We give three sufficient conditions for the validity of Rhee–Glynn smoothing estimators.
\[assumption:upperbound\] The measurement density of the model is bounded from above: there exists $\bar{g} < \infty$ such that, for all $y\in \mathbb{Y}$ and $x\in\mathbb{X}$, $g(y | x) \leq \bar{g}$.
\[assumption:couplingmatrix\] The resampling probability matrix $P$, with rows summing to $w^{1:N}$ and columns summing to ${\tilde{w}}^{1:N}$, is such that, for all $i\in \{1,\ldots,N\}$, $P^{ii} \geq w^i {\tilde{w}}^i$. Furthermore, if $w^{1:N} = {\tilde{w}}^{1:N}$, then $P$ is a diagonal matrix with entries given by $w^{1:N}$.
\[assumption:mixing\] Let $(X^{(n)})_{n \geq 0}$ be a Markov chain generated by the conditional particle filter and started from $\pi_0$, and $h$ a test function of interest. Then $\mathbb{E}\left[h(X^{(n)})\right] \xrightarrow[n\to \infty]{} \pi(h)$. Furthermore, there exists $\delta > 0$, $n_0 < \infty$ and $C<\infty$ such that, for all $n\geq n_0$, $\mathbb{E}\left[h(X^{(n)})^{2+\delta}\right]\leq C$.
The first assumption is satisfied for wide classes of models where the measurements are assumed to be some transformation of the latent process with added noise. However, it would not be satisfied for instance in stochastic volatility models where it is often assumed that $Y|X=x\sim \mathcal{N}(0,
\exp(x)^2)$ or variants thereof [e.g. @fulop2013efficient]. There, the measurement density would diverge when $y$ is exactly zero and $x\to -\infty$. A similar assumption is discussed in Section 3 of @whiteley2013stability. One can readily check that the second assumption always holds for the index-coupled resampling scheme. The third assumption relates to the validity of MCMC estimators generated by the CPF algorithm, addressed under general assumptions in @ChopinS:2015 [@LindstenDM:2015; @andrieuvihola2013uniform].
Our main result states that the proposed estimator is unbiased, has a finite variance, and that the meeting time $\tau$ has tail probabilities bounded by those of a geometric variable, which implies in particular that the estimator has a finite expected cost.
Under Assumptions \[assumption:upperbound\] and \[assumption:couplingmatrix\], for any initial distribution $\pi_0$, any number of particles $N\geq 2$ and time horizon $T\geq 1$, there exists $\varepsilon>0$, which might depend on $N$ and $T$, such that for all $n\geq 2$, $$\mathbb{P}(\tau > n) \leq (1-\varepsilon)^{n-1},$$ and therefore $\mathbb{E}[\tau]<\infty$. Under the additional Assumption \[assumption:mixing\], the Rhee–Glynn smoothing estimator $H_k$ of Algorithm \[alg:rheeglynnsmoother\] is such that, for any $k\geq 0$, $\mathbb{E}[H_k] = \pi(h)$ and $\mathbb{V}[H_k] < \infty$. \[thm:finitevariance\]
The proof is in Appendices \[sec:proof:intermed\] and \[sec:proof:unbiased\]. Some aspects of the proof, not specific to the smoothing setting, are similar to the proofs of Theorem 1 in @rhee:phd, Theorem 2.1 in @McLeish:2011, Theorem 7 in @vihola2015unbiased, and results in @glynn2014exact. It is provided in univariate notation but the Rhee–Glynn smoother can estimate multivariate smoothing functionals, in which case the theorem applies component-wise.
Improvements and tuning \[sec:newsmoother:practical\]
=====================================================
Since $H_\ell$ is unbiased for all $\ell\geq 0$, we can compute $H_\ell$ for various values of $\ell$ between two integers $k\leq m$, and average these estimators to obtain $H_{k:m}$ defined as $$\begin{aligned}
\label{eq:timeaverage}
H_{k:m} & = \frac{1}{m-k+1}\sum_{n = k}^m \{h(X^{(n)}) + \sum_{\ell = n + 1}^{\tau - 1} (h(X^{(\ell)}) - h({\tilde{X}}^{(\ell-1)}))\} \nonumber \\
&= \frac{1}{m-k+1}\sum_{n = k}^m h(X^{(n)}) + \sum_{n =k + 1}^{\tau - 1} \frac{\min(m-k+1, n-k)}{m-k+1} (h(X^{(n)}) - h({\tilde{X}}^{(n-1)})).\end{aligned}$$ The term $(m-k+1)^{-1} \sum_{n = k}^m h(X^{(n)})$ is a standard ergodic average of a CPF chain, after $m$ iterations and discarding the first $k-1$ steps as burn-in. It is a biased estimator of $\pi(h)$ in general since $\pi_0$ is different from $\pi$. The other term acts as a bias correction. On the event $\tau - 1< k+1$ the correction term is equal to zero.
As $k$ increases the bias of the term $(m-k+1)^{-1} \sum_{n = k}^m h(X^{(n)})$ decreases. The variance inflation of the Rhee–Glynn estimator decreases too, since the correction term is equal to zero with increasing probability. On the other hand, it can be wasteful to set $k$ to an overly large value, in the same way that it is wasteful to discard too many iterations as burn-in when computing MCMC estimators. In practice we propose to choose $k$ according to the distribution of $\tau$, which can be sampled from exactly by running Algorithm \[alg:rheeglynnsmoother\], as illustrated in the numerical experiments of Section \[sec:numerics\]. Conditional upon a choice of $k$, by analogy with MCMC estimators we can set $m$ to a multiple of $k$, such as $2k$ or $5k$. Indeed the proportion of discarded iterations is approximately $k/m$, and it appears desirable to keep this proportion low. We stress that the proposed estimators are unbiased and with a finite variance for any choice of $k$ and $m$; tuning $k$ and $m$ only impacts variance and cost.
For a given choice of $k$ and $m$, the estimator $H_{k:m}$ can be sampled $R$ times independently in parallel. We denote the independent copies by $H_{k:m}^{(r)}$ for $r\in 1:R$. The smoothing expectation of interest $\pi(h)$ can then be approximated by $\bar{H}_{k:m}^R = R^{-1}\sum_{r=1}^R H_{k:m}^{(r)}$, with a variance that decreases linearly with $R$. From the central limit theorem the confidence interval $[\bar{H}_{k:m}^R + z_{\alpha/2} \hat{\sigma}^R/\sqrt{R}, \bar{H}_{k:m}^R + z_{1-\alpha/2} \hat{\sigma}^R/\sqrt{R}]$, where $\hat{\sigma}^R$ is the empirical standard deviation of $(H_{k:m}^{(r)})_{r=1}^R$ and $z_a$ is the $a$-th quantile of a standard Normal distribution, has $1-\alpha$ asymptotic coverage as $R\to \infty$. The central limit theorem is applicable as a consequence of Theorem \[thm:finitevariance\].
The variance of the proposed estimator can be further reduced by Rao–Blackwellization. In Eq. , the random variable $h(X^{(n)})$ is obtained by applying the test function $h$ of interest to a trajectory drawn among $N$ trajectories, denoted by say $x_{0:T}^j$ for $j=1,\ldots,N$, with probabilities $w_T^{1:N}$; see step 3 in Algorithms \[alg:conditional-particle-filter\] and \[alg:coupled-conditional-particle-filter\]. Thus the random variable $\sum_{j=1}^N w_T^{j}h(x_{0:T}^{j})$ is the conditional expectation of $h(X^{(n)})$ given the trajectories and $w_T^{1:N}$, which has the same expectation as $h(X^{(n)})$. Hence any term $h(X^{(n)})$ or $h({\tilde{X}}^{(n)})$ in $H_{k:m}$ can be replaced by similar conditional expectations. This enables the use of all the paths generated by the CPF and CCPF kernels, and not only the selected ones.
As in other particle methods the choice of the number of particles $N$ is important. Here, the estimator $\bar{H}_{k:m}^R$ is consistent as $R\to \infty$ for any $N\geq 2$, but $N$ plays a role both on the cost and of the variance of each $H^{(r)}_{k:m}$. We can generate unbiased estimators for different values of $N$ and compare their costs and variances in preliminary runs. The scaling of $N$ with the time horizon $T$ is explored numerically in Section \[sec:numerics:hiddenar\]. If possible, one can also employ other algorithms than the bootstrap particle filter, as illustrated in Section \[sec:numerics:hiddenar\] with the auxiliary particle filter.
Comparison with existing smoothers \[sec:comparison\]
=====================================================
The proposed method combines elements from both particle smoothers and MCMC methods, but does not belong to either category. We summarize advantages and drawbacks below, after having discussed the cost of the proposed estimators.
Each estimator $H_{k:m}$ requires two draws from $\pi_0$, here taken as the distribution of a trajectory selected from a particle filter with $N$ particles. Then, the estimator as described in Algorithm \[alg:rheeglynnsmoother\] requires a draw from the CPF kernel, $\tau-1$ draws from the CCPF kernel, and finally $m-\tau$ draws of the CPF kernel on the events $\{m>\tau\}$. The cost of a particle filter and of an iteration of CPF is usually dominated by the propagation of $N$ particles and the evaluation of their weights. The cost of an iteration of CCPF is approximately twice larger. Overall the cost of $H_{k:m}$ is thus of order $C(\tau,m,N) = N\times (3+2(\tau-1)+\max(0,m-\tau))$, for fixed $T$. The finiteness of the expected cost $\mathbb{E}[C(\tau,m,N)]$ is a consequence of Theorem \[thm:finitevariance\]. The average $\bar{H}_{k:m}^R$ satisfies a central limit theorem parametrized by the number of estimators $R$, as discussed in Section \[sec:newsmoother:practical\]; however, since the cost of $H_{k:m}$ is random, it might be more relevant to consider central limit theorems parametrized by computational cost, as in @glynn1992asymptotic. The asymptotic inefficiency of the proposed estimators can be defined as $\mathbb{E}[C(\tau,m,N)]\times\mathbb{V}[H_{k:m}]$, which can be approximated with independent copies of $H_{k:m}$ and $\tau$, obtained by running Algorithm \[alg:rheeglynnsmoother\].
State-of-the-art particle smoothers include fixed-lag approximations [@kitagawa2001monte; @cappe:ryden:2004; @olsson2008sequential], forward filtering backward smoothers [@GodsillDW:2004; @del2010forward; @douc2011sequential; @taghavi2013adaptive], and smoothers based on the two-filter formula [@briers2010smoothing; @kantas2015particle]. These particle methods provide consistent approximations as $N\to\infty$, with associated mean squared error decreasing as $1/N$ [Section 4.4 of @kantas2015particle]; except for fixed-lag approximations for which some bias remains. The cost is typically of order $N$ with efficient implementations described in @fearnheadwyncolltawn2010 [@kantas2015particle; @olsson2017efficient], and is linear in $T$ for fixed $N$. Parallelization over the $N$ particles is mostly feasible, the main limitation coming from the resampling step [@murray2015parallel; @lee2015forest; @whiteley2016role; @paige2014asynchronous; @murray2016anytime]. The memory cost of particle filters is of order $N$, or $N\log N$ if trajectories are kept [@jacob2015path], see also @Koskela2018. Assessing the accuracy of particle approximations from a single run of these methods remains a major challenge; see @lee2015variance [@olsson2017numerically] for recent breakthroughs. Furthermore, we will see in Section \[sec:numerics:unlikely\] that the bias of particle smoothers cannot always be safely ignored. On the other hand, we will see in Section \[sec:numerics:pz\] that the variance of particle smoothers can be smaller than that of the proposed estimators, for a given computational cost. Thus, in terms of mean squared error per unit of computational cost, the proposed method is not expected to provide benefits.
The main advantage of the proposed method over particle smoothers lies in the construction of confidence intervals, and the possibility of parallelizing over independent runs as opposed to interacting particles. Additionally, a user of particle smoothers who would want more precise results would increase the number of particles $N$, if enough memory is available, discarding previous runs. On the other hand, the proposed estimator $\bar{H}_{k:m}^R$ can be refined to arbitrary precision by drawing more independent copies of $H_{k:m}$, for a constant memory requirement.
Other popular smoothers belong to the family of MCMC methods. Early examples include Gibbs samplers, updating components of the latent process conditionally on other components and on the observations [e.g. @carter1994gibbs]. The CPF kernel described in Section \[sec:intro:smoothing\] can be used in the standard MCMC way, averaging over as many iterations as possible [@andrieu:doucet:holenstein:2010]. The bias of MCMC estimators after a finite number of iterations is hard to assess, which makes the choice of burn-in period difficult. Asymptotically valid confidence intervals can be produced in various ways, for instance using the CODA package [@plummer2006coda]; see also @vats2018strong. On the other hand, parallelization over the iterations is intrinsically challenging with MCMC methods [@rosenthal2000parallel].
Therefore the proposed estimators have some advantages over existing methods, the main drawback being a potential increase in mean squared error for a given (serial) computational budget, as illustrated in the numerical experiments.
Numerical experiments\[sec:numerics\]
=====================================
We illustrate the tuning of the proposed estimators, their advantages and their drawbacks through numerical experiments. All estimators of this section employ the Rao–Blackwellization technique described in Section \[sec:newsmoother:practical\], and multinomial resampling is used within all filters.
Hidden auto-regressive model\[sec:numerics:hiddenar\]
-----------------------------------------------------
Our first example illustrates the proposed method, the impact of the number of particles $N$ and that of the time horizon $T$, and the benefits of auxiliary particle filters. We consider a linear Gaussian model, with $x_{0}\sim\mathcal{N}\left(0,1\right)$ and $x_{t}=\eta
x_{t-1}+\mathcal{N}\left(0,1\right)$ for all $t \geq 1$, with $\eta=0.9$. We assume that $y_{t}\sim\mathcal{N}\left(x_{t},1\right)$ for all $t \geq 1$.
We first generate $T = 100$ observations from the model, and consider the task of estimating all smoothing means, which corresponds to the test function $h:
x_{0:T}\mapsto x_{0:T}$. With CPF kernels using bootstrap particle filters, with $N = 256$ particles and ancestor sampling [@LindstenJS:2014], we draw meeting times $\tau$ independently, and represent a histogram of them in Figure \[fig:ar1:meetings\]. Based on these meeting times, we can choose $k$ as a large quantile of the meeting times, for instance $k = 10$, and $m$ as a multiple of $k$, for instance $m = 2k = 20$. For this choice, we find the average compute cost of each estimator to approximately equal that of a particle filter with $28\times 256$ particles, with a memory usage equivalent to $2\times 256$ particles. How many of these estimators can be produced in a given wall-clock time depends on available hardware. With $R=100$ independent estimators, we obtain $95\%$ confidence intervals indicated by black error bars in Figure \[fig:ar1:smoothingmeans\]. The true smoothing means, obtained by Kalman smoothing, are indicated by a line.
The method is valid for all $N$, which prompts the question of the optimal choice of $N$. Intuitively, larger values of $N$ lead to smaller meeting times. However, the meeting time cannot be less than $2$ by definition, which leads to a trade-off. We verify this intuition by numerical simulations with $1,000$ independent runs. For $N=16$, $N=128$, $N=256$, $N=512$ and $N=1,024$, we find average meeting times of $97$, $15$, $7$, $4$ and $3$ respectively. After adjusting for the different numbers of particles, the expected cost of obtaining a meeting is approximately equivalent with $N=16$ and $N=512$, but more expensive for $N=1,024$. In practice, for specific integrals of interest, one can approximate the cost and the variance of the proposed estimators for various values of $N$, $k$ and $m$ using independent runs, and use the most favorable configuration in subsequent, larger experiments.
Next we investigate the effect of the time horizon $T$. We expect the performance of the CPF kernel to decay as $T$ increases for a fixed $N$. We compensate by increasing $N$ linearly with $T$. Table \[table:effecthorizon\] reports the average meeting times obtained from $R=500$ independent runs. We see that the average meeting times are approximately constant or slightly decreasing over $T$, implying that the linear scaling of $N$ with $T$ is appropriate or even conservative, in agreement with the literature [e.g. @huggins2015sequential]. The table contains the average meeting times obtained with and without ancestor sampling [@LindstenJS:2014]; we observe significant reductions of average meeting times with ancestor sampling, but it requires tractable transition densities. Finally, for the present model we can employ an auxiliary particle filter, in which particles are propagated conditionally on the next observation. Table \[table:effecthorizon\] shows a significant reduction in expected meeting time. The combination of auxiliary particle filter and ancestor sampling naturally leads to the smallest expected meeting times.
A hidden auto-regressive model with an unlikely observation {#sec:numerics:unlikely}
-----------------------------------------------------------
We now illustrate the benefits of the proposed estimators in an example taken from @ruiz2016particle where particle filters exhibit a significant bias. The latent process is defined as $x_{0}\sim\mathcal{N}\left(0,0.1^{2}\right)$ and $x_{t}=\eta
x_{t-1}+\mathcal{N}\left(0,0.1^{2}\right)$; we take $\eta=0.9$ and consider $T=10$ time steps. The process is observed only at time $T=10$, where $y_{T}=1$ and we assume $y_{T}\sim\mathcal{N}\left(x_{T},0.1^{2}\right)$. The observation $y_{T}$ is unlikely under the model. Therefore the filtering distributions and the smoothing distributions have little overlap, particularly for times $t$ close to $T$. This toy model is a stylized example of settings with highly-informative observations [@ruiz2016particle; @del2015sequential].
We consider the task of estimating the smoothing mean $\mathbb{E}[x_9|y_{10}]$. We run particle filters for different values of $N$, $10,000$ times independently, and plot kernel density estimators of the distributions of the estimators of $\mathbb{E}[x_9|y_{10}]$ in Figure \[fig:unlikely:pf\]. The dashed vertical line represents the estimand $\mathbb{E}[x_9|y_{10}]$, obtained analytically. We see that the bias diminishes when $N$ increases, but that it is still significant with $N=16,384$ particles. For any fixed $N$, if we were to ignore the bias and produce confidence intervals using the central limit theorem based on independent particle filter estimators, the associated coverage would go to zero as the number of independent runs increased.
In contrast, confidence intervals obtained with the proposed unbiased estimators are shown in Figure \[fig:unlikely:rg\]. For each value of $N$, the average meeting time was estimated from $100$ independent runs (without ancestor sampling), and then $k$ was set to that estimate, and $m$ equal to $k$. Then, $R=10,000$ independent estimators were produced, and confidence intervals were computed as described in Section \[sec:newsmoother:practical\]. This leads to precise intervals for each choice of $N$. The average costs associated with $N=128$, $N=256$, $N=512$ and $N=1024$ were respectively matching the costs of particle filters with $3814$, $4952$, $9152$ and $13,762$ particles. To conclude, if we match computational costs and compare mean squared errors, the proposed method is not necessarily advantageous. However, if the interest lies in confidence intervals with adequate coverage, the proposed approach comes with guarantees thanks to the lack of bias and the central limit theorem for i.i.d. variables.
Prey-predator model \[sec:numerics:pz\]
---------------------------------------
Our last example involves a model of plankton–zooplankton dynamics taken from @jones2010bayesian, in which the transition density is intractable [@breto2009time; @jacob2015sequential]. The bootstrap particle filter is still implementable, and one can either keep the entire trajectories of the particle filter, or perform fixed-lag approximations to perform smoothing. On the other hand, backward and ancestor sampling are not implementable.
The hidden state $x_t = (p_t, z_t)$ represents the population size of phytoplankton and zooplankton, and the transition from time $t$ to $t+1$ is given by a Lotka–Volterra equation, $$\frac{dp_t}{dt} = \alpha p_t - c p_t z_t , \quad \text{and}\quad \frac{dz_t}{dt} = e c p_t z_t -m_l z_t -m_q z_t^2,$$ where the stochastic daily growth rate $\alpha$ is drawn from $\mathcal{N}(\mu_\alpha,\sigma_\alpha^2)$ at every integer time $t$. The propagation of each particle involves solving the above equation numerically using a Runge-Kutta method in the `odeint` library [@ahnert2011odeint]. The initial distribution is given by $\log p_0 \sim \mathcal{N}(\log 2 , 1)$ and $\log z_0 \sim \mathcal{N}(\log 2, 1)$. The parameters $c$ and $e$ represent the clearance rate of the prey and the growth efficiency of the predator. Both $m_l$ and $m_q$ parameterize the mortality rate of the predator. The observations $y_t$ are noisy measurements of the phytoplankton $p_t$, $\log y_t \sim \mathcal{N}(\log
p_t, 0.2^2)$; $z_t$ is not observed. We generate $T = 365$ observations using $\mu_\alpha = 0.7, \sigma_\alpha = 0.5$, $c = 0.25$, $e = 0.3$, $m_l = 0.1$, $m_q = 0.1$. We consider the problem of estimating the mean population of zooplankton at each time $t\in0:T$, denoted by $\mathbb{E}[z_t|y_{1:T}]$, given the data-generating parameter.
The distribution of meeting times obtained with $N=4,096$ particles over $R=1,000$ experiments is shown in Figure \[fig:pz:meetings\]. Based on this graph, we choose $k=7$, $m=2k=14$, and produce $R=1,000$ independent estimators of the smoothing means $\mathbb{E}[z_t|y_{1:T}]$. We compute the smoothing means with a long CPF chain, taken as ground truth. We then compute the relative variance of our estimators, defined as their variance divided by the square of the smoothing means. We find the average cost of the proposed estimator to be equivalent to that of a particle filter with $78,377$ particles. To approximately match the cost, we thus run particle filters with $2^{16}=65,536$ particles, with and without fixed-lag smoothing with a lag of $10$. The resulting relative variances are shown in Figure \[fig:pz:relvar\]. We see that the proposed estimators yield a larger variance than particle filters, but that the difference is manageable. Fixed-lag smoothing provides significant variance reduction, particularly for earlier time indices. We can also verify that the bias of fixed-lag smoothing is negligible in the present example; this would however be hard to assess with fixed-lag smoothers alone.
Discussion\[sec:discussion\]
============================
The performance of the proposed estimator is tied to the meeting time. As in @ChopinS:2015, the coupling inequality [@lindvall2002lectures] can be used to relate the meeting time with the mixing of the underlying conditional particle filter kernel. The proposed approach can be seen as a framework to parallelize CPF chains and to obtain reliable confidence intervals over independent replicates. Any improvement in the CPF directly translates into more efficient Rhee–Glynn estimators, as we have illustrated in Section \[sec:numerics:hiddenar\] with auxiliary particle filters and ancestor sampling. The methods proposed e.g. in @SinghLM:2017 [@del2015sequential; @guarniero2015iterated; @gerber2015sequential; @heng2017controlled] could also be used in Rhee–Glynn estimators, with the hope of obtaining shorter meeting times and smaller variance.
We have considered the estimation of latent processes given known parameters. In the case of unknown parameters, joint inference of parameters and latent processes can be done with MCMC methods, and particle MCMC methods in particular [@andrieu:doucet:holenstein:2010]. Couplings of generic particle MCMC methods could be achieved by combining couplings proposed in the present article with those described in @jacob2017unbiased for Metropolis–Hastings chains. Furthermore, for fixed parameters, coupling the particle independent Metropolis–Hastings algorithm of @andrieu:doucet:holenstein:2010 would lead to unbiased estimators of smoothing expectations that would not require coupled resampling schemes (see Section \[sec:couplingparticlesystems\]).
The appeal of the proposed smoother, namely parallelization over independent replicates and confidence intervals, would be shared by perfect samplers. These algorithms aim at the more ambitious task of sampling exactly from the smoothing distribution [@leedoucetperfectsimulation]. It remains unknown whether the proposed approach could play a role in the design of perfect samplers. We have established the validity of the Rhee–Glynn estimator under mild conditions, but its theoretical study as a function of the time horizon and the number of particles deserves further analysis [see @Lee2018ccbpf for a path forward]. Finally, together with Fisher’s identity [@douc:moulines:2014], the proposed smoother provides unbiased estimators of the score for models where the transition density is tractable. This could help maximize the likelihood via stochastic gradient ascent.
**Acknowledgements.** The authors thank Marco Cuturi, Mathieu Gerber, Jeremy Heng and Anthony Lee for helpful discussions. This work was initiated during the workshop on *Advanced Monte Carlo methods for complex inference problems* at the Isaac Newton Institute for Mathematical Sciences, Cambridge, UK held in April 2014. We would like to thank the organizers for a great event which led to this work.
Intermediate result on the meeting probability \[sec:proof:intermed\]
=====================================================================
Before proving Theorem \[thm:finitevariance\], we introduce an intermediate result on the probability of the chains meeting at the next step, irrespective of their current states. The result provides a lower-bound on the probability of meeting in one step, for coupled chains generated by the coupled conditional particle filter (CCPF) kernel.
Let $N\geq 2$ and $T\geq 1$ be fixed. Under Assumptions \[assumption:upperbound\] and \[assumption:couplingmatrix\], there exists $\varepsilon>0$, depending on $N$ and $T$, such that $$\forall X \in \mathbb{X}^{T+1}, \quad \forall {\tilde{X}}\in \mathbb{X}^{T+1}, \quad \mathbb{P}(X' = {\tilde{X}}' | X, {\tilde{X}}) \geq \varepsilon,$$ where $(X',{\tilde{X}}') \sim \text{CCPF}((X,{\tilde{X}}), \cdot)$. Furthermore, if $X = {\tilde{X}}$, then $X' = {\tilde{X}}'$ almost surely. \[lemma:meetingprobability\]
The constant $\varepsilon$ depends on $N$ and $T$, and on the coupled resampling scheme being used. Lemma \[lemma:meetingprobability\] can be used, together with the coupling inequality [@lindvall2002lectures], to prove the ergodicity of the conditional particle filter kernel, which is akin to the approach of @ChopinS:2015. The coupling inequality states that the total variation distance between $X^{(n)}$ and ${\tilde{X}}^{(n-1)}$ is less than $2\mathbb{P}(\tau > n)$, where $\tau$ is the meeting time. By assuming ${\tilde{X}}^{(0)}\sim\pi$, ${\tilde{X}}^{(n)}$ follows $\pi$ at each step $n$, and we obtain a bound for the total variation distance between $X^{(n)}$ and $\pi$. Using Lemma \[lemma:meetingprobability\], we can bound the probability $\mathbb{P}(\tau > n)$ from above by $(1-\varepsilon)^n$, as in the proof of Theorem \[thm:finitevariance\] below. This implies that the computational cost of the proposed estimator has a finite expectation for all $N\geq 2$ and $T\geq 1$.
*Proof of Lemma \[lemma:meetingprobability\]*. We write ${{\mathbb{P}}_{x_{0:t},\tilde x_{0:t}}}$ and ${{\mathbb{E}}_{x_{0:t},\tilde x_{0:t}}}$ for the conditional probability and expectation, respectively, with respect to the law of the particles generated by the CCPF procedure conditionally on the reference trajectories up to time $t$, $(x_{0:t}, \tilde x_{0:t})$. Furthermore, let $\mathcal{F}_t$ denote the filtrations generated by the CCPF at time $t$. We denote by $x_{0:t}^k$, for $k\in1:N$, the surviving trajectories at time $t$. Let $I_t \subseteq 1:N-1$ be the set of common particles at time $t$ defined by $I_t = \{j \in 1:N-1 : x_{0:t}^j = \tilde x_{0:t}^j \}$. The meeting probability can then be bounded by: $$\begin{gathered}
{{\mathbb{P}}_{x_{0:T},\tilde x_{0:T}}}(x_{0:T}^\prime = \tilde x_{0:T}^\prime) = {{\mathbb{E}}_{x_{0:T},\tilde x_{0:T}}}\left[{\mathds{1}}\!\left(x_{0:T}^{b_T} = \tilde x_{0:T}^{\tilde{b}_T} \right)\right]
\geq \sum_{k=1}^{N-1} {{\mathbb{E}}_{x_{0:T},\tilde x_{0:T}}}[{\mathds{1}}\!\left(k \in I_T\right) P_T^{kk}] \\
= (N-1){{\mathbb{E}}_{x_{0:T},\tilde x_{0:T}}}[{\mathds{1}}\!\left(1\in I_T \right) P_T^{11}]
\geq \frac{N-1}{ (N\bar{g})^2} {{\mathbb{E}}_{x_{0:T},\tilde x_{0:T}}}[{\mathds{1}}\!\left(1\in I_T \right) g_T(x_T^1) g_T(\tilde x_T^1)],\end{gathered}$$ where we have used Assumptions \[assumption:upperbound\] and \[assumption:couplingmatrix\].
Now, let $\psi_t : {\mathbb{X}}^t \mapsto {\mathbb{R}}_+$ and consider $$\begin{aligned}
\label{eq:crude:h}
{{\mathbb{E}}_{x_{0:t},\tilde x_{0:t}}}[{\mathds{1}}\!\left( 1\in I_t \right) \psi_t(x_{0:t}^1) \psi_t(\tilde x_{0:t}^1)] =
{{\mathbb{E}}_{x_{0:t},\tilde x_{0:t}}}[{\mathds{1}}\!\left( 1\in I_t \right) \psi_t(x_{0:t}^1)^2],\end{aligned}$$ since the two trajectories agree on $\{1\in I_t\}$. We have $$\begin{aligned}
{\mathds{1}}\!\left( 1\in I_t \right) \geq \sum_{k=1}^{N-1} {\mathds{1}}\!\left(k\in I_{t-1} \right) {\mathds{1}}\!\left(a_{t-1}^1 = \tilde a_{t-1}^1 = k \right),\end{aligned}$$ and thus $$\begin{gathered}
\label{eq:crude:h2}
{{\mathbb{E}}_{x_{0:t},\tilde x_{0:t}}}[{\mathds{1}}\!\left( 1\in I_t \right) \psi_t(x_{0:t}^1)^2] \\
\geq {{\mathbb{E}}_{x_{0:t},\tilde x_{0:t}}}[\sum_{k=1}^{N-1} {\mathds{1}}\!\left(k\in I_{t-1} \right) {{\mathbb{E}}_{x_{0:t},\tilde x_{0:t}}}[ {\mathds{1}}\!\left(a_{t-1}^1 = \tilde a_{t-1}^1 = k \right) \psi_t(x_{0:t}^1)^2 \mid \mathcal{F}_{t-1} ]] \\
= (N-1){{\mathbb{E}}_{x_{0:t},\tilde x_{0:t}}}[{\mathds{1}}\!\left(1\in I_{t-1} \right) {{\mathbb{E}}_{x_{0:t},\tilde x_{0:t}}}[ {\mathds{1}}\!\left(a_{t-1}^1 = \tilde a_{t-1}^1 = 1 \right) \psi_t(x_{0:t}^1)^2 \mid \mathcal{F}_{t-1} ]].\end{gathered}$$ The inner conditional expectation can be computed as $$\begin{gathered}
\label{eq:cruce:h2-inner}
{{\mathbb{E}}_{x_{0:t},\tilde x_{0:t}}}[ {\mathds{1}}\!\left(a_{t-1}^1 = \tilde a_{t-1}^1 = 1 \right) \psi_t(x_{0:t}^1)^2 \mid \mathcal{F}_{t-1} ] \\
=\sum_{k,\ell=1}^N P_{t-1}^{k\ell} {\mathds{1}}\!\left(k=\ell=1\right) \int \psi_t((x_{0:t-1}^k, x_t ))^2 f(dx_t|x_{t-1}^k) \\
= P_{t-1}^{11} \int \psi_t((x_{0:t-1}^1, x_t))^2 f(dx_t|x_{t-1}^1) \\
\geq \frac{g_{t-1}(x_{t-1}^1) g_{t-1}(\tilde x_{t-1}^1) }{(N\bar{g})^2} \left( \int \psi_t((x_{0:t-1}^1, x_t )) f(dx_t|x_{t-1}^1) \right)^2,\end{gathered}$$ where we have again used Assumptions \[assumption:upperbound\] and \[assumption:couplingmatrix\]. Note that this expression is independent of the final states of the reference trajectories, $(x_t, \tilde x_t)$, which can thus be dropped from the conditioning. Furthermore, on $\{1\in I_{t-1}\}$ it holds that $x_{0:t-1}^1 = \tilde x_{0:t-1}^1$ and therefore, combining Eqs. \[eq:crude:h\]–\[eq:cruce:h2-inner\] we get $$\begin{gathered}
{{\mathbb{E}}_{x_{0:t},\tilde x_{0:t}}}[{\mathds{1}}\!\left( 1\in I_t \right) \psi_t(x_{0:t}^1) \psi_t(\tilde x_{0:t}^1)] \\
\geq \frac{(N-1)}{(N\bar{g})^2}{{\mathbb{E}}_{x_{0:t-1},\tilde x_{0:t-1}}}\Big[{\mathds{1}}\!\left(1\in I_{t-1} \right) g_{t-1}(x_{t-1}^1) \int \psi_t((x_{0:t-1}^1, x_t )) f(dx_t|x_{t-1}^1) \\ \times
g_{t-1}(\tilde x_{t-1}^1) \int \psi_t((\tilde x_{0:t-1}^1, x_t )) f(dx_t|\tilde x_{t-1}^1)
\Big].\end{gathered}$$ Thus, if we define for $t=1,\ldots,T-1$, $\psi_t(x_{0:t}) = g_t(x_t) \int \psi_{t+1}(x_{0:t+1}) f(dx_{t+1}|x_t)$, and $\psi_T(x_{0:T}) = g_T(x_T)$, it follows that $$\begin{aligned}
{{\mathbb{P}}_{x_{0:T},\tilde x_{0:T}}}(x_{0:T}^\prime= \tilde x_{0:T}^\prime) &\geq \frac{(N-1)^{T}}{(N\bar{g})^{2T}} {{\mathbb{E}}_{x_{0},\tilde x_{0}}}[{\mathds{1}}\!\left(1\in I_1 \right) \psi_1(x_1^1)\psi_1(\tilde x_1^1)] \\
&= \frac{(N-1)^{T}}{(N\bar{g})^{2T}} {{\mathbb{E}}_{x_{0},\tilde x_{0}}}[\psi_1(x_1^1)^2] \geq \frac{(N-1)^{T}}{(N\bar{g})^{2T}} Z^2 > 0,\end{aligned}$$ where $Z > 0$ is the normalizing constant of the model, $Z=\int m_0(dx_0) \prod_{t=1}^{T}g_t(x_t) f(dx_t|x_{t-1})$. This concludes the proof of Lemma \[lemma:meetingprobability\].
For any fixed $T$, the bound goes to zero when $N\to \infty$. The proof fails to capture accurately the behaviour of $\varepsilon$ in Lemma \[lemma:meetingprobability\] as a function of $N$ and $T$. Indeed, we observe in the numerical experiments of Section \[sec:numerics\] that meeting times decrease when $N$ increases.
Proof of Theorem \[thm:finitevariance\] \[sec:proof:unbiased\]
==============================================================
The proof is similar to those presented in @rhee:phd, in @McLeish:2011, @vihola2015unbiased, and @glynn2014exact. We can first upper-bound $\mathbb{P}\left(\tau>n\right)$, for all $n\geq2$, using Lemma \[lemma:meetingprobability\] [e.g. @williams1991probability exercise E.10.5]. We obtain for all $n\geq2$, $$\mathbb{P}\left(\tau>n\right)\leq\left(1-\varepsilon\right)^{n-1}.\label{eq:meetingtime:survival2}$$ This ensures that $\mathbb{E}[\tau]$ is finite; and that $\tau$ is almost surely finite. We then introduce the random variables $Z_{m}=\sum_{n=0}^{m} \Delta^{(n)}$ for all $m\geq 1$. Since $\tau$ is almost surely finite, and since $\Delta^{(n)} = 0$ for all $n \geq \tau$, then $Z_m\to Z_\tau = H_0$ almost surely when $m\to\infty$. We prove that $(Z_m)_{m\geq 1}$ is a Cauchy sequence in $L_2$, i.e. $\sup_{m'\geq m} \mathbb{E}\left[ (Z_{m'} - Z_m)^2 \right]$ goes to $0$ as $m\to\infty$. We write $$\begin{aligned}
\label{eq:zcauchy}
\mathbb{E}[(Z_{m'} - Z_m)^2] &= \sum_{n = m + 1}^{m'}\sum_{\ell = m + 1}^{m'} \mathbb{E}[\Delta^{(n)}\Delta^{(\ell)}].\end{aligned}$$ We use the Cauchy–Schwarz inequality to write $(\mathbb{E}[\Delta^{(n)}\Delta^{(\ell)}])^2 \leq \mathbb{E}[(\Delta^{(n)})^2]\mathbb{E}[(\Delta^{(\ell)})^2]$, and we note that $(\Delta^{(n)})^2= (\Delta^{(n)})^2\mathds{1}(\tau>n)$. Together with Hölder’s inequality with $p=1+\delta/2$, and $q=(2+\delta)/\delta$, where $\delta$ is as in Assumption \[assumption:mixing\], we can write $$\begin{aligned}
\mathbb{E}\left[(\Delta^{(n)})^{2}\right] & \leq\mathbb{E}\left[(\Delta^{(n)})^{2+\delta}\right]^{1/(1+\delta/2)}\left(\left(1-\varepsilon\right)^{\delta/(2+\delta)}\right)^{n-1}.\end{aligned}$$ Furthermore, using Assumption \[assumption:mixing\] and Minkowski’s inequality, we obtain the bound $$\begin{aligned}
\forall n\geq n_0, \qquad & \mathbb{E}\left[(\Delta^{(n)})^{2+\delta}\right]^{1/(1+\delta/2)}\leq C_{1},\end{aligned}$$ where $C_1$ is independent of $n$. The above inequalities lead to the terms $\mathbb{E}[\Delta^{(n)}\Delta^{(\ell)}]$ being upper bounded by an expression of the form $C_1 \eta^n \eta^\ell$, where $\eta \in (0,1)$. Thus we can compute a bound on Eq. \[eq:zcauchy\], by computing geometric series, and finally conclude that $(Z_m)_{m \geq 1}$ is a Cauchy sequence in $L_2$.
By uniqueness of the limit, since $(Z_m)_{m \geq 1}$ goes almost surely to $H_0$, $(Z_m)_{m \geq 1}$ goes to $H_0$ in $L_2$. This shows that $H_0$ has finite first two moments. We can retrieve the expectation of $H_0$ by $$\mathbb{E}Z_{m}=\sum_{n=0}^{m}\mathbb{E}[\Delta^{(n)}]=\mathbb{E}\left[h(X^{(m)})\right] \xrightarrow[m\to \infty]{} \pi(h),$$ according to Assumption \[assumption:mixing\]. This concludes the proof of Theorem \[thm:finitevariance\] for $H_k$ with $k=0$, and a similar reasoning applies for any $k\geq 0$.
[^1]: The authors gratefully acknowledge the Swedish Foundation for Strategic Research (SSF) via the projects *Probabilistic Modeling and Inference for Machine Learning* (contract number: ICA16-0015) and ASSEMBLE (contract number: RIT15-0012), the Swedish Research Council (VR) via the projects *Learning of Large-Scale Probabilistic Dynamical Models* (contract number: 2016-04278) and *NewLEADS - New Directions in Learning Dynamical Systems* (contract number: 621-2016-06079), and the National Science Foundation through grant DMS-1712872.
|
|
The establishment of a radioactive waste disposal facility in Western Australia for low level waste.
The Radiation Health Section of the Health Department of Western Australia has been a repository for unwanted radioactive sources for many years. They had been placed in the radioactive store located on the Queen Elizabeth II Medical Centre Campus. After a collection period of more than 20 years the storage facilities of the Radiation Health Section were nearing capacity. A decision was made to relocate these sources into a permanent near surface burial facility. Following extensive community consultation and site investigations, waste originating in Western Australia was disposed of at Mt Walton (East), 80 km North East of Koolyanobbing, Western Australia in November 1992.
|
|
Mucosal involvement is a risk factor for poor clinical outcomes and relapse in patients with pemphigus treated with rituximab.
Many studies have reported the outcome of rituximab use in pemphigus but studies regarding the clinical risk factors for poor clinical outcomes or relapse are lacking. To clarify the risk factors for poor clinical outcomes or relapse in patients with pemphigus treated with rituximab, a retrospective chart analysis was performed on patients with pemphigus who were treated with rituximab in the dermatology clinic of Seoul National University Hospital. Forty patients with pemphigus were treated with rituximab, of which 39 (97.5%) experienced remission and 19 (48.7%) experienced relapse. Patients with mucosal lesions demonstrated poor clinical outcomes. The risk for relapse was 4.626 (confidence interval: 1.126-19.001, p = .034) times higher in patients with mucosal lesions than in those without lesions. In patients with pemphigus treated with rituximab, the presence of mucosal lesions resulted in poor clinical outcomes and frequent recurrence.
|
|
If you or your colleagues still "dump" static data from line-of-business systems into a tool like Excel to manipulate, analyze, or present it; or if you have colleagues who re-key data from Office tools like Word and Excel into line-of-business systems for processing, read on:
|
|
I'm pretty sure I'm gonna get a tattoo on my ass shaped like Rainbow Dash's cutie mark and then I'm gonna cut out every piece of clothing I have into that sign so I can walk down the street with a cutie mark.
I've had an idea. Coats matching the mane 6 and some extra ponies with the inside color the hair and the outside the ponies' coat color, with their cutie mark in the bottom left or right side on the back. With Rainbow Dash's, the elastic fabric that's on the end of the sleeves and on the waist of the coat can be rainbow colored, with the inside color matching her eyes. Oh and the zipper handle being something they enjoy, for example, Pinkie Pie's would be a cupcake, Twilight's a book, and Applejack's an apple. Sound good?
|
|
This application claims the benefit of Korean Application No. 98-54151, filed Dec. 10, 1998, in the Korean Patent Office, the disclosure of which is incorporated herein by reference.
1. Field of the Invention
The present invention relates to a fluid jetting apparatus and a process for manufacturing the same, and more particularly, to a fluid jetting apparatus for a print head which is employed in output apparatuses such as an ink-jet printer, a facsimile machine, etc. to jet fluid through a nozzle, and a manufacturing process thereof.
2. Description of the Related Art
A print head is a part or a set of parts which are capable of converting output data into a visible form on a predetermined medium using a type of printer. Generally, such a print head for an ink jet printer, and the like, uses a fluid jetting apparatus which is capable of jetting the predetermined amount of fluid through a nozzle to an exterior of a fluid chamber holding the fluid by applying a physical force to the fluid chamber.
According to methods for applying physical force to the fluid within the fluid chamber, the fluid jetting apparatus is roughly grouped into a piezoelectric system and a thermal system. The piezoelectric system pushes out the ink within the fluid chamber through a nozzle through an operation of a piezoelectric element which is mechanically expanded in accordance with a driving signal. The thermal system pushes the fluid through the nozzle by means of bubbles which are produced from the fluid within the fluid chamber by the heat generated by an exothermic body. Recently, also, a thermal compression system has been developed, which is an improved form of the thermal system. The thermal compression system is for jetting out the fluid by driving a membrane by instantly heating a vaporizing fluid which acts as a working fluid.
FIG. 1 is a vertical sectional view of a fluid jetting apparatus according to a conventional thermal compression system. The fluid jetting apparatus of the thermal compression system includes a heat driving part 10, a membrane 20, and a nozzle part 30.
A substrate 11 of the heat driving part 10 supports the heat driving part 10 and the whole structure that will be constructed later. An insulated layer 12 is diffused on the substrate 11. An electrode 14 is made of a conductive material for supplying an electric power to the heat driving part 10. An exothermic body 13 is made of a resistive material having a predetermined resistance for expanding a working fluid by converting electrical energy into heat energy. Working fluid chambers 16 and 17 contain the working fluid, to maintain a pressure of the working fluid which is heat expanded, are connected by a working fluid introducing passage 18, and are formed within a working fluid barrier 15.
Further, the membrane 20 is a thin layer which is adhered to an upper portion of the working fluid barrier layer 15 and working fluid chambers 16 and 17 to be moved upward and downward by the pressure of the expanded working fluid. The membrane 20 includes a polyimide coated layer 21 and a polyimide adhered layer 22.
Jetting fluid chambers 37 and 38 are chambers which are formed to enclose the jetting fluid. When the pressure is transmitted to the jetting fluid through the membrane 20, the jetting fluid is jetted only through a nozzle 35 formed in a nozzle plate 34. Here, the jetting fluid is the fluid which is pushed out of the jetting fluid chambers 37 and 38 in response to the driving of the membrane 20, and is finally jetted to the exterior. A jetting fluid introducing passage 39 connects the jetting fluid chambers 37 and 38. The jetting fluid chambers 37 and 38 and the jetting fluid introducing passage 39 are formed in a jetting fluid barrier layer 36. The nozzle 35 is an orifice through which the jetting fluid held using the membrane 20 and the jetting fluid chambers 37 and 38 is emitted to the exterior. Another substrate 31 (see FIGS. 4A and 4B) of the nozzle part 30 is temporarily employed for constructing the nozzle part 30, and should be removed before the nozzle part 30 is assembled.
FIG. 2 shows a process for manufacturing the fluid jetting apparatus according to a conventional roll method.
As shown in FIG. 2, the nozzle plate 34 is transferred from a feeding reel 51 to a take-up reel 52. In the process of transferring the nozzle plate 34 from the feeding reel 51 to the take-up reel 52, a nozzle is formed in the nozzle plate 34 by laser processing equipment 53. After the nozzle is formed, air is jetted from an air blower 54 so as to eliminate extraneous substances attached to the nozzle plate 34. Next, an actuator chip 40, which is laminated on a substrate to the jetting fluid barrier, is bonded with the nozzle plate 34 by a tab bonder 55, and accordingly, the fluid jetting apparatus is completed. The completed fluid jetting apparatuses are wound around the take-up reel 52 to be preserved, and then sectioned in pieces in the manufacturing process for the print head. Accordingly, each piece of the fluid jetting apparatuses is supplied into the manufacturing line of a printer.
The process for manufacturing the fluid jetting apparatus according to the conventional thermal compression system will be described below with reference to the construction of the fluid jetting apparatus shown in FIG. 1.
FIGS. 3A and 3B are views for showing a process for manufacturing the heat driving part and FIG. 3C is a view for showing a process for manufacturing the membrane on the heat driving part of the conventional fluid jetting apparatus. FIGS. 4A to 4C are views for showing the process for manufacturing the nozzle part.
In order to manufacture the conventional fluid jetting apparatus, the heat driving part 10 and the nozzle part 30 should be manufactured separately. Here, the heat driving part 10 is completed as the separately-made membrane 20 is adhered to the working fluid barrier layer 15 of the heat driving part 10. After that, by reversing and adhering the separately-made nozzle part 30 to the membrane 20, the fluid jetting apparatus is completed.
FIG. 3A shows a process for diffusing the insulated layer 12 on the substrate 11 of the heat driving part 10, and for forming an exothermic body 13 and an electrode 14 on the insulated layer 12 in turn. Referring to FIG. 3B, working fluid chambers 16 and 17 and a working fluid passage 18 are formed by performing an etching process of the working fluid barrier layer 15 through a predetermined mask patterning. More specifically, the heat driving part 10 is formed as the insulated layer 12, the exothermic body 13, the electrode 14, and the working fluid barrier layer 15 are sequentially laminated on the substrate 11 (which is a silicon substrate). In such a situation, the working fluid chambers 16 and 17 (which are filled with the working fluid to be expanded by heat) are formed on an etched portion of the working fluid barrier layer 15. The working fluid is introduced through the working fluid introducing passage 18.
FIG. 3C shows a process for adhering the separately-made membrane 20 to the upper portion of the completed heat driving part 10. The membrane 20 is a thin diaphragm, which is to be driven toward the jetting fluid chamber 37 (see FIG. 1) by the working fluid which is heated by the exothermic body 13.
FIG. 4A shows a process for manufacturing a nozzle 35 using the laser processing equipment 53 (shown in FIG. 2) after an insulated layer 32 and the nozzle plate 34 are sequentially formed on a substrate 31 of the nozzle part 30. FIG. 4B shows a process for forming the jetting fluid barrier layer 36 on the upper portion of the construction shown in FIG. 4A, and jetting fluid chambers 37 and 38 and the fluid introducing passage by an etching process through a predetermined mask patterning. FIG. 4C shows a process for exclusively separating the nozzle part 30 from the substrate 31 of the nozzle part 30. The nozzle part 30 includes the jetting fluid barrier layer 36 and the nozzle plate 34. On the etched portion of the jetting fluid barrier layer 36, the jetting fluid chambers 37 and 38 filled with the fluid to be jetted are formed. The jetting fluid such as an ink, or the like, is introduced through the jetting fluid introducing passage 39 (see FIG. 1) for introduction of the jetting fluid. The nozzle 35 is formed on the nozzle plate 34 to be interconnected with the jetting fluid chamber 37, so that the fluid is jetted through the nozzle 35. The nozzle part 30 is manufactured by the processes that are shown in FIGS. 4A to 4C. First, the nozzle plate 34 inclusive of the nozzle 35, is formed on the substrate 31 having the insulated layer 32 through an electroplating process. Next, the jetting fluid barrier layer 36 is laminated thereon, and the jetting fluid chambers 37 and 38 and the jetting fluid introducing passage 39 are formed through a lithographic process. Finally, as the insulated layer 32 and the substrate 31 are removed, the nozzle part 30 is completed. The completed nozzle part 30 is reversed, and then adhered to the membrane 20 of a membrane/heat driving part assembly which has been assembled beforehand. More specifically, the jetting fluid barrier 36 of the nozzle part 30 is adhered to the polyimide coated layer 21 of the membrane 20.
The operation of the fluid jetting apparatus according to the thermal compression system will be described below with reference to the construction shown in FIG. 1.
First, an electric power is supplied through the electrode 14, and an electric current flows through the exothermic body 13 connected to the electrode 14. Since the exothermic body 13 generates heat due to its resistance, the fluid within the working fluid chamber 16 is subjected to a resistance heating, and the fluid starts to vaporize when the temperature thereof exceeds a predetermined temperature. As the amount of the vaporized fluid increases, the vapor pressure accordingly increases. As a result, the membrane 20 is driven upward. More specifically, as the working fluid undergoes a thermal expansion, the membrane 20 is pushed upward in a direction indicated by the arrow in FIG. 1. As the membrane 20 is pushed upward, the fluid within the jetting fluid chamber 37 is jetted out toward an exterior through the nozzle 35.
Then, when the supply of electric power is stopped, the resistance heating of the exothermic body 13 is no longer generated. Accordingly, the fluid within the working fluid chamber 16 is cooled to a liquid state, so that the volume thereof decreases and the membrane 20 recovers its original shape.
Meanwhile, a conventional material of the nozzle plate 34 is mainly made of nickel, but the trend in using the material of a polyimide synthetic resin has increased recently. When the nozzle plate 34 is made of the polyimide synthetic resin, it is fed in a reel type. The fluid jetting apparatus is completed by the way a chip laminated from the silicon substrate to the jetting fluid barrier layer 36 is bonded on the nozzle plate 34 fed in the reel type.
According to the conventional fluid jetting apparatus and its manufacturing process, however, since the heat driving part, the membrane, and the nozzle part have to be separately made before such are adhered to each other by three adhering processes, the productivity has been decreased. Further, since the adhesion between the heat driving part and the membrane, and between the membrane and the nozzle part are often unreliable, the working fluid and the jetting fluid often leak, so that a fraction defective has been increased, and the reliability and quality of the fluid jetting apparatus has been deteriorated.
The present invention has been made to overcome the above-described problems of the prior art, and accordingly it is an object of the present invention to provide a fluid jetting apparatus and a manufacturing process thereof capable of improving the reliability, quality and the productivity of the fluid jetting apparatus by sequentially laminating a heat driving part, a membrane, and a nozzle part to form the fluid jetting apparatus, instead of adhering the same to each other.
Additional objects and advantages of the invention will be set forth in part in the description which follows and, in part, will be obvious from the description, or may be learned by practice of the invention.
The above and other objects are accomplished by a method of manufacturing a fluid jetting apparatus according to the present invention, including: (1) forming a heat driving part having a sacrificial layer; (2) forming a membrane on the heat driving part which includes the sacrificial layer; (3) forming a nozzle part on the membrane; and (4) removing the sacrificial layer.
The step (1) includes: (i) forming an electrode and an exothermic body on a substrate; (ii) laminating a working fluid barrier on the electrode and the exothermic body, and forming a working fluid chamber in the working fluid barrier; (iii) forming a protective layer on the working fluid barrier, the electrode, and the exothermic body; (iv) forming a sacrificial layer on the protective layer and within the working fluid chamber at the same height as the working fluid barrier.
Further, the step (1) may otherwise include: (i) forming an electrode and an exothermic body on a substrate; (ii) forming a plane layer on the substrate at the same height as the electrode and the exothermic body combined; (iii) laminating a protective layer on the electrode and the plane layer; (iv) laminating the working fluid barrier on the protective layer, and forming a working fluid chamber in the working fluid barrier; and (v) forming the sacrificial layer on the protective layer and within an interior of the working fluid chamber at the same height as the working fluid barrier.
The step (2) is performed through a spin coating process.
The step (3) includes: (i) laminating a jetting fluid barrier on the membrane, and forming a jetting fluid chamber in the jetting fluid barrier; and (ii) laminating a nozzle plate on the jetting fluid barrier, and forming a nozzle in the nozzle plate. The nozzle plate is laminated through a process for laminating a dry film.
The above and other objects of the present invention may further be achieved by providing a fluid jetting apparatus including a heat driving part which generates a driving force, a nozzle part having a jetting fluid chamber interconnected to an exterior of the fluid jetting apparatus through a nozzle, and a membrane which transmits the driving force generated from the heat driving part to the nozzle part, wherein the heat driving part comprises: an electrode and an exothermic body formed on a substrate; a plane layer formed on the substrate at the same height as the electrode and the exothermic body combined; a protective layer laminated on the plane layer; and a working fluid barrier laminated on the protective layer, and provided with the working fluid chamber for holding a working fluid which is expanded by the exothermic body to generate the driving force.
|
|
Event Description
Professor Bill Lee, Former Director of the Centre for Nuclear Engineering, Imperial College London
Speakers include:
Dr Dan Poulter MP
Tim Yeo, Chairman, New Nuclear Watch Europe (NNWE)
Nick Butler, Energy Commentator, Financial Times
Peter Atherton, Associate, Cornwall Energy
New Nuclear Watch Europe (NNWE) invites you to attend our upcoming Parliamentary Briefing on The future of nuclear energy in Europe following Brexit, due to be held in the House of Commons, London, on the 14 March 2017 at 16.00.
The event will focus on the opportunities and challenges facing the nuclear energy sector across Europe following Brexit. With the UK moving forward with a pipeline of new nuclear build projects, most recently with the CGN-EDF Hualong 1 application for GDA approval, this event will bring together leading policymakers, industry, academics and commentators to discuss how Europe can continue to be a global leader in nuclear energy development.
NNWE intends to promote discussion on the need for a Pan-European, or EU+, policy framework when discussing new nuclear build. With Brexit likely to occur in 2019, and the recent announcement that the UK will be pulling out of the Euratom Treaty, NNWE envisages the development of an Organisation for Nuclear Cooperation and Development in Europe, to continue and further enhance nuclear cooperation.
The latest EU PINC report highlights that 105GWe of new nuclear generation will be needed by 2050 – roughly 100 new reactors – to meet existing demand and climate change targets. However, only eighteen nuclear power plants are in development, planned, or proposed within the European Union itself, whereas ninety-five reactors are planned throughout our EU neighbours – including Belarus, Russia, Switzerland, Turkey, Ukraine and now the UK. NNWE believes an organisation is needed to drive the future of nuclear energy development across Europe and help us reach the ambitious 2050 target.
Agenda (subject to change)
Time
Description
16.00
Registration and light refreshments
16.15
Introduction
Dr Dan Poulter MP
Professor Bill Lee, Former Director of the Centre for Nuclear Engineering, Imperial College London
NNWE was established at the end of 2014 under the chairmanship of Tim Yeo (former UK Member of Parliament and Chair of the House of Commons Energy and Climate Change Select Committee) and is an interest group which aims to ensure nuclear power is recognised as an important and desirable way for European governments to meet the long-term security needs of their countries.
|
|
Tetsuya Nakashima
Tetsuya Nakashima (中島哲也) (born 1959) is a Japanese film director. He was born in Fukuoka, attending high school in Chikushino. Nakashima was given the Best Director award at the 2005 Yokohama Film Festival for his film Kamikaze Girls.
His 2010 film Confessions was selected as the Japanese entry for the Best Foreign Language Film at the 83rd Academy Awards and made the final shortlist in January 2011.
He was originally slated to direct an adaptation of the hit manga Attack on Titan, but in December 2012 he left the project due to differences with the rest of the production team.
Filmography
Bakayaro! I'm Plenty Mad (1988) (segment 2)
Happy-Go-Lucky (1997)
Beautiful Sunday (1998)
Kamikaze Girls (2004)
Rolling Bomber Special (2005)
Memories of Matsuko (2006)
Paco and the Magical Picture Book (2008)
Confessions (2010)
The World of Kanako (2014)
It Comes (2018)
References
External links
Category:1959 births
Category:Living people
Category:Japanese film directors
Category:People from Fukuoka Prefecture
|
|
1. Field of the Invention
The invention relates generally to a device that attaches to a telephone for the purpose of lifting up the receiver end of a telephone handset (hook-switching).
2. Description of the Prior Art
Many of the newest telephone systems that are coming out on the market have what is called electronic hook-switching. This is basically a button, that when pressed, will give a dial tone for a telephone headset. This is a very convenient option for people who use telephone headsets, but the problem still remains that there are literally millions of telephones on the market that do not have this option.
Until now, the only option that people have had to alleviate this problem is to physically pick up the handset every time the telephone rings, and place the headset off to the side of the telephone base. This procedure is time and space consuming.
Another method that is commonly used when getting a dial tone, is to balance the telephone handset just up and to the side of the telephone's hook-switch. The major problem with this solution is that if accidentally bumped or moved, the handset will fall back into place and one will hang up the line.
The present invention overcomes the prior art practices by providing a mechanical handset lift for lifting the receiver end of a telephone handset off the hook-switch and pivoting the handset about the microphone end, but leaving the handset centrally positioned over and about the telephone body.
The general purpose of the present invention is to provide a mechanical device for lifting the receiver end of a telephone handset off the telephone hook-switch to allow electrical operation of a remote handset receiver/mouthpiece while still leaving the handset placed over and about the telephone base unit.
According to one object of the present invention, there is provided a vertically oriented base for mounting to the side of a telephone base. A moveable pivot shaft extends through an upper region of the vertically oriented base end, which includes a lift rod secured to one end of the pivot shaft and a lift rod lever handle secured to the opposite end of the pivot shaft. A stop shaft limits the over center travel of the lift rod lever handle and the lift rod to allow on hook or off hook positioning of a telephone handset receiver.
According to an alternate embodiment of the present invention, there is provided a vertical base member with a lift rod and lift lever secured about the base member in positive locked alignment and also having rotational stops aligned on a surface of the vertical base member.
One significant aspect and feature of the present invention is mechanical handset lift that will mechanically lift up the receiver end of a telephone handset off the hook-switch so that a dial tone may be obtained for the telephone headset in use.
Another significant aspect and feature of the present invention is a mechanical handset lift which will lift the receiver end of a telephone handset off the hook-switch so as to allow a user to use either the telephone handset or a telephone headset.
A further significant aspect and feature of the present invention is a mechanical handset lift which will lift the receiver end of a telephone handset off the hook-switch and which will result in the environment on a person""s desk being less cluttered due to the absence of a telephone handset lying off to the side of the telephone base while the telephone handset is in use.
Yet another significant aspect and feature of the present invention is a mechanical handset lift that will mechanically lift up the receiver end of a telephone handset in such a manner that will greatly increase the chances of not accidentally hanging up the telephone while a telephone headset is in use.
Another significant aspect and feature of the present invention is a lift rod and lift rod handle in positive angular engagement with each other about a base unit.
Another significant aspect and feature of the present invention is stops which define rotational movement of the lift rod and lift rod handle with respect to the base of a telephone.
Having thus described the embodiments of the present invention, it is the principal object hereof to provide a mechanical handset lift.
The present invention relates to a mechanical handset lift device that will enable the telephone user to enable and disable the telephone's hook-switch capabilities without the inconvenience of picking up the telephone and placing it on the desk. Currently, the only means to do this is by placing the telephone handset on and off the hook-switch. The problems that arise from this method are 1) one has to physically pick up the handset every time the telephone rings, 2) one has to lay the handset on the desk (for many people this takes up just too much room), 3) if the telephone allows one to balance the handset off to the right side of the hook switch, one may bump the telephone, and accidentally hang up.
The invention uses the handset's own mold to accomplish the goal of hook-switching, and allows the handset to be used as well. The present invention also creates an environment where it is virtually impossible to accidentally hang up the telephone. This is a very common problem when the telephone is balanced to the side of the hook-switch.
It is an object of the present invention to provide a device that will enable a telephone handset operator to use both the telephone handset or headset conveniently, without the problems that are currently plaguing the telephone headset industry.
|
|
Seven rare rhinos spotted in Indonesian jungle
August 9, 2012 in Biology / Ecology
In this undated photo released by Leuser International Foundation, a Sumatran rhino roams at Gunung Leuser National Park in Aceh province, Indonesia. A conservationist from the foundation said Thursday, Aug. 9, 2012 that seven of the world's rarest rhinoceroses were photographed at the national park. It is the first sighting there in 26 years. (AP Photo/Leuser International Foundation) NO SALES
Seven Sumatran rhinos have been captured on hidden cameras in an Indonesian national park where the critically endangered species was feared extinct, a conservationist said Thursday.
The Sumatran rhino had not been sighted in the Mount Leuser National Park on the northern tip of Sumatra for 26 years, the project's team leader Tarmizi of the Leuser International Foundation said.
"This discovery can allay doubts over the rhino's presence in the park," Tarmizi told AFP, adding he hoped the discovery would encourage more efforts to conserve the species.
Images of the rhinos were captured by 28 infrared cameras set up between June 2011 and April this year and confirmed six female and one male rhino appearing in 1,000 photo frames.
The Sumatran rhino population has dropped 50 percent over the past 20 years, and there are now believed to be fewer than 200 left in the world.
The rhinos are commonly targeted by poachers and rampant illegal logging has destroyed much of their habitat.
|
|
Notice # 01-184
May 31, 2001
TO:
All NYMEX Division Members and Member Firms
FROM:
Neal L. Wolkoff, Executive Vice President
RE:
Reminder on Use of Money Market Funds as Original Margin Deposits on the NYMEX Division
DATE:
May 31, 2001
===========================================================
This Notice is a reminder regarding certain rule changes and related policy guidelines that will go into effect on June 1, 2001 for the NYMEX Division. The rule changes going into effect on June 1, 2001 on the NYMEX Division allow shares of certain money market mutual funds to be acceptable for purposes of original margin deposits.
Corresponding rule changes for the COMEX Division have also been approved by the NYMEX Board of Directors and filed with the CFTC; the changes for the COMEX Division will be implemented at a later date.
Rule Amendments
The amendments generally require that in order to be used for such purpose, a money market fund must be approved by the NYMEX Board and also must comply with CFTC Regulation § 1.25. For purposes of original margin, the Exchange's Clearing House will value such money market fund shares at 95% of their market value. In addition, a Clearing Member's participation in any approved fund or any group of approved funds offered by the same issuer shall be limited to the greater of $250,000 or 25% of the Clearing Member's total original margin obligations. Finally, no more than 25% of the total assets of an approved money market mutual fund may be used to meet original margin obligations at the Exchange.
Exchange Policy on Money Market Funds
The NYMEX Board of Directors also recently adopted three additional guidelines that will be applicable to such funds. First, until further notice from the Exchange, the Board has determined to limit the number of money market funds available for this purpose to ten. Second, the Board will require that henceforth each fund applying for such status must have a minimum value of $5 billion. Finally, each fund further must provide for same day payment if notification is made by 3:00 p.m. on that day.
If you have any questions concerning this change, please contact Bernard Purta, Senior Vice President, Regulatory Affairs and Operations, at (212) 299- 2380; Arthur McCoy, Vice President, Financial Surveillance Section, NYMEX Compliance Department, at (212) 299-2928; or Joseph Sanguedolce, Director, Financial Surveillance Section, NYMEX Compliance Department, at (212) 299-2855.
AMENDMENTS TO NYMEX RULE 9.05 ("MARGINS")
(Asterisks indicate additions; brackets indicate deletions.)
Rule 9.05. MARGINS
* * * *
(E) Clearing Members may meet original margin calls by depositing:
*(4) Shares in a money market mutual fund that complies with CFTC Regulation § 1.25 and that has been approved by the Board, subject to the following conditions:
(i) for purposes of original margin, such shares will be valued at 95% of market value;
(ii) a Clearing Member's participation in any approved fund or any group of approved funds offered by the same issuer shall be limited to the greater of $250,000 or 25% of the Clearing Member's total original margin obligations;
(iii) no more than 25% of the total assets of an approved money market mutual fund may be used to meet original margin obligations at the Exchange.*
[Shares of Brown Brothers Harriman & Co. Common Settlement Fund, valued at 95% of market value.]
__________________________________________________
Please click on the link below to indicate you have received this
email.
"http://208.206.41.61/email/[email protected]&refdoc=(01-184)"
Note: If you click on the above line and nothing happens, please copy
the text between the quotes, open your internet browser,
paste it into the web site address and press Return.
|
|
Management of ventricular tachycardia in the ablation era: results of the European Heart Rhythm Association Survey.
Patients with sustained ventricular tachycardia (VT) are at risk of sudden death. Treatment options for VT include antiarrhythmic drug therapy, insertion of an implantable cardioverter-defibrillator, and catheter ablation. Evidence on indications for VT ablation, timing, ablation strategies, and periprocedural management is sparse. The aim of this European Heart Rhythm Association (EHRA) survey was to evaluate clinical practice regarding management of VT among the European countries. An electronic questionnaire was sent to members of the EHRA Electrophysiology Research Network. Responses were received from 88 centres in 12 countries. The results have shown that management of VTs is very heterogeneous across the participating centres. Indications, periprocedural management, and ablation strategies vary substantially. This EP Wire survey has revealed that catheter ablation is the first-line therapy for the treatment of recurrent monomorphic stable VT in patients without structural heart disease as well as in patients with ischaemic cardiomyopathy and impaired left ventricular ejection fraction in the majority of centres. Furthermore, in patients with ischaemic cardiomyopathy and the first episode of monomorphic VT, most centres (62.0%) performed catheter ablation. On the contrary, in patients with non-ischaemic cardiomyopathy, amiodarone (41.4%) and catheter ablation (37.1%) are used in a very similar proportion. Ablation strategies, endpoints, and post-ablation antithrombotic management vary substantially among European centres.
|
|
Is there a ProductHunt without the “selection process”? - hoodoof
i.e. a site that actually shows what's new, not just what ProductHunt thinks we should see is new?
======
ledil
[http://www.produktfang.de](http://www.produktfang.de)
I'm the author of produktfang. We aggregate new apps and show them on the
front page ... there is no "community" that decides what should be shown or
not like in producthunt.
------
getdavidhiggins
[http://urli.st/](http://urli.st/)
Lots of products can be found on URLIST. It's basically product hunt, except
not sabotaged by trends and a karma system
|
|
Background
==========
Polysaccharide-rich fungi and plants have been employed for centuries by cultures around the world for their dietary and medicinal benefits \[[@B1]-[@B5]\]. Often thought to merely support normal bowel function and blood glucose and lipid levels \[[@B6]-[@B8]\], certain polysaccharides have attracted growing scientific interest for their ability to exert marked effects on immune system function, inflammation and cancers \[[@B9]-[@B11]\]. Many of these chemically and structurally diverse, non- to poorly-digestible polysaccharides have been shown to beneficially affect one or more targeted cellular functions *in vitro*\[[@B11]-[@B16]\], but much of the *in vivo*literature consists of studies in which polysaccharides were injected \[[@B1],[@B2]\]. For clinicians and scientists interested in immunologic effects following dietary intake, the value of such studies is uncertain. Polysaccharides that elicit effects *in vitro*or by injection may be ineffective or have different effects when taken orally \[[@B17]\]. We thus decided to conduct a systematic review to evaluate the specific immunologic effects of dietary polysaccharide products on rodents and human subjects.
Methods
=======
Literature review
-----------------
Studies were identified by conducting electronic searches of PubMed and Google Scholar from their inception to the end of October 2009. The reference lists of the selected articles were checked for additional studies that were not originally found in the search.
Study selection and data extraction
-----------------------------------
The following search terms were combined with the term polysaccharide: dietary AND immune, or oral AND immune, or dietary AND inflammation, or oral AND inflammation. When specific polysaccharides or polysaccharide-rich plants and fungi were identified, further searches were conducted using their names with the same search terms. Studies were selected based on the following inclusion criteria:
1\. Rodent or human studies
2\. The presence of test group and control group (using either placebo, crossover, sham, or normal care)
3\. Studies reporting statistically significant immunomodulatory effects
4\. English language
5\. Studies published up to October 2009.
Two researchers (JER, EDN) reviewed the list of unique articles for studies that fit the inclusion criteria. Uncertainties over study inclusion were discussed between the researchers and resolved through consensus. Searches were then conducted to obtain specific polysaccharide product information: safety (using the search terms: toxicity, NOAEL, LD~50~), composition and structure, and disposition.
Quality assessment
------------------
Each study was assessed as to whether or not it reported a significant outcome measure for the polysaccharide intervention group.
Results
=======
A total of 62 rodent publications (Tables [1](#T1){ref-type="table"}, [2](#T2){ref-type="table"} and [3](#T3){ref-type="table"}) and 15 human publications (Table [4](#T4){ref-type="table"}) were deemed appropriate for inclusion in this review. Available structural and compositional information for these immunomodulatory polysaccharides are provided in Table [5](#T5){ref-type="table"} and safety information is provided in Table [6](#T6){ref-type="table"}. The majority of animal studies explored models in which animals were injected or implanted with cancer cells or tumors, were healthy, or were exposed to carcinogens. Other studies investigated immunodeficient, exercise-stressed, aged animals, or animals exposed to inflammatory agents, viruses, bacterial pathogens, pathogenic protozoa, radiation or mutagens. Human studies assessed immunomodulatory effects in healthy subjects, or patients with cancers, seasonal allergic rhinitis or aphthous stomatitis. Because of the limited number of human studies, we included some promising open-label controlled trials. Human study durations ranged from four days to seven years; daily doses ranging from 100-5,400 mg were reported to be well-tolerated.
######
Immunomodulatory Glucan Extracts: Oral Animal Studies
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Source Extract Animal Dose/day Duration of study Treatment Effects Reference
------------------------------------- ---------------------------------- --------------------------------------------------------------------------- ---------------------------------------------------------------------- ------------------- ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ ------------
*Agaricus*\ α-1,6 and\ 8-week ♀ C3H/He mice (5/group) 100 mg/kg IG every 3 days 1 month Healthy animals ↑ \#s splenic T lymphocytes (Thy1.2, CD4+ and CD8+) \[[@B24]\]
(*A. blazei*) *subrufescens* α-1,4 glucans
Aqueous 7-9-week ♂ Balb/cByJ mice (40/group) 1 ml 0.45N, 0.6N, or 3N aqueous extract 2 months All doses ↑ serum IgG levels, CD3+ T cell populations and PML phagocytic activity \[[@B22]\]
7-9-week male Balb/cByJ mice (40/group) 1 ml 0.45N, 0.6N, or 3N aqueous extract 10 weeks IP injection of OVA at 4 weeks 0.6N and 3N ↑ levels of OVA-specific serum IgG 28 days post-immunization; all doses ↑ delayed-type hypersensitivity and TNF-α secreted from splenocytes at 10 weeks; 0.6N ↑ splenocyte proliferation at 10 weeks
5-6 -week ♀ BALB/cHsdOla mice (8/group × 2) One 200 μl extract day 1, orogastric intubation 1 week Injected IP fecal solution day 2 ↓ CFU in blood of mice with severe peritonitis & improved overall survival rate in all peritonitis groups \[[@B46]\]
6-week BALB/c nu/nu mice (7/group) 2.5 mg extract days 20-41, drinking water 41 days Injected SC Sp-2 myeloma cells day 1 ↓ tumor size & weight after 21 days treatment \[[@B65]\]
Aqueous, acid treated 6-week ♀ C57BL/6 mice (10/group) 20, 100 or 500 μg/ml, drinking water 9 days Injected IP human ovarian cancer cells day 1 500 μg/ml ↓ tumor weight \[[@B66]\]
20, 100 or 500 μg/ml, drinking water 3 weeks Injected IV murine lung cancer (3LL) cells 100 & 500 μg/ml ↓ \#s metastatic tumors
Aqueous, with 200 ng/day\ 6-week ♀ BALB/c mice (10/group) 200 ng days 5-21 3 weeks Injected Meth A tumor cells day 1 ↓ tumor size & weight \[[@B23]\]
β-glucan
2 weeks Injected Meth A tumor cells ↑ cytotoxic T lymphocyte activity & spleen cell IFN-α protein
300 mg 5 days Healthy animals ↑ splenic NK cell activity
*Avena*spp. β-glucans (particulate) 6-7 -week ♀ C57BL/6 mice (7/group) 3 mg every 48 h, days 1-3 1 month Oral *E. vermiformis*oocytes day 10 ↓ *E. vermiformis*fecal oocyte \#s; increased intestinal anti-merozoite IgA; ↓ \# of IL-4-secreting MLN cells \[[@B42]\]
3 mg on alternating days, days 1-10 22 days Injected IP *Eimeria vermiformis*day 10 ↓ *E. vermiformis*fecal oocyte \#s; ↑ anti-merozoite intestinal IgA \[[@B43]\]
β-glucans (soluble) 4-week ♂ CD-1 mice (24/group) 0.6 mg/ml 68% β-glucan, drinking water 1 month Resting or exercise-stressed (days 8-10) animals administered HSV-1 IN\ ↓ morbidity in resting and exercise-stressed animals; ↓ mortality in exercise-stressed animals; pre-infection, ↑ Mø anti-viral resistance in resting and exercise-stressed animals \[[@B38]\]
day 10
\~3.5 mg days\ Resting or exercise-stressed (days 5-10) animals administered HSV-1 IN\ Pre-infection, ↑ Mø antiviral resistance in resting animals \[[@B41]\]
1-10, drinking water day 10
4-week ♂ CD-1 mice (10/group) 0.6 mg/ml 68% β-glucan, drinking water 10 days Resting animals or animals exposed to a bout of fatiguing exercise days 8-10 or moderate exercise days 5-10, injected IP with thioglycollate on day 10 ↑ neutrophil mobilization in resting & moderately exercised animals; ↑ neutrophil respiratory burst activity in resting and fatiguing exercised animals \[[@B37]\]
4-week ♂ CD-1 mice (19-30/group) 0.8 mg/ml 50% β-glucan, days\ 1 month Resting or exercise-stressed (days 8-10) animals administered IN clodronate-filled liposomes to deplete Mø days 8 & 14 & infected IN with HSV-1 day 10 ↓ morbidity, mortality, symptom severity in exercise-stressed animals, without Mø depletion \[[@B40]\]
1-10, drinking water
4-week ♂ CD-1 mice (20/group) Resting or exercise-stressed (days 8-10) animals administered HSV-1 IN day 10 ↓ morbidity in exercise-stressed & resting animals; ↓ mortality in exercise-stressed animals \[[@B39]\]
*Ganoderma lucidum* Aqueous 7-week ♂ CD-1 mice (26/group) 5% of diet 5 months Injected IM DMH once a week, weeks 1-10 ↓ aberrant crypt foci per colon, tumor size, cell proliferation, nuclear staining of β-catenin \[[@B69]\]
4-8-week BALB/c mice (10/group) 50, 100 or 200 mg/kg, oral 10 days Injected SD Sarcoma 180 cells ↓ of tumor weight was dose dependent: 27.7, 55.8, 66.7%, respectively \[[@B67]\]
*Ganoderma lucidum*(mycelia) Aqueous 7-week ♂ F344/Du Crj rats (16/group) 1.25% or 2.5% of diet 6 months Injected SC AOM once a week, weeks 2-5 Both doses ↓ colonic adenocarcinoma incidence; 2.5% ↓ total tumor incidence; both doses ↓ nuclear staining of β-catenin and cell proliferation \[[@B68]\]
*Ganoderma tsugae* Aqueous 8-week ♀ BALB/cByJNarl mice (14/group) 0.2-0.4% of diet (young fungi); 0.33 or 0.66% of diet (mature fungi) 5 weeks Injected IP OVA days 7, 14, 21; aerosolized OVA twice during week 4 In splenocytes, both doses of both extracts ↑ IL-2 and IL-2/IL-4 ratios, 0.2% young extract and 0.66% mature extract ↓ IL-4; in Mø, 0.66% mature extract ↑ IL-1β, both doses of both extracts ↑ IL-6 \[[@B53]\]
*Grifola frondosa* D fraction Mice: 1) ICR, 2) C3H/HeN, 3) CDF~1~(10/group) 1.5 mg every other day, beginning day 2 13 days Implanted SC: 1) Sarcoma-180, 2) MM-46 carcinoma, or 3) IMC carcinoma cells ↓ tumor weight & tumor growth rate: 1) 58%, 2) 64%, and 3) 75%, respectively \[[@B71]\]
5-week ♂ BALB/c mice (10/group) 2 mg,\ 45 days Injected in the back with 3-MCA, day 1 ↓ (62.5%) \# of animals with tumors; ↑ H~2~O~2~ production by plasma Mø; ↑ cytotoxic T cell activity \[[@B72]\]
days 15-30
*Hordeum vulgare* β-1,3;1,4 or β-1,3;1,6-D-glucans Athymic nu/nu mice\ 40 or 400 μg IG for 4 weeks 31 weeks Mice with human xenografts (SKMel28 melanoma, A431 epidermoid carcinoma, BT474 breast carcinoma, Daudi lymphoma, or LAN-1 neuroblastoma) ± mAb (R24, 528, Herceptin, Rituximab, or 3F8, respectively) therapy twice weekly 400 μg + mAb ↓ tumor growth & ↑ survival; higher MW ↓ tumor growth rate for both doses \[[@B75]\]
(4-12/group)
β-1,3;1,4-D-glucans Athymic BALB/c mice 4, 40, or 400 μg for 3-4 weeks 1 month Mice with neuroblastoma (NMB7, LAN-1, or SK-N-ER) xenografts, ± 3F8 mAb therapy twice weekly 40 and 400 μg doses + mAB ↓ tumor growth; 400 μg dose ↑ survival. Serum NK cells required for effects on tumor size \[[@B76]\]
C57BL/6 WT and CR3-deficient mice (10/group) 0.4 mg for 3 weeks 100 days Injected SC RMA-S-MUC1 lymphoma cells day 1 ± IV 14.G2a or anti-MUC1 mAb every 3rd day ±mAB ↓ tumor diameter; ↑ survival \[[@B73]\]
β-glucans ♀ Fox Chase ICR immune-deficient (SCID) mice (9/group) 400 μg days 1-29 50 days Mice with human (Daudi, EBV-BLCL, Hs445, or RPMI6666) lymphoma xenografts, ± Rituximab mAb therapy twice weekly +mAB ↓ tumor growth and ↑ survival \[[@B74]\]
*Laminaria digitata* Laminarin ♂ ICR/HSD mice (3/group) 1 mg 1 day Healthy animals ↑ Mø expression of Dectin-1 in GALT cells; ↑ TLR2 expression in Peyer\'s patch dendritic cells \[[@B29]\]
♂ Wistar rats (7/group) 5% of diet days 1-4, 10% of diet days 5-25 26 days Injected IP *E. coli*LPS day 25 ↓ liver ALT, AST, and LDH enzyme levels; ↑ ED2-positive cells, ↓ peroxidase-positive cells in liver; ↓ serum monocytes, TNF-α, PGE2, NO~2~ \[[@B44]\]
*Lentinula edodes* SME 6-week nude mice 0.1 ml water with 10% SME/10 g body weight days 1-19, 33-50 50 days Injected SC prostate cancer (PC-3) cells day 1 ↓ tumor size \[[@B80]\]
β-glucans ♀ 3- and 8-week BALB/c mice (15/group) 50, 100 or 250 μg 1-2 weeks Healthy animals 250 μg dose ↑ spleen cell IL-2 secretion \[[@B27]\]
♀ 3- and 8-week BALB/c mice (15/group) 50, 100 or 250 μg 1-2 weeks Injected murine mammary carcinoma (Ptas64) cells into mammary fat pads 2 weeks before treatment ↓ tumor weight
Lentinan 6-week ♂ Wistar-Imamichi specific-pathogen free rats (10/group) 1 mg twice weekly 1-2 months Healthy animals ↑ T cell \#s, helper-cell \#s & helper/suppressor ratio, ↓ suppressor cell level at 4, but not 8 weeks \[[@B26]\]
5-6-week ♂\ 3 mg, days 1-7 3 weeks Injected SC K36 murine lymphoma cells day 7 ↓ tumor weight; ↑ tumor inhibition rate (94%) \[[@B82]\]
pre-leukemic AKR mice (10/group)
5-6-week athymic mice (10/group) 5 weeks Injected SC colon cancer (LoVo and SW48, SW480 and SW620, or SW403 and SW1116) cells day 7 ↓ tumor weight, ↑ tumor inhibition rate (\>90%)
♂ AKR mice 3 mg 1 day Pre-leukemic mice ↑ serum IFN-α and TNF-α, peak at 4 h and then back to normal at 24 h; ↑ IL-2 and IL-1α, peak at 2 h and back to normal at 24 h; ↑ CD3+ T, CD4+ T, CD8+ T, B lymphocytes \[[@B81]\]
*Phellinus linteus* Aqueous, alcohol-precipitated 6-7-week C57BL/6 mice (10-50/group) 200 mg/kg in drinking water 1 month Healthy animals ↑ production and secretion of IFN-γ by con A stimulated T cells \[[@B32]\]
*Saccharomyces cerevisiae* Scleroglucan ♂ ICR/HSD mice (3/group) 1 mg one day before challenge (day 1) 6 days IV *Staphylococcus aureus*or *Candida albicans*day 2 ↑ long-term survival \[[@B29]\]
β-1,3;1,6 glucans (particulate) 3 and 8-week ♀ BALB/c mice (15/group) 50, 100 or 250 μg 1-2 weeks Injected murine mammary carcinoma (Ptas64) cells into mammary fat pads 2 weeks before treatment ↓ tumor weight \[[@B27]\]
β-1,3-glucan Healthy animals All 3 doses ↑ phagocytic activity of blood monocytes & neutrophils & ↑ spleen cell IL-2 secretion
WT or CCD11b^-/-^C57BL/6 mice (2/group) 0.4 mg for 3 weeks 100 days Injected SC RMA-S-MUC1 lymphoma cells ± 14.G2a or anti-MUC1 mAb IV injection every 3^rd^day ↓ tumor diameter when included with mAb; ↑ survival with and without mAb \[[@B73]\]
C57BL/6mice (4/group) 25 mg 1 week Healthy animals ↑ \# intestinal IELs; ↑ \# TCRαβ+, TCR γδ+, CD8+, CD4+, CD8αα+, CD8αβ+ T cells in IELs; ↑ IFN-γ mRNA expression in IELs and spleen \[[@B28]\]
*Sclerotinia sclerotiorum* SSG 6-8-week specific pathogen-free ♂ CDF~1~mice (3/group) 40 or 80 mg/kg days 1-10 2 weeks Healthy animals 10 mg dose ↑ acid phosphatase activity of peritoneal Mø (day 14) \[[@B30]\]
40, 80 or 160 mg/kg days 2-6 35 days Implanted SC Metha A fibrosarcoma cells day 1 80 mg dose ↓ tumor weight
6-8-week specific pathogen-free ♂ CDF~1~mice (10/group) 40, 80 or 160 mg/kg days 2-11 Injected ID IMC carcinoma cells day 1
6-8-week specific-pathogen free ♂ mice of BDF1 and C57BL/6 mice (7/group) 0.5, 1, 2, or 4 mg days 1-10 2-3 weeks Injected IV Lewis lung carcinoma (3LL) cells 2 mg ↓ \# of 3LL surface lung nodules at 2 weeks \[[@B83]\]
*Sclerotium rolfsii* Glucan phosphate ♂ ICR/HSD mice (3/group) 1 mg 1 day Healthy animals ↑ systemic IL-6; ↑ Mø expression of Dectin-1 in GALT cells; ↑ TLR2 expression in dendritic cells from Peyer\'s patches \[[@B29]\]
*Trametes*(*Coriolus*) *versicolor* PSP 6-8-week ♂ BALB/c mice (10/group) 35 μg days 5-29 in drinking water 29 days Implanted SC Sarcoma-180 cells day 1 ↓ tumor growth & vascular density \[[@B94]\]
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
######
Immunomodulatory Non-Glucan Extracts: Oral Animal Studies
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Extract Source Animal Oral dose/day Duration Treatment Significant effects Reference
---------------------------------------------------------------------- ------------------------------------------------------------------ ------------------------------------------------------------------------------------------------------- --------------------------------------------------------------------------------------- ---------- ---------------------------------------------------------------------------------------------------------------------- ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -------------
Fucoidans *Cladosiphon okamuranus Tokida* 8-week ♀ BALB/c mice, 10/group 0.05% w/w of diet 56 days DSS-induced UC ↓ disease activity index and myeloperoxidase activity; ↓ \# of B220-positive colonic B cells; ↓ colonic MLN IFN-γ and IL-6 and ↑ IL-10 and TGF-β; ↓ colonic IgG; ↓ colonic epithelial cell IL-6, TNF-α, and TLR4 mRNA expression \[[@B49]\]
*Undaria pinnatifida* 5-week ♀ BALB/c mice (10-12/group) 5 mg, days 1-14 or 7-14 2 weeks Injected HSV into cornea day 7 ↓ facial herpetic lesions; ↑ survival, particularly in pre-treated animals \[[@B45]\]
10 mg 1 week Administered\ ↑ plasma NK cell activity
5-fluorouracil
Injected SC HSV ↑ cytotoxic splenic T lymphocyte activity
0.1 or 0.5 mg 3 weeks Injected IP HSV Both doses ↑ serum neutralizing Ab titers, weeks 2 and 3
6-week ♂ ddY mice (5/group) 50, 100, 200, 400 or\ 3 weeks Injected with Ehrlich carcinoma in back day 14 200-500 mg/kg ↓ tumor growth \[[@B116]\]
500 mg/kg\
days 1-28
6-week ♂ BALB/c mice (8/group) 40 mg/kg alternating days\ 19 days Injected IP Meth A fibrosarcoma day 1 ↓ tumor growth
7-19
Furanose (COLD-FX^®^) *Panax quinquefolium* Weanling ♂ SD rats (10/group) 450 or\ 1 week Healthy animals Both doses ↑ spleen Il-2 and IFN-γ production following ConA or LPS stimulation; ↓ proportion of total MLN and Peyer\'s patch CD3+ cells & activated T cells; high dose ↑ spleen cell IL-1β production following 48 h ConA stimulation. \[[@B33]\]
900 mg/kg in food
Galacto-mannan (partially hydrolyzed guar gum) *Cyamopsis tetragonolobus* 10-week ♀ BALB/c mice,\ 5% of diet 3 weeks DSS-induced UC at beginning of\ ↓ disease activity index scores, ↓ colonic mucosal myeloperoxidase activity & lipid peroxidation; ↓ colonic TNF-α protein levels & mRNA expression up regulated by DSS exposure \[[@B50]\]
11-15/group week 3
Galacto-mannans\ 8-month- SD rats, 5/group 5% of diet 3 weeks Older animals ↓ serum IgG; ↑ MLN lymphocyte IgA, IgM and IgG production \[[@B36]\]
(guar gum)
Glucomannan (KS-2) *Lentinula edodes* DD1 mice (10-20/group) 140 mg/kg days\ 50 days Injected IP Ehrlich ascites tumor cells day 1 ↑ survival \[[@B84]\]
2-13
0.1, 1, 10, or 100 mg/kg dose days 2-13 100 days Injected Sarcoma-180 tumor cells\ 1, 10, and 100 mg/kg doses ↑ survival
day 1
Heteroglycan (ATOM) *A. subrufescens* Mice (10/group): 1) 5-week ♂ Swiss/NIH; 6 week- ♀ DS mice; 3) 8-week ♀ BALB/c nude; 4) 5-week C3H/HcN 100 or\ 8 weeks Implanted SC 1) Sarcoma-180, 2) Shionogi carcinoma 42, 3) Meth A fibrosarcoma, or 4) Ehrlich ascites carcinoma cells Both doses ↓ Sarcoma-180 tumor size at 4 weeks & ↑ survival; 300 mg/kg ↑ peritoneal macrophage and C3-positive cells; 300 mg/kg ↓ Shionogi and Meth A tumor sizes at 4 weeks. Both doses ↑ survival of Ehrlich ascites mice \[[@B93]\]
300 mg/kg\
days 2-11
Heteroglycan (LBP~3p~) *Lycium barbarum* ♂ Kunming mice (10/group) 5, 10 or\ 10 days Injected SC Sarcoma-180 cells 5 & 10 mg/kg ↑ thymus index; all doses ↓ weight, ↓ lipid peroxidation in serum, liver and spleen & ↑ spleen lymphocyte proliferation, cytotoxic T cell activity, IL-2 mRNA \[[@B91]\]
20 mg/kg
Heteroglycan (PNPS-1) *Pholiota nameko* SD rats (5/group) 100, 200 or 400 mg/kg days 1-8 8 days Implanted SC cotton pellets in scapular region\ ↓ granuloma growth positively correlated with dose: 11%, 18% and 44%, respectively \[[@B55]\]
day 1
Heteroglycan (PG101) *Lentinus lepideus* 8-10-week ♀ BALB/c mice (3/group) 10 mg 24 days 6 Gy gamma irradiation ↑ colony forming cells, granulocyte CFUs/Mø, erythroid burst-forming units, and myeloid progenitor cells in bone marrow; induced proliferation of granulocyte progenitor cells in bone marrow; ↑ serum levels of GM-CSF, IL-6, IL-1β \[[@B92]\]
Mixed poly-saccharides (Ambrotose^®^or Advanced Ambrotose^®^powders) *Aloe barbadensis*, *Larix*spp, and other plant poly-saccharides ♂ SD rats (10/group) 37.7 or 377 mg/kg Ambrotose^®^powder or 57.4 or 574 mg/kg Advanced Ambrotose^®^powder 2 weeks 5% DSS in drinking water beginning day 6 574 mg/kg Advanced Ambrotose powder ↓ DAI scores; 377 mg/kg Ambrotose complex & both doses Advanced Ambrotose powder ↑ colon length and ↓ blood monocyte count \[[@B52]\]
Pectin *Pyrus pyrifolia* 6-8-week ♂ BALB/c mice (11/group) 100 μg\ 22 days Injected IP OVA day 7, provoked with OVA aerosol day 21 bronchial fluid:↓ IFN-γ & ↑ IL-5; splenic cells: ↑ IFN-γ, ↓ IL-5; normalized pulmonary histopathological changes; ↓ serum IgE \[[@B54]\]
days 1-7
Pectins (bupleurum 2IIc) *Bupleurum falcatum* 6-8-week ♀ specific-pathogen-free C3H/HeJ mice 250 mg/kg 1 week Healthy animals ↑ spleen cell proliferation \[[@B35]\]
Pectins (highly methoxylated) *Malus*spp. 8-month- SD rats (5/group) 5% of diet vs. cellulose control 3 weeks Older animals ↑ MLN lymphocyte IgA & IgG \[[@B36]\]
Pectins Citrus spp. 5-week ♀ F344 rats (30/group) 15% of diet 34 weeks Injected SC AOM once a week, weeks 4-14 ↓ colon tumor incidence \[[@B86]\]
*Malus*spp. 5-week ♀ BALB/c mice (6/group) 5% of diet 2 weeks Healthy animals ↑ fecal IgA and MLN CD4+/CD8+ T lymphocyte ratio & IL-2 & IFN-γ secretion by ConA-stimulated MLN lymphocytes \[[@B51]\]
5-week ♀ BALB/c mice (6/group) 5% of diet days 5-19 vs. cellulose control 19 days DSS-induced UC days 1-5 Significantly increased MLN lymphocytes IgA, and significantly decreased IgE; significantly decreased ConA-stimulated IL-4 and IL-10
4-week ♂ Donryu rats (20-21/group) 20% of diet 32 weeks Injected SC AOM once a week,\ ↓ colon tumor incidence \[[@B85]\]
weeks 2-12
4-week ♂ Donryu rats (19-20/group) 10 or 20% of diet 32 weeks Injected SC AOM once a week,\ Both doses ↓ colon tumor incidence; 20% ↓ tumor occupied area & ↓ portal blood and distal colon PGE~2~ \[[@B90]\]
weeks 2-12
Pectins (modified) Citrus spp. 2-4-month BALB/c mice (9-10/group) 0.8 or 1.6 mg/ml drinking water,\ 20 days Injected SC with 2 × 2 mm section of human colon-25 tumor on day 1 Both doses ↓ tumor size \[[@B87]\]
days 8-20
NCR nu/nu mice (10/group) 1% (w/v) drinking water 16 weeks Orthotopically injected human breast carcinoma cells (MDA-MB-435) into mammary fat pad on day 7 ↓ tumor growth rate & volume at 7 weeks, lung metastases at 15 weeks, \# of blood vessels/tumor at 33 days post-injection \[[@B89]\]
NCR nu/nu mice (10/group) 1% (w/v) drinking water 7 weeks Injected human colon carcinoma cells (LSLiL6) into cecum on day 7 ↓ tumor weights and metastases to the lymph nodes and liver
SD rats (7-8/group) 0.01%, 0.1% or 1.0% wt/vol of drinking water, days 4-30 1 month Injected SC MAT-LyLu rat prostate cancer cells 0.1% and 1.0% ↓ lung metastases; 1.0% ↓ lymph node disease incidence \[[@B88]\]
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
######
Immunomodulatory Polysaccharide-Rich Plant Powders: Oral Animal Studies
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Source Animal Oral dose/day Duration Treatment Significant effects Reference
------------------------------------------------------ ----------------------------------------------------------------- --------------------------------------------------------- ----------- ------------------------------------------------------------------------------------------------------------------------------ ---------------------------------------------------------------------------------------------------------------------------------------------------------- ------------
*Agaricus*(*A. blazei*) *subrufescens*(fruit bodies) 6-week ♂ C57BL/6, C3H/HeJ and BALB/c mice (3/group) 16, 32 or 64 mg 2 weeks Healthy animals 32 and 64 mg ↑ liver mononuclear cell cytotoxicity \[[@B25]\]
*Grifola frondosa* 6-week ♀ ICR mice (10-15/group) 5% of diet 36 weeks Oral N-butyl-N\'-butanolnitrosamine daily for first 8 weeks ↓ \#s of animals with bladder tumors; ↓ tumor weight; ↑ peritoneal Mø chemotactic activity, splenic lymphocyte blastogenic response & cytotoxic activity \[[@B70]\]
*Laminaria angustata* Weanling SD rats (58/group) 5% of diet 26 weeks IG DMBA, beginning of week 5 ↑ time to tumor development and ↓ \# of adenocarcinomas in adenocarcinoma-bearing animals \[[@B77]\]
*Lentinula*(*Lentinus*) *edodes* 6-week ♀ ICR mice (10-17/group) 5% of diet 36 weeks Oral BBN daily for first 8 weeks ↓ \# of animals with bladder tumors; ↓ tumor weight; ↑ Mø chemotactic activity, splenic lymphocyte blastogenic response, cytotoxic activity \[[@B70]\]
7-8 -week ♂ Swiss mice (10/group) 1%, 5% or 10% of diet of 4 different lineages days 1-15 16 days Injected IP N-ethyl-N-nitrosourea day 15 All 3 doses of one lineage and the 5% dose of two other lineages ↓ \#s of micronucleated bone marrow polychromatic erythrocytes \[[@B79]\]
*Lentinula edodes*(fruit bodies) 5-week ♀ ICR mice\ 10%, 20% or 30% of diet 25 days Injected IP Sarcoma-180 ascites All 3 doses ↓ Sarcoma-180 tumor weight \[[@B78]\]
(14/group × 2)
Mice: 1) CDF~1~; 2) C3H; 3) BALB/c; 4,5) C57BL/6N (9/group × 3) 20% of diet 25 days Injected SC 1) IMC carcinoma, 2) MM-46 carcinoma, 3) Meth-A fibrosarcoma, 4) B-16 melanoma, or 5) Lewis lung carcinoma cells ↓ growth of MM-46, B-16, Lewis lung, and IMC tumors; ↑ lifespan in Lewis lung and MM-46 animals
ICR mice (14/group × 2) 20% of diet days 1-7, days 7-31 or days 14-31 31 days Injected IP Sarcoma-180 ascites ↓ tumor weight & growth when fed days 7-31 or 14-31
Mice: 1) CDF~1~; 2) C3 H (5/group × 4) 20% of diet 7-12 days Injected SC: 1) IMC carcinoma or 2) MM-46 carcinoma cells ↑ spreading rate of activated Mø ↑ phagocytic activity
*Phellinus linteus* 4-week ♂ ICR mice (10/group) 2 mg 1 month Healthy animals ↓ serum & splenocyte IgE production; ↑ proportion of splenic CD4^+^T cells & splenocyte IFN-γ production \[[@B31]\]
*Pleurotus ostreatus* 6-week ♀ ICR mice\ 5% of diet 36 weeks Oral BBN daily for first 8 weeks ↓ \#s of animals with bladder tumors; ↓ tumor weight; ↑ plasma Mø chemotactic activity, splenic lymphocyte blastogenic response, cytotoxic activity \[[@B70]\]
(10-20/group)
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
######
Immunomodulatory Polysaccharide Products: Oral Human Studies
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Extract Source Study design Population N (experimental/control) Dose/day Dura-tion Significant effects Reference
-------------------------------- ---------------------------------- ---------------------------------------------- ---------------------------------------------------------------------------------- -------------------------------------- ---------------------------------------------------------------------- ------------------------- ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ------------
Arabino-galactans *Larix occidentalis* Randomized, double-blind, placebo-controlled Healthy adults 8/15 4 g 6 weeks ↑ % CD8+ lymphocytes & blood lymphocyte proliferation \[[@B18]\]
Arabino-galactans (ResistAid™) Healthy adults given pneumococcal vaccinations day 30 21/24 4.5 g 72 days ↑ plasma IgG subtypes \[[@B19]\]
Fucoidans *Undaria pinnatifida*sporophylls Randomized, single-blind, placebo-controlled Healthy adults 25 (75% fucoidan, 6 (10% fucoidan)/6 3 g 12 days 75% fucoidan: ↓ \#s blood leukocytes, lymphocytes; ↑ plasma stromal derived factor-1, IFN-γ, CD34+ cells; ↑ % CXCR4-expressing CD34+ cells \[[@B21]\]
Furanose extract (Cold-FX^®^) *Panax quinque-folium* Randomized, double-blind, placebo-controlled Healthy older adults given influenza immunization at the end of week 4 22/21 400 mg 4 months During weeks 9-16, ↓ incidence of acute respiratory illness, symptom duration \[[@B20]\]
Glucans *Agaricus subru-fescens* Randomized, double-blind, placebo-controlled Cervical, ovarian or endometrial cancer patients receiving 3 chemotherapy cycles 39/61 5.4 g (estimated) 6 weeks ↑ NK cell activity, ↓ chemotherapy side effects \[[@B64]\]
Glucans\ Not identified Placebo-controlled Recurrent aphthous stomatitis patients 31/42 20 mg 20 days ↑ PBL lymphocyte proliferation, ↓ Ulcer Severity Scores \[[@B48]\]
(β-1,3;1,6)
Glucans\ *S. cerevisiae* Randomized, double-blind, placebo-controlled Adults with seasonal allergic rhinitis 12/12 20 mg 12 weeks 30 minutes after nasal allergen provocation test, nasal lavage fluid: ↓ IL-4, IL-5, % eosinophils, ↑ IL-12 \[[@B47]\]
(β-1,3;1,6)
Glucans (PSK) *Trametes versicolor* Randomized, controlled Patients with curatively resected colorectal cancer receiving chemotherapy 221/227 200 mg 3-5 years ↑ disease-free survival and overall survival \[[@B56]\]
Controlled Post-surgical colon cancer patients receiving chemotherapy 123/121 3 g for 4 weeks, alternating with 10 4-week courses of chemo-therapy 7 years ↑ survival from cancer deaths; no difference in disease-free or overall survival \[[@B57]\]
Post-surgical colorectal cancer patients receiving chemotherapy 137/68 3 g daily 2 years ↑survival in stage III patients; ↓ recurrence in stage II & III patients \[[@B58]\]
Post-surgical gastric cancer patients receiving chemotherapy 124/129 3 g for 4 weeks, alternating with 10 4-week courses of chemo-therapy 5-7 years ↑ 5-year disease-free survival rate, overall 5-year survival \[[@B59]\]
Pre-surgical gastric or colorectal cancer patients 16 daily; 17 every other day/13 3 g daily or on alternate days before surgery \<14 days or 14-36 days ≥14 day treatment: ↑ peripheral blood NK cell activity, PBL cytotoxicity, proportion of PBL helper cells; ↓ proportion of PBL inducer cells; \<14 day treatment: ↑ PBL response to PSK and Con A, proportion of regional node lymphocyte suppressor cells \[[@B62]\]
Randomized, double-blind, placebo-controlled Post-surgical stage III-IV colorectal cancer patients 56/55 3 g for 2 months, 2 g for 22 months, 1 g thereafter 8-10 years ↑ remission & survival rates \[[@B61]\]
Controlled Post-surgical stage III gastric cancer patients receiving chemotherapy 32/21 3 g 1 year ↑ survival time \[[@B60]\]
Glucans (PSP) *Trametes versicolor* Randomized, double-blind, placebo-controlled Conventionally-treated stage III-IV non-small cell lung cancer patients 34/34 3.06 g 1 month ↑ blood IgG & IgM, total leukocyte and neutrophil counts, % body fat; ↓ patient withdrawal due to disease progression \[[@B63]\]
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
######
Immunomodulatory Polysaccharide Products: Composition and Structure
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Source Category Features MW Monosaccharide composition Reference
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ----------------------------------------- ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ ------------------------------------------------------------------------ ------------------------------------------------------------------------------------------------------------------------------- --------------------------------------------
*Agaricus subrufescens*(*A. blazei*) Extract β-1,6-D-glucan 10,000 NA \[[@B66]\]
*Agaricus subrufescens*(fruit body) Extract α-1,6- and α-1,4 glucans with β-1,6-glucopyranosyl backbone (629.2 mcg/mg polysaccharides, 43.5 mcg/mg protein) 170,000 glucose \[[@B24]\]
α-1,4 glucans & β-1,6 glucans with β-1,3 side branches; α-1,6 glucans; β-1,6; 1-3 glucans, β-1,4 glucans; β-1,3 glucans; β-1,6; α-1,3 glucans; riboglucans, galactoglucomannans, β-1,2; β-1,3 glucomannans NA glucose, mannose, galactose, ribose \[[@B25],[@B117],[@B118]\]
*Agaricus subrufescens*(mycelia) Extract (ATOM) β-1,6-D-glucan, protein complex, 5% protein 100,000-1,000,000 glucose, mannose, galactose, ribose \[[@B93]\]
*Aloe barbadensis*(leaf gel) Whole tissue Dry weight: 10% polysaccharides; acemannan, aloemannan, aloeride, pectic acid, galactans, arabinans, glucomannans average 2,000,000 mannose, glucose, galactose, arabinose, xylose, rhamnose \[[@B119],[@B120]\]
Extract (aloemannan) neutral partially acetylated glucomannan, mainly β-1,4-mannans \>200,000 mannose, glucose \[[@B121]\]
Extract (aloeride) NA 4,000,000-7,000,000 37% glucose, 23.9% galactose, 19.5% mannose, 10.3% arabinose \[[@B122]\]
Extract (acemannan) β-1,4 acetylated mannan 80,000 mannose \[[@B123]\]
*Aloe barbadensis*, (leaf gel), *Larix*sp. (bark), *Anogeissus latifolia*(bark), *Astragalus gummifer*(stem), *Oryza sativa*(seed), *glucosamine* Extracts (Ambrotose^®^powder) β-1,4 acetylated mannan, arabinogalactans, polysaccharide gums, rice starch, 5.4% protein 57.3% ≥ 950,000; 26.4% \< 950,000 and ≥80,000; 16.3% ≤ 10,000 mannose, galactose, arabinose, glucose, galacturonic acid, rhamnose, xylose, fructose, fucose, glucosamine, galacturonic acid (unpublished data, Mannatech Incorporated)
*Aloe barbadensis*(leaf gel), *Larix*sp. (bark), *Undaria pinnatifida*(frond), *Anogeissus latifolia*(bark), *Astragalus gummifer*(stem), *Oryza sativa*(seed), *glucosamine* Extracts (Advanced Ambrotose^®^powder) β-1,4 acetylated mannan, arabinogalactans, polysaccharide gums, fucoidans, rice starch, 6% protein, 1% fatty acids 13% = 1,686,667; 46% = 960,000 30% \<950,000 and ≥70,000; 11% ≤ 10,000
*Avena*spp. (seed endosperm) Extract β-1,3;1,4 particulate (1-3 μ) glucans 1,100,000 glucose \[[@B43]\]
*Avena*spp. (seed) Extract β-1,4,1,3 particulate glucans (linear chains of β-D-glycopyranosyl units; 70% β 1-4 linked) 2,000,000 NA \[[@B41],[@B124]\]
*Buplerum falcatum*(root) Extract (bupleuran 2IIc) 6 linked galactosyl chains with terminal glucuronic acid substituted to β-galactosyl chains NA galactose, glucuronic acid, rhamnose \[[@B35]\]
Citrus spp. (fruit) Extract α-1,4-linked partially esterified D-anhydrogalacturonic acid units interrupted periodically with 1,2-rhamnose 70,000-100,000 galactose, galacturonic acid, arabinose, glucose, xylose, rhamnose \[[@B125]\]
*Cladosiphon okamuranus*(frond) Extract α-1,3-fucopyranose sulfate 56,000 fucose:glucuronic acid (6.1:1.0) \[[@B126]\]
*Cordyceps sinensis*(mycelia) Extract β-1,3-D-glucan with 1,6-branched chains NA NA \[[@B127]\]
*Cyamopsis tetragonolobus*(seed) Extract (guar gum) Main chain of β-1,4-mannopyranosyl units with α-galactopyranosyl units 220,000 mannose, galactose \[[@B36],[@B128]\]
Extract (partially-hydrolyzed guar gum) NA 20,000 mannose, galactose \[[@B50]\]
*Flammulina velutipes* Extract NA NA glucose, mannose, galactose \[[@B117]\]
*Flammulina velutipes*(fruit body) Extract β-1,3 glucan NA glucose \[[@B129]\]
*Ganoderma lucidum* Whole tissue Linear β-1,3-glucans with varying degrees of\ 400,000-1,000,000 glucose, galactose, mannose, xylose, uronic acid \[[@B130]\]
D-glucopyranosyl branching, β-glucan/protein complexes, heteropolysaccharides
Extract NA 7,000-9,000 NA \[[@B67]\]
*Ganoderma lucidum*(fruit body) Extract NA 7,000-9,000 NA
β-linked heteroglycan peptide 513,000 fructose, galactose, glucose, rhamnose, xylose (3.167:\ \[[@B15]\]
0.556:6.89:0.549:3.61)
*Ganoderma tsugae* Extract 55.6% carbohydrates (12.5% polysaccharides); 12% triterpenes, 1.7% sodium, 0.28% protein, 0% lipid NA NA \[[@B53]\]
*Ginkgo biloba*(seed) Extract 89.7% polysaccharides NA glucose, fructose, galactose, rhamnose \[[@B131]\]
*Grifola frondosa* Whole tissue β-1,3; 1, 6-glucans, α-glucans, mannoxyloglucans, xyloglucans, mannogalactofucans NA glucose, fucose, xylose, mannose, galactose \[[@B117]\]
*Grifola frondosa*(fruit body) Extract\ β-1,6-glucan with β-1,3 branches, 30% protein NA glucose \[[@B132]\]
(D fraction)
Extract\ β-1,6-D-glucan with α-1,4 branches, 35% protein 550,000-558,000 glucose
(X fraction)
*Hordeum*spp. (seed) Extract β-1,3;1,4-and β-1,3;1,6-D-glucans 45,000-404,000 glucose \[[@B75]\]
Primarily linear β-1,3;1,4- glucans NA glucose \[[@B124]\]
*Laminaria*spp.\ Extract (laminarin) β-1,3;1-6 glucan 7,700 glucose \[[@B29]\]
(frond)
β-1,3 glucan with some β-1,6 branches and a small amount of protein 4,500-5,500 glucose \[[@B44]\]
Extract Fucoidan NA NA \[[@B133]\]
*Larix occidentalis*(bark) Extract β-1,3;1,6-D-galactans with arabinofuranosyl and arabinopyranosyl side chains 19,000-40,000 galactose:arabinose (6:1), uronic acid \[[@B128],[@B134]\]
*Lentinula edodes* Extract (SME) β-1,3-glucans (4-5%), α-1,4-glucan (8-10%), protein (11-14%) NA glucose \[[@B80]\]
Extract β-glucan 1,000 glucose \[[@B27]\]
Whole tissue Linear β-1,3-glucans, β-1,4;1,6-glucans, heterogalactan NA glucose, galactose, mannose, fucose, xylose \[[@B135]\]
Extract (lentinan) β-1,3-glucan with 2 β-1,6 glucopyranoside branchings for every 5 β-1,3-glucopyranoside linear linkages 500,000 glucose \[[@B136]\]
*Lentinula edodes*(fruit body)\ Extract (lentinan) Neutral β-1,3-D glucan with two β-1,6 glucoside branches for every five β-1,3 units 400,000-800,000 glucose \[[@B137]\]
*Lentinula edodes*
Extract\ Peptide units and mannan connected by α-glycosidic bonds 60,000-90,000 mannose, glucose
(KS-2)
*Lentinula edodes*(mycelia or fruit body) Extract Triple helical β-1,3-D glucan with β-1,6 glucoside branches 1,000,000 glucose \[[@B3]\]
*Lentinula edodes*(mycelia) Extract\ 44% sugars, 24.6% protein \~1,000,000 xylose, arabinose, glucose, galactose, mannose, fructose \[[@B3]\]
(LEM)
Extract (PG101) 72.4% polysaccharides, 26.2% protein, 1.4% hexosamine NA 55.6% glucose, 25.9% galactose, 18.5% mannose \[[@B138]\]
*Lycium barbarum* Whole tissue α-1,4;1,6-D-glucans, lentinan, β-1,3;1,6 heteroglucans, heterogalactans, heteromannans, xyloglucans NA glucose, galactose, mannose, xylose \[[@B139]\]
*Lycium barbarum*(fruit body) Extract\ 88.36% sugars, 7.63% protein 157,000 galactose, glucose, rhamnose, arabinose, mannose, xylose (molar ratio of 1:2.12:1.25:1.10:1.95:1.76) \[[@B91]\]
(LBP~3p~)
*Panax quinquefolium*(root) Extract Poly-furanosyl-pyranosyl saccharides NA arabinose, galactose, rhamnose, galacturonic acid, glucuronic acid \[[@B33]\]
NA NA glucose, mannose, xylose \[[@B140]\]
Extract\ 90% poly-furanosyl-pyranosyl-saccharides NA furanose \[[@B20]\]
(Cold-fX^®^)
*Phellinus linteus*(fruit body) Extract α- and β-linked 1,3 acidic proteoglycan with 1,6 branches 150,000 glucose, mannose, arabinose, xylose \[[@B141]\]
*Phellinus linteus*(mycelia) Extract 83.2% polysaccharide (4.4% β-glucan), 6.4% protein, 0.1% fat NA glucose \[[@B142]\]
*Pholiota nameko*(fruit body) Extract (PNPS-1) NA 114,000 mannose, glucose, galactose, arabinose, xylose (molar ratio of 1:8.4:13.6:29.6:6.2) \[[@B55]\]
*Pleurotus ostreatus*(mycelia) Extract β-1,3;1,6-D-glucans 316,260 glucose \[[@B143]\]
*Saccharomyces cerevisiae* Extract (WGP) Particulate β-1,3;1,6-D-glucan NA glucose \[[@B144]\]
Extract β-glucans with β-1,6 branches with a β-1,3 regions NA glucose \[[@B124]\]
Extract\ soluble β-1,3-D-glucan with β-1,3 side chains attached with β-1,6 linkages 20,000 glucose \[[@B145]\]
(SBG)
*Sclerotinia sclerotiorum*(mycelia) Extract\ β-1,3-D-glucan, \<1% protein (\>98% polysaccharide) NA glucose \[[@B83]\]
(SSG)
*Sclerotium rofsii* Extract (scleroglucan) β-1,3;1,6 glucan 1,000,000 glucose \[[@B29]\]
*Trametes versicolor*(fruit body) Extract\ α-1,4, β-1,3 glucans, 10% peptides 100,000 glucose, arabinose, mannose, rhamnose \[[@B146]\]
(PSP)
*Trametes versicolor*(mycelia) Extract\ β-1,4;1,3;1,6-D-glucans, protein 94,000 glucose (74.6%), mannose (15.5%), xylose (4.8%), galactose (2.7%), fucose (2.4%) \[[@B137],[@B147]\]
(PSK)
*Undaria pinnatifida*(sporophyll) Extract Galactofucan sulfate 9,000 fucose:galactose 1.0:1.1 \[[@B148]\]
Galactofucan sulfate 63,000 fucose:galactose:gluc-uronic acid (1.0:1.0:0.04) \[[@B149]\]
β-1,3-galactofucan sulphate 38,000 fucose, galactose \[[@B150]\]
Unidentified source Extract (modified citrus pectin) NA 10,000 galactose, rhamnose, uronic acid \[[@B125]\]
Extract (highly methoxylated pectin) NA 200,000 NA \[[@B36]\]
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
######
Safety of Immunomodulatory Polysaccharide Products Following Oral Intake
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Category Source Test group Test Design Results Equivalent human dose\* Reference
------------------- ----------------------------------------------------------- ---------------------------------------------------- --------------------------------- ------------------------------------------------------------------------------------- -------------------------------------------------------------------------------------------------------------------------------------------------------------- ------------------------- ------------------------------------
Arabino-galactans *Argemone mexicana*(arabinogalactan protein) Pregnant rats Develop-mental toxicity 250, 500, or 1,00 mg/kg, gestational days 5-19 No developmental toxicity: NOAEL = 1 g/kg 68 g \[[@B151]\]
♀ and ♂ rats Fertility 250, 500, or 1,00 mg/kg, 1 month No effects on reproduction: NOAEL = 1 g/kg
Fucoidans *Undaria pinnatifi*da Rats Subchronic toxicity 1.35 g/kg, 1 month No evidence of toxicity 91.8 g \[[@B152]\]
Galacto-mannans *Cyamopsis tetragonolobus* Adolescent and adult ♂ rats Subchronic and chronic toxicity 8% of diet, 6-67 weeks No evidence of toxicity 8% of diet \[[@B153]\]
Rats Acute toxicity One 7.06 g/kg dose: observed 2 weeks LD~50~= 7.06 g/kg 480 g \[[@B96]\]
Subchronic and chronic toxicity 1, 2, 4, 7.5 or 15% of diet, 3 months All doses ↓ ♀ BW; 7.5-15% ↓ ♂ BW; 15% ↓ bone marrow cellularity; ↓ kidney and liver weights 1-15% of diet
19 adults with hypercholesterol-emia 18 g/day, 1 year Short-term gastric bloating/loose stools, in 8 subjects, resolved in 7-10 days; 2 withdrew because of diarrhea. No toxicity for 13 subjects completing study 18 g \[[@B154]\]
16 Type II diabetics 26.4-39.6 g/day, 6 months No effects on hematologic, hepatic, or renal function 39.9 g \[[@B155]\]
18 Type II diabetics 30 g/day, 4 months 30 g
*Cyamopsis tetragonolobus*(partially hydrolyzed guar gum) Mice & rats Acute toxicity One 6 g/kg dose; observed\ LD~50~\> 6 g/kg \>408 g \[[@B156]\]
2 weeks
Rats Subchronic toxicity 0.2, 1.0 or 5% of diet, 13 weeks No evidence of toxicity 5% of diet
0.5 or 2.5 g/kg, 1 month NOAEL \> 2.5 g/kg \>170 g \[[@B157]\]
*S. typhimurium* Mutagenicity Ames test Not mutagenic NA
Glucans *Agaricus subrufescens*(aqueous extract) Rats Subchronic toxicity 0.63, 1.25, 2.5 or 5% of diet, 3 months NOAEL = 5% of diet 5% of diet \[[@B158]\]
3 women with advanced cancers Case reports Specific identity of products, doses, and durations of intake unknown Severe hepatotoxicity; two patients died NA \[[@B97]\]
*Agaricus subrufescens*(freeze dried powder) 24 normal adults and 24 adults with liver problems Subchronic toxicity 3 g, 4 months No evidence of toxicity 3 g \[[@B159]\]
*Ganoderma lucidum*\ Elderly woman Case report 1 year *G. lucidum*(and another unidentified product, initiated one month previous) Elevated liver enzymes and liver tissue damage NA \[[@B98]\]
(supplement)
*Grifola frondosa*(powder) Rats Acute toxicity One 2 g/kg dose No evidence of toxicity 136 g \[[@B160]\]
*Lentinula edodes*(powder) 10 adults Safety 4 g/day for 10 weeks; repeated\ 50% of subjects experienced blood eosinophilia, ↑ eosinophil granule proteins in serum and stool, ↑GI symptoms 4 g \[[@B99]\]
3-6 months later
*Lentinula edodes*\ Nude mice Safety 10% of diet days 1-18, 33-50 No adverse events 10% of diet \[[@B80]\]
(SME)
61 men with prostate cancer 0.1 g/kg, 6 months No adverse events 6.8 g
*Lentinus lepideus*(PG101) Female mice Subchronic toxicity 0.5 g/kg, 24 days No evidence of toxicity 34 g \[[@B92]\]
*Phellinus linteus*\ Rats Acute toxicity One 5 g/kg dose; observed\ LD~50~\> 5 g/kg 349 g \[[@B161]\]
(crude extract) 2 weeks
*Pleurotus ostreatus*(aqueous extract) Mice Acute toxicity One 3 g/kg dose; observed\ LD~50~\> 3 g/kg \>204.g \[[@B100]\]
1 day
Subacute toxicity 319 mg/kg, 1 month Hemorrhages in intestine, liver, lung, kidney; inflammation and microabscesses in liver 21.7 g
*Saccharomyces cerevisiae*(particulate glucan \[WGP\]) Rats Acute toxicity One 2 g/kg, observed 2 weeks LD~50~\> 2 g/kg \>136 g \[[@B144]\]
Subchronic toxicity 2, 33.3 or 100 mg/kg, 3 months NOAEL = 100 mg/kg 6.80 g
Heteroglycans *Trametes versicolor*\ Rats Subchronic toxicity 1.5, 3.0 or 6.0 mg/kg, 2 months No evidence of toxicity 408 mg \[[@B162]\]
(PSP)
Rats & monkeys Subchronic and chronic toxicity 100-200X equivalent human dose, 6 months No evidence of toxicity NA
*Trametes versicolor*\ Humans with colon cancer Safety 3 g/day, up to 7 years No significant adverse events 3 g \[[@B57]\]
(PSK)
Humans with colorectal cancer 3 g/day, 2 years 3 g \[[@B58]\]
Mannans *Aloe vera*gel Dogs Acute toxicity Fed one 32 g/kg; observed 2 weeks LD50 \> 32 g/kg \>2,176 g Bill Pine, personal communi-cation
Rats One 21.5 g/kg; observed 2 weeks LD50 \> 10 g/kg \>680 g
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\*150 lb adult
A number of studies in healthy human adults demonstrated immune stimulating effects of oral polysaccharides. Arabinogalactans from *Larix occidentalis*(Western larch) were shown in RCTs to increase lymphocyte proliferation and the number of CD8+ lymphocytes \[[@B18]\] and to increase the IgG subtype response to pneumococcal vaccination \[[@B19]\]. A furanose extract from *Panax quinquefolium*(North American ginseng) was shown in an RCT of healthy older adults to decrease the incidence of acute respiratory illness and symptom duration \[[@B20]\]. Finally, an RCT of healthy adults consuming *Undaria pinnatifida*(wakame) fucoidans found both immune stimulating and suppressing effects, including increased stromal-derived factor-1, IFN-γ, CD34+ cells and CXCR4-expressing CD34+ cells and decreased blood leukocytes and lymphocytes \[[@B21]\].
Studies in healthy animals showed a number of immune stimulating effects of various glucan products from *Agaricus subrufescens (A. blazei)*(aqueous extracts \[[@B22]\], aqueous extracts with standardized β-glucans \[[@B23]\], α-1,6 and α-1,4 glucans \[[@B24]\], and whole plant powders \[[@B25]\]); *Lentinula edodes*(shiitake) (lentinan \[[@B26]\] and β-glucans \[[@B27]\]); *Saccharomyces cerevisiae*(β-1,3-glucans \[[@B27],[@B28]\]); *Laminaria digitata*(laminarin \[[@B29]\]); *Sclerotium rofsii*(glucan phosphate \[[@B29]\]); *Sclerotinia sclerotiorum*(SSG \[[@B30]\]); and *Phellinus linteus*(powder \[[@B31]\] and aqueous, alcohol-precipitated extract \[[@B32]\]). A furanose extract from *P. quinquefolium*and pectins from *Buplerum falcatum*and *Malus*(apple) spp. have also been shown to enhance immune function in healthy young animals \[[@B33]-[@B35]\]. *Cyamopsis tetragonolobus*galactomannan (guar gum) or highly methoxylated pectin feeding exerted numerous stimulating effects on antibody production in older animals \[[@B36]\].
Evidence for the effectiveness of oral polysaccharides against infection and immune challenges has been mainly demonstrated in animals. Immune stimulating effects have been shown in resting and exercise-stressed animals with thioglycollate, clodronate, or HSV-1 injections fed *Avena*(oat) spp. soluble glucans \[[@B37]-[@B41]\]; animals injected with or fed *E. vermiformis*and fed *Avena*spp. particulate glucans \[[@B42],[@B43]\]; animals with *E. coli*injections fed *L. digitata*glucans (laminarin) \[[@B44]\]; animals with HSV injections fed *U. pinnatifida*fucoidans \[[@B45]\]; animals with *Staphylococcus aureus*or *Candida albicans*injections fed *S. cerevisiae*glucans (scleroglucan) \[[@B29]\]; and animals with fecal solution injections fed an aqueous extract of *A. subrufescens*(*A. blazei*Murrill) \[[@B46]\].
Additional controlled human and animal studies have shown anti-inflammatory and anti-allergy effects of some polysaccharide products. In an RCT of adults with seasonal allergic rhinitis, *S. cerevisiae*β-1,3;1-6 glucans decreased IL-4, IL-5 and percent eosinophils, and increased IL-12 in nasal fluid \[[@B47]\], while a placebo-controlled study of patients with recurrent aphthous stomatitis (canker sores) consuming β-1,3;1-6 glucans found increased lymphocyte proliferation and decreased Ulcer Severity Scores \[[@B48]\].
Animal models of inflammatory bowel disease have shown anti-inflammatory effects of *Cladosiphon okamuranus*Tokida fucoidans \[[@B49]\], *Cyamopsis tetragonolobus*galactomannans \[[@B50]\], *Malus*spp. pectins \[[@B51]\], and mixed polysaccharide supplements \[[@B52]\]. Animals challenged with ovalbumin have demonstrated anti-inflammatory/allergy effects of *A. subrufescens*aqueous extracts \[[@B22]\], an aqueous extract *of Ganoderma tsugae*\[[@B53]\], and *Pyrus pyrifolia*pectins \[[@B54]\]. Anti-inflammatory effects have also been seen in animals with cotton pellet implantations fed a *Pholiota nameko*heteroglycan (PNPS-1) \[[@B55]\].
*Trametes versicolor*glucans have demonstrated anti-cancer effects in humans. In two RCTs and five controlled trials, PSK from *T. versicolor*mycelia increased survival of advanced stage gastric, colon and colorectal cancer patients \[[@B56]-[@B62]\] with one study showing increased immune parameters (including blood NK cell activity, leukocyte cytotoxicity, proportion of helper cells and lymphocyte suppressor cells) \[[@B62]\]. An RCT of advanced stage lung cancer patients consuming PSP from *T. versicolor*fruit bodies found increased IgG and IgM antibodies and total leukocyte and neutrophil counts, along with a decrease in the number of patients withdrawing from the study due to disease progression \[[@B63]\]. An RCT of ovarian or endometrial cancer patients consuming *A. subrufescens*glucans showed increased NK cell activity and fewer chemotherapy side effects \[[@B64]\].
In numerous animal models of cancer, a wide range of polysaccharides have shown anti-tumorogenic effects. Glucan products sourced from *A. subrufescens*demonstrating anti-cancer activities in animal models include an aqueous extract \[[@B65]\], an aqueous, acid-treated extract \[[@B66]\], and an aqueous extract with standardized levels of β-glucans \[[@B23]\]. Anti-cancer effects have been reported following intake of aqueous extracts of *G. lucidum*\[[@B67]-[@B69]\]; the powder and D fraction of *G. frondosa*\[[@B70]-[@B72]\]; *Hordeum vulgare*β-glucans \[[@B73]-[@B76]\]; *Laminaria angustata*powder \[[@B77]\]; *Lentinula edodes products*(powders \[[@B70],[@B78],[@B79]\], SME \[[@B80]\], β-glucans \[[@B27]\], and lentinan \[[@B81],[@B82]\]); *Pleurotus ostreatus*powder \[[@B70]\], *Saccharomyces cerevisiae*particulate β-1,3;1,6 and β-1,3 glucans \[[@B27],[@B73]\]; and a glucan from *Sclerotinia sclerotiorum*(SSG) \[[@B30],[@B83]\]. A glucomannan from *L. edodes*(KS-2) improved survival of animals with cancer cell injections \[[@B84]\]; apple and citrus pectins have exerted anti-cancer effects, including decreased tumor incidence \[[@B85]-[@B90]\]. Finally, heteroglycans from *Lycium barbarum*(LBP~3p~), *Lentinus lepideus*(PG101) and A. *subrufescens*(ATOM) demonstrated a number of immune stimulating effects in animal cancer models \[[@B91]-[@B93]\]. Interestingly, only one animal study has been performed using glucans from *T. versicolor*(PSP): animals with cancer cell implantations showed decreased tumor growth and vascular density \[[@B94]\].
Most polysaccharide products appear to be safe, based on NOAEL, acute and/or chronic toxicity testing in rodents (Table [6](#T6){ref-type="table"}). As would be expected, powders, extracts and products that have not been fully characterized pose the most concerns. Other than for aloe vera gel, which was shown in a small human trial to increase the plasma bioavailability of vitamins C and E \[[@B95]\], the impact of polysaccharide intake on the absorption of nutrients and medications is not known. While one rat toxicity study raised concerns when guar gum comprised 15% of the daily diet \[[@B96]\], the product was safe in human studies when 18-39.6 g/day was consumed for up to a year (Table [4](#T4){ref-type="table"}). Product contamination may explain three case reports of hepatotoxicity and/or death following intake of an *A. subrufescens*aqueous extract \[[@B97]\]. Seven animal studies reporting positive immunologic effects of *A. subrufescens*extracts in healthy animals or animals with cancers found no evidence of toxicity (Tables [1](#T1){ref-type="table"} and [2](#T2){ref-type="table"}). In humans, six weeks of *A. subrufescens*glucans intake was safe for cancer patients, and four months of 3 g/day intake by 24 healthy adults and 24 adults with liver disease reported no evidence of toxicity (Table [4](#T4){ref-type="table"}). Another case report associated liver toxicity with *G. lucidum*intake, but the elderly subject also took an unidentified product a month previous to her admission for testing \[[@B98]\]. Three animal studies reported immunologic benefits and no adverse effects following intake of *G. lucidum*aqueous extracts; in one study intake was 5% of the diet for 5 months (Table [1](#T1){ref-type="table"}). While adverse effects were also reported in a study in which 10 adults consumed 4 g/day *L. edodes*powder for 10 weeks \[[@B99]\], immunologic animal studies reported no ill effects of either *L. edodes*powder (5 studies, up to 5% of the diet up to nine months) or extract (7 studies, up to 40 days intake) (Tables [1](#T1){ref-type="table"} and [3](#T3){ref-type="table"}). Finally, while intake of 319 mg/kg of an aqueous extract of *P. ostreatus*by mice for 1 month caused hemorrhages in multiple tissues \[[@B100]\], there was no reported toxicity when mice consumed the mushroom powder as 5% of their diet for nine months (Table [3](#T3){ref-type="table"}). While ≥1 gram/day of *T. versicolor*glucan products were safely consumed by cancer patients for up to 10 years, the long-term effects of ingestion of the other polysaccharide products discussed in this review are also not known.
Discussion
==========
The majority of studies that qualified for inclusion in this review employed models investigating immune stimulation; fewer explored anti-inflammatory effects. Animal studies reported immune system effects in the gut, spleen, bone marrow, liver, blood, thymus, lungs, and saliva; controlled human studies reported evidence of immune stimulation in the blood, anti-inflammatory effects in nasal lavage fluid and improved survival in cancer patients. The literature is highly heterogenous and is not sufficient to support broad structure/function generalizations. For the limited number of studies that investigated well-characterized, isolated products (primarily glucan products), effects can be unequivocally attributed to polysaccharides. Such associations are certainly more tenuous when considering product powders or products obtained by extraction methods designed to isolate polysaccharides, but without complete compositional analyses.
Dietary polysaccharides are known to impact gut microbial ecology \[[@B101],[@B102]\], and advances in microbial ecology, immunology and metabolomics indicate that gut microbiota can impact host nutrition, immune modulation, resistance to pathogens, intestinal epithelial development and activity, and energy metabolism \[[@B103]-[@B107]\]. Other than fucoidans, the polysaccharides discussed in this review appear to be at least partially degraded by bacterial enzymes in the human digestive tract (Table [7](#T7){ref-type="table"}). Arabinogalactans, galactomannans, a glucan (laminarin), glucomannans, and mixed polysaccharide products (Ambrotose^®^products) have been shown to be metabolized by human colonic bacteria. Orally ingested fucoidans, glucans and mannans (or their fragments) have been detected in numerous tissues and organs throughout the body \[[@B73],[@B108],[@B109]\], (Carrington Laboratories, personal communication). We know of no study that has determined the specific identity of orally-ingested polysaccharide end products in animal or human tissues.
######
Fate of Immunomodulatory Polysaccharide Products Following Oral Intake
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Category Product Metabol-ized by human gut bacteria? Study type Fate\ References
(method: tissues detected)
------------------------------- --------------------------------------------------------------------------------- ------------------------------------- ------------ ------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------
Arabinogalactans *Larix*spp. yes *in vitro* NA \[[@B163]-[@B169]\]
Fucoidans *Undaria pinnatifida* no *in vitro* Ab: human plasma \[[@B108],[@B170]\]
Galactomannans *Cyamopsis tetragonolobus*(partially hydrolyzed guar gum) yes *in vivo* NA \[[@B171]\]
*Cyamopsis tetragonolobus*(guar gum) yes *in vitro* NA \[[@B167]\]
Glucans *Hordeum vulgare* NA *in vivo* Fluorescein-labeled: mouse Mø in the spleen, bone marrow, lymph nodes \[[@B73]\]
*Laminaria digitata*(laminarin) yes *in vitro* NA \[[@B29],[@B170],[@B172]\]
*Sclerotium rofsii*(scleroglucan) glucan phosphate, *Laminaria*spp. (laminarin) NA *in vivo* Alexa Fluor 488-labeled: mouse intestinal epithelial cells, plasma, GALT \[[@B29]\]
*Saccharomyces cervisiae*(particulate) NA *in vivo* Fluorescein-labeled: mouse macrophage in the spleen, bone marrow, lymph nodes \[[@B73]\]
*Trametes versicolor*\ NA *in vivo* ^14^C-labeled: rat and rabbit serum; mouse GI tract, bone marrow, salivary glands, liver, brain, spleen, pancreas \[[@B173]\]
(PSK)
Mannans *Aloe barbadensis*(aloemannan) yes *in vitro* FITC-labeled: mouse, GI tract \[[@B121],[@B174]\]
*Aloe barbadensis*\ yes *in vitro* NA \[[@B163]\]
(gel powder)
*Aloe barbadensis*(acemannan) NA *in vivo* ^14^C-labeled: dog systemic, particularly liver, bone marrow, gut, kidney, thymus, spleen (Carrington Laboratories, personal communication)
Mixed polysaccharide products Ambrotose complex^®^, Advanced Ambrotose^®^powder yes *in vitro* NA \[[@B163],[@B175]\]
Pectins NA yes *in vitro* NA \[[@B165]-[@B167],[@B176]\]
*Buplerum falcatum*(bupleuran 2IIc) NA *in vivo* Ab bound: mouse Peyer\'s patch, liver \[[@B109]\]
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
One can only speculate upon the mechanisms by which the polysaccharides discussed in this review influence immunologic function, particularly when one considers the exceedingly complex environment of the GI tract. It is possible that fragments of polysaccharides partially hydrolyzed by gut bacteria may either bind to gut epithelia and exert localized and/or systemic immune system effects, or be absorbed into the bloodstream, with the potential to exert systemic effects. Current studies investigating the link between the bioconversion of dietary polysaccharides, their bioavailability and their downstream effects on the host metabolism and physiology are utilizing metabolomic and metagenomic approaches that can detect and track diverse microbial metabolites from immunomodulatory polysaccharides \[[@B103]\]. These and other innovative approaches in the field of colonic fermentation are providing novel insights into gut microbial-human mutualism \[[@B110],[@B111]\], its impact on regulating human health and disease, and the importance of dietary modulation \[[@B112]-[@B115]\].
Additional RCTs of well-characterized products are needed to more completely understand the immunomodulatory effects and specific applications of oral polysaccharides. Such studies will need to better investigate the optimal timing and duration for polysaccharide ingestion. That is, should they be consumed continuously, before, at the time of, or after exposure to a pathogen or environmental insult? Only a few studies have actually investigated the impact of timing of polysaccharide intake to achieve optimal benefits. Daily feeding with some polysaccharides appears to result in tolerance (and diminished benefits); this has been demonstrated for some mushroom β-glucans \[[@B3],[@B26]\]. For those polysaccharides whose immunologic effects are dependent on their prebiotic activities, regular feeding would be presumed necessary.
Conclusions
===========
The dietary polysaccharides included in this review have been shown to elicit diverse immunomodulatory effects in animal tissues, including the blood, GI tract, and spleen. In controlled human trials, polysaccharide intake stimulated the immune system in the blood of healthy adults, dampened the allergic response to a respiratory inflammatory agent, and improved survival in cancer patients. Additional RCTs of well-characterized products are needed to more completely understand the immunomodulatory effects and specific applications of oral polysaccharides.
List of abbreviations
=====================
♀: female; ♂: male; Ab: antibody; AIDS: acquired immune deficiency syndrome; AOM: azoxymethane; BBN: N-butyl-N\'-butanolnitrosamine; BLCL: Burkitt\'s Lymphoma Cell Line; BW: body weight; CBC: complete blood count; CD: cluster of differentiation; CFU: colony forming unit; ConA: concanavalin A; CXCR: CXC chemokine receptor; DMBA: 7,12-dimethylbenz*(a)*anthracene; DMH: N-N\'-dimethylhydrazine; DMN: dimethylnitrosamine; DSS: dextran sulfate sodium; EBV: Epstein-Barr virus; GALT: gut-associated lymphoid tissue; GI: gastrointestinal; H~2~O~2~: hydrogen peroxide; HSV: herpes simplex virus; ICR: imprinting control region; ID: intradermal; IEL: intraepithelial lymphocytes; IFN-γ: interferon gamma; IG: intragastric; IgA: immunoglobulin A; IgE: immunoglobulin E; IgG: immunoglobulin G; IgM: immunoglobulin M; IL: interleukin; IMC: invasive micropapillary carcinoma; IN: intranasally; IP: intraperitoneal; IV: intravenous; LPS: lipopolysaccharide; Mø: macrophage; mAb: monoclonal antibody; 3-MCA: methylcholanthrene; MLN: mesenteric lymph nodes; MM-46 carcinoma: mouse mammary carcinoma; MW: molecular weight; NK: natural killer; NOAEL: no observable adverse effect level; OVA: ovalbumin; PBL: peripheral blood leukocytes; PBMC: peripheral blood mononuclear cells; PHA: phytohaemagglutinin; PMA: phorbol 12-myristate 13-acetate; PML: polymorphonuclear lymphocyte; RCT: randomized, controlled trial; RNA: ribonucleic acid; SC: subcutaneous; SD rats: Sprague Dawley; TCR: T cell receptor; TLR: toll like receptor; TNF-α: tumor necrosis factor alpha; UC: ulcerative colitis; WT: wild type.
Competing interests
===================
The authors are employees of the Research & Development Department at Mannatech, Incorporated, which sells two of the polysaccharide products (Ambrotose^®^powder and Advanced Ambrotose^®^powder) discussed in this review.
Authors' contributions
=======================
JER and EDN conducted literature searches and wrote the manuscript. RAS provided technical guidance. All authors read and approved the final manuscript.
Acknowledgements
================
The authors would like to thank Barbara K. Kinsey, Ward Moore and Mrs. Jennifer Aponte for their assistance with the preparation of this manuscript, and Dr. Azita Alavi and Mrs. Christy Duncan for their editorial assistance.
|
|
Long Beach Film Festival - Now Accepting Films & Screenplays
From:
Robin Duarte
Subject:
Long Beach Film Festival - Now Accepting Films & Screenplays
Date:
Fri, 19 Jul 2002 15:13:14 -0800
Filmmakers & Screenwriters (please forward to interested parties):
The Long Beach Film Festival is now accepting screenplays and films (short,
documentary & feature) in all formats. The winners' work will be reviewed by a
committee of established production companies. This is a great way to get
exposure and even discovered in Hollywood.
The festival is being held onboard the renowned Queen Mary in Long Beach,
California (30 miles from Hollywood). The dates of the festival are September
13 - 22, 2002.
You can view an 8 x 10 flyer here:
http://www.longbeachfilmfestival.com/poster.html
A 20% discount has been set up for students and independent filmmakers. The
discounted submission prices are as follows:
ORIGINAL PRICE DISCOUNTED PRICE
Short Film $45 $36
Feature Film $60 $48
Screenplay $50 $40
To take advantage of these discounted prices, simply include a printout of this
email with the submission form and legibly write 'email discount' on the
payment check.
The submission forms can be found here:
http://www.longbeachfilmfestival.com/entry.htm
All submissions must be received by August 15th, 2002.
We look forward to receiving your work.
Robin Duarte
http://www.longbeachfilmfestival.com
|
|
994 A.2d 1040 (2010)
202 N.J. 43
STATE
v.
McCARY.
Supreme Court of New Jersey.
May 19, 2010.
Petition for Certification Denied.
|
|
Metrics and proxies for stringency of regulation of plant water status (iso/anisohydry): a global data set reveals coordination and trade-offs among water transport traits.
Plants operate along a continuum of stringency of regulation of plant water potential from isohydry to anisohydry. However, most metrics and proxies of plant iso/anisohydric behavior have been developed from limited sets of site-specific experiments. Understanding the underlying mechanisms that determine species' operating ranges along this continuum, independent of site and growing conditions, remains challenging. We compiled a global database to assess the global patterns of metrics and proxies of plant iso/anisohydry and then explored some of the underlying functional traits and trade-offs associated with stringency of regulation that determines where species operate along the continuum. Our results showed that arid and semi-arid biomes were associated with greater anisohydry than more mesic biomes, and angiosperms showed marginally greater anisohydry than gymnosperms. Leaf water potential at the turgor loss point (Ψtlp) and wood density were the two most powerful proxies for ranking the degree of plant iso/anisohydry for a wide range of species and biomes. Both of these simple traits can be easily and rapidly determined, and therefore show promise for a priori mapping and understanding of the global distribution pattern of the degree of plant iso/anisohydry. Generally, the most anisohydric species had the most negative values of Ψtlp and highest wood density, greatest resistance to embolism, lowest hydraulic capacitance and lowest leaf-specific hydraulic conductivity of their branches. Wood density in particular appeared to be central to a coordinated series of traits, trade-offs and behaviors along a continuum of iso/anisohydry. Quantification of species' operating ranges along a continuum of iso/anisohydry and identification of associated trade-offs among functional traits may hold promise for mechanistic modeling of species-specific responses to the anticipated more frequent and severe droughts under global climate change scenarios.
|
|
Spain is the EU country where most people live in apartments
Eurostat spends a good amount of money in
producing statistics about almost any activity within the EC and offers very valuable information about the construction industry. This time it has produced an array of figures about where the
European likes to live.
I am not one for statistics I must confess, but as
I have mentioned in many other articles we do obtain a lot of useful information especially for those of us involved in the construction industry.
Spain tops the ranking
According to the latest data from the European
Statistical Office (Eurostat), Spain tops the ranking of countries in the European Union (EU) where the highest percentage of population lives in an apartment: 66.5% of Spaniards live in this
type of building, compared to the 33.1% who live in a house. The figure is striking, especially when compared with other neighbouring countries. In France, for example, the ratio is almost reversed:
seven out of 10 French people live in a house, versus three out of 10 in apartments.
The difference is even greater if we look at the
figures for the UK, the country with the highest percentage of the population living in houses: 84.7% versus 14.4% living in a flat (0.9% of those interviewed answered with another category called
"other"). The closest country to Spain with apartments as the most widespread living accommodation is Latvia (65.1%), followed by Lithuania (58.4%) and Greece (56.9%), in that order.
The result of the average of the EU countries also
marks a clear dissimilarity with the Spanish context: six out of 10 Europeans live in a house, while the remaining four do so in an apartment; more than 2.5 points of difference from the
Spanish proportion.
There are more home owners in Spain than in
other European countries.
Another interesting figure from the Eurostat study on
the conditions and characteristics of housing in the EU is about ownership, all data shown here are obtained from 2014.
In this respect, nearly eight out of 10 Spaniards
(78.8%) own the property in which they live, 8.7% more than the European average. For rent they are somewhat below the average: 21.2% versus 29.9% for the European Community.
Why is the apartment so quintessential to the
Spanish people and why are they so prone to this property regime?
The reasons can be explained by three factors: the
historical, economic and sociological.
From inside the castle wall to the apartment
block.
Let’s start from the beginning. We have to roll
back to the turbulent middle ages, when wars determined the pattern of urban settlements. The cities were walled, the ground was very limited and already at that time housing needed to be built
in height. It was also the same in other countries, but in those countries wars did not last centuries as in Spain.
More recently, we had the rural exodus: Farmers
left the countryside and moved on to the city. In Spain this happened not long ago just in the decades of the 60’s to the 80’s. People migrated to cities and property developers sorted the
problem out with a quick construction method: the block of flats.
Today, vertical construction has been widely
accepted because it is greener and more resource-efficient.
Spain is an increasingly empty country where it is
increasingly easy to build horizontally. Still remember that, despite everything, the Spaniards hardly see the good side of an ecological construction and tend to seek the villas from a
prestigious point of view.
Property developers take control.
The role of the economy and the current situation
of crisis arising from the bursting of the housing bubble, are some of the explanations that make almost seven out of 10 Spaniards live in apartments. There has been a very uneven economy and
there are the selected few who control the sale of development land. The property developer gets more economic benefit from building in height because they can make more profit.
A conservative family orientated
society.
The Spanish idiosyncrasies explain the property
ownership regime being most widespread among the Spanish people on one hand, and developments being built around the block of flats on the other.
The Spaniards are very conservative and fear and
loath financial investments. You only have to read recent news to see what happened to those who tried buying complicated financial products that they didn’t understand.
In general people have always seen the brick as a
solid long term investment, unlike financial products.
They are also conservative in their family
structure. There is less geographical mobility than in other countries, and historically people have bought a house because they did not anticipate moving for work reasons for a long time.
Having said that, due to the current crisis there is
a good percentage of the working population ready to move anywhere, even abroad, for a stable job position.
This has impacted directly on the sale of
properties, now the tendency has changed to rent.
However, figures for rental in this country are still
far from those of European countries more oriented in that direction. As shown in the Eurostat study, Germany with 52.5%, Austria with 57.2% and Denmark with 63.3% are the countries where most people opt for
the lease in detriment of an ownership regime.
|
|
Cast metal bases as an economical alternative for the severely resorbed mandible.
Resorption of the alveolar ridge is a common problem in edentulous patients and can compromise the stability and function of dentures. Resorption and its consequences can be minimized when strategically placed implants are used; however, this option is financially out of reach for many patients. The article discusses a more cost-effective alternative (metal-based dentures) for patients with ridge resorption. In certain environments, like a dental school, where patients are looking for solutions to their dental problems at a reasonable price, cast metal bases can be a feasible economical alternative for edentulous patients. Both cases presented here demonstrated a significant improvement in stability, phonation, and mastication.
|
|
Pancreatic trauma: Management and literature review.
Pancreatic injury is an uncommon event often difficult to diagnose at an early stage. After abdominal trauma, the surgeon must always be aware of the possibility of pancreatic trauma due to the complications associated with missed pancreatic injuries. Due to its retroperitoneal position, associated organ and vascular injuries are almost always present, which along with frequent extra-abdominal injuries explain the high morbidity and mortality. The aim of this study is to present a concise description of the incidence of these injuries, lesional mechanisms, recommended diagnostic methods, therapeutic indications including nonoperative management, endoscopy and surgery, and an analysis of pancreas-specific complications and mortality rates in these patients based on a 60-year review of the literature, encompassing 6,364 patients. Due to the pancreatic retroperitoneal position, associated organ and vascular injuries are almost always present, which along with frequent extra-abdominal injuries explain the high morbidity and mortality of these patients.
|
|
[Patterns of Candida esophagitis in cancer and AIDS patients: histopathological study of 23 patients].
Candida oesophagitis is a common concomitant disease in neutropenic cancer patients after chemotherapy as well as in HIV-patients. In order to characterize the features of oesophagitis in each population, we reviewed the medical history and pathology records of 23 patients (18 cancer-patients, 5 HIV-patients) with culture and autopsy-proven Candida oesophagitis. Histopathological patterns of morphology, invasion, angioinvasion and inflammation were evaluated. Virtually all patients, 17/18 cancer- and 5/5 HIV-patients, had a history of previous mucosal candidosis or candidemia. There was a significant difference histopathologically in depth of invasion of the Candida-organisms between cancer and HIV-patients. Only in HIV-patients organisms were observed within the muscularis propria and the adventitia (2/5 vs 0/18; p = 0.04). The frequency of angioinvasion (12/18 vs 3/5) was similar in both groups. Neutropenia (< 500/microliter) was present in 12 (68%) of 18 cancer patients vs 0/5 HIV-patients (p = 0.01). Correspondingly there was a significantly higher PMN/MN ratio in the oesophageal inflammatory infiltrate in HIV-patients, reflecting chemotherapy-induced neutropenia in cancer patients (p = 0.02). Oesophageal candidosis in HIV-patients may be highly invasive despite the presence of neutrophils. These findings suggest an impaired inflammatory response of HIV-patients to invasive candidosis, leading to impaired mucosal host defence.
|
|
TMBA 166 (LBP142) – The Hiring Golden Triangle
Happy Valentines Day from the fellas at The Lifestyle Business Podcast. Everybody’s back together this week to bring you some love. Ian has determined Tokyo to be his favorite Asian city and Dan has returned from some business (and pleasure) in the Philippines.
Dan and Ian discuss hiring, when to use interns vs. VA’s vs. professionals and how this can have a profound impact on your business’ growth. They have also been getting an incredible amount of emails, reviews and feedback from everybody so the fellas take some time to answer your most pressing questions, concerns and confessions.
To Hire or Not to Hire…
How you can scientifically determine the best time to hire your first employee.
|
|
Kengo Ota
is a Japanese football player for Grulla Morioka.
Career
After attending Osaka University of Health and Sport Sciences, Ota joined Grulla Morioka in January 2018.
Club statistics
Updated to 30 August 2018.
References
External links
Profile at J. League
Profile at Iwate Grulla Morioka
Category:1995 births
Category:Living people
Category:Osaka University of Health and Sport Sciences alumni
Category:Association football people from Kanagawa Prefecture
Category:Japanese footballers
Category:J3 League players
Category:Iwate Grulla Morioka players
Category:Association football defenders
|
|
Guard youths from alcopops
9:55 AM,
May 8, 2013
Written by
Dylan Goodman
OPINION
Attending any high school means you hear a lot about what everyone is doing - from after-school activities to alcohol. One of the major problems are alcopops - alcoholic drinks marketed toward youths that are easy to mistake as juice, soda or energy drinks because of their packaging and taste. I attend Asheville High School and work with Youth Empowered Solutions, a youth advocacy group that focuses on everything from youth obesity to substance abuse. We've worked before on labeling alcopops with stickers that remind adults not to purchase the products for youths and help distinguish the ...
|
|
In cranes, cargo-handling machinery or construction machinery, such as excavators for example, hydraulic quick couplings are widely used for the purpose of coupling structural components which have to be separated or reset for a specific use of for transport.
The structural components are in most cases connected mechanically by quick-change systems, the coupling of the power transmission lines, especially those with large cross sections, being associated with considerable expenditure in terms of energy and in terms of time.
One object of the present application is to make available a hydraulic quick coupling which on the one hand reduces the expenditure of energy and time and on the other hand avoids contamination of the hydraulic fluid by using individual couplings free from leakage oil.
According to the one embodiment, the object is achieved by a hydraulic quick coupling. The coupling includes two interacting quick-coupling parts which are arranged respectively on the structural components that are to be connected or separated. One quick-coupling part has at least one guide bolt which can engage in a centering bore of the quick-coupling part lying opposite it, each quick-coupling part being provided with coupling plugs or coupling sleeves for the connection of the hydraulic lines, and at least one quick-coupling part being arranged movably on one structural component in order to connect or separate the two quick-coupling parts.
Preferred embodiments are set out in the dependent claims following on from the main claim. Accordingly, one quick-coupling part can preferably be arranged fixedly on one structural component, while the other quick-coupling part is arranged movably on the second structural component.
Particularly advantageously, at least one of the quick-coupling parts is spring-mounted in a support frame. In this way, the coupling can be kept free from forces acting on the structural components. The quick-coupling part spring-mounted in the support frame can, together with said support frame, be mounted movably on the structural component.
At least one lock can be provided via which the quick-coupling parts can be locked to one another in the coupled state. The lock can secure the at least one guide bolt driven into the corresponding at least one centering bore.
The movable quick-coupling part can sit displaceably on a linear guide. As has already been mentioned, the support frame in which the quick-coupling part is spring-mounted can also be guided on this linear guide. The movable quick-coupling part is advantageously displaceable via a piston/cylinder arrangement. To lock the quick-coupling parts in the coupled state, it is also possible for the coupled position to be fixed, for example, by a permanent pressure load of the piston/cylinder arrangement or by suitable shut-off valves.
Advantageously, the movable quick-coupling part spring-mounted in the support frame can be fixed in its opened position by a guide. The guide can comprise a guide means, for example, a guide bolt which engages in the coupling sleeve in the opened position of the quick-coupling part. In this position, the guide means, that is to say for example a guide bolt, permits guiding of the spring-mounted quick-coupling part in such a way that the forces acting on the latter can be taken up. When attaching the quick-coupling part, that is to say when moving it into the closed position, the quick-coupling part moves with its centering bore onto the guide bolt of the other quick-coupling part lying opposite it. In the coupled position, the guide means, that is to say for example the guide bolt, frees the corresponding coupling sleeve. The securing of the quick-coupling part is taken over by the guide bolt of the opposite quick-coupling part.
To provide a possibility of also being able to couple structural components which are angled about their bolted point, at least one of the two quick-coupling parts is arranged on a pivotable support bracket. The support bracket can be pivoted by its own piston/cylinder arrangement. The quick-coupling part arranged on the support bracket can in addition be driven along the support bracket and moved to and fro along the lengthwise guide with another piston/cylinder arrangement. In this way, the quick-coupling parts can also be coupled in an angled position.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.