%% Antes de processar este arquivo LaTeX (LaTeX2e) deve
%% verificar que o arquivo TEMA.cls estah no mesmo
%% diretorio. O arquivo TEMA.cls pode ser obtido do
%% endereco www.sbmac.org.br/tema.

\documentclass{TEMA}

\usepackage[english]{babel}      % for text in English
%\usepackage[brazil]{babel}     % for text in Portuguese

\usepackage[latin1]{inputenc}   % for accented characters (Portuguese abstract)
%\input{P-margin.inf}

%\usepackage[dvips]{graphics}   % superseded by graphicx (loaded below)
\usepackage{amssymb,amsmath, amsthm}
\usepackage{subfigure}
\usepackage{graphicx}
%\usepackage{epsfig}            % obsolete; graphicx (loaded above) suffices
\newtheorem{proposition}{Proposition}[section] 
\newtheorem{definition}{Definition}[section]
\newtheorem{remark}{Remark}[section]
\newcommand{\B}{{\tt\symbol{92}}}
\newcommand{\til}{{\tt\symbol{126}}}
\newcommand{\chap}{{\tt\symbol{94}}}
\newcommand{\agud}{{\tt\symbol{13}}}
\newcommand{\crav}{{\tt\symbol{18}}}
\newcommand{\Rb}{\mathbb{R}}
\newcommand{\Nb}{\mathbb{N}}

\begin{document}

%********************************************************
\title
    {On  high order barycentric root-finding methods}

%\author
 %   {M\'ario  M. Gra\c{c}a%
 %    \thanks{mgraca@math.ist.utl.pt}\,,
  %   Departamento de  Matem\'atica, Instituto Superior T\'ecnico,
	%	Universidade de Lisboa, 1049-001 Lisboa, Portugal\\ \\
  %   Pedro M. Lima%
  %   \thanks{plima@math.ist.utl.pt}\,,
  % CEMAT, Instituto Superior T\'ecnico,
	%	Universidade de Lisboa, 1049-001 Lisboa, Portugal.}

\criartitulo

\runningheads {M. M. Gra\c{c}a and P. M. Lima}{
On  high order barycentric root-finding methods}
\begin{abstract}
{\bf Abstract}. To approximate a simple root of a real function $f$  we construct a family of  iterative maps, which we call  {\it Newton-barycentric} functions, and analyse their convergence order. The performance of the resulting methods is illustrated by means
of   numerical examples.


{\bf Keywords}.  Order of convergence, Newton's method,  Newton\--bary\-centric map, nonlinear equations.

\end{abstract}
\newsec{Introduction}
\label{introd}
The classical Newton's iterative scheme for approximating the roots of an equation has been generalized by many authors in order to define iterative maps going from cubical to arbitrary orders of convergence (see for instance \cite{traub}, \cite{rheinboldt}, \cite{collatz}, \cite{householder}, \cite{labelle}, \cite{sebah},  \cite{torres}, \cite{weerakoon},  
\cite{Torregrosa}, \cite{ham}, \cite{graca} and references therein).
In particular we mention the third-order method introduced in \cite{weerakoon} and
a family of high-order methods based on quadrature rules, recently described 
in \cite{graca}.

In that work the authors have introduced a recursive algorithm for constructing iterative maps.
The first iterative function  of this family is the classical Newton's method,  which can be seen as the result of applying the rectangles rule to approximate a certain integral. As is well known,
Newton's method has in general second order of convergence (when applied to the computation of 
simple roots). In the referred work \cite{graca}, the authors have shown that iterative maps of arbitrarily high
order can be obtained if quadrature rules of higher degree are used (instead of the rectangles rule)
to compute the mentioned integral. The properties of these methods were analysed and their convergence
has been illustrated by several numerical examples.

\medskip
\noindent
In  the present work we also construct a family of high order iterative maps, starting with the Newton's method, but we follow a different approach.
All the  iterative maps $t$  to be considered have a common structure  $t(x)=x- \left[\phi(x)  \right]^{-1} f(x)$, where $f$ is a real function with a simple zero at $z$.  The function  $\phi$  will be called a 
{\it model function}. This model function depends on  $f$ and on another function  $h$ which we name {\it step function}.


\medskip
\noindent
The family of iterative maps to be discussed is constructed by choosing a  certain model function $\phi$. We remark that our approach does not follow the traditional  path for generating iterative maps by direct or inverse hyperosculatory interpolation (see, for instance \cite{traub}) or  Taylor expansions around a zero of $f$ (see for instance \cite{sebah}).
 
 
%%%%%

Given  a real\--valued  function   $f$ defined on an open set $D\subset \Rb$,  we assume that $f$ is sufficiently smooth  in a neighbourhood of a {\it simple zero} $z$ of $f$. In what follows we show how to  construct a family of  iterative maps $t$ generating a sequence $x_{i+1}=t(x_i)$, $i=0,1,\ldots$, converging locally to $z$. 


Maps of higher order of convergence can produce  accurate approximations with a smaller number of iterations. However, in general, they  have expressions of increasing complexity, thus augmenting their computational cost. In this paper we show that at least for some Newton-barycentric
methods  the gain in accuracy compensates the computational cost.

Like other root-finding methods these iterative maps can be easily
extended to the case of multivariate functions. Numerical experiments have
been carried out and the results are promising. The corresponding theory
is still under construction.

The paper is organized as follows. In Section~\ref{modelo} we introduce the notion of a model function $\phi$ and prove that the iterative map $t(x)=x- \left[\phi(x)  \right]^{-1} f(x)$ has a certain order of convergence 
(see Proposition~\ref{prop1}). This is the main idea for the construction of recursive families of
iterative functions.

In  Section 3  we describe in detail one of these families, which we call  the Newton-barycentric iterative maps. 
We begin by specifying the choice of the model function in the case of this family  (subsection 3.1).
Then we describe the algorithm for obtaining iterative maps, based on this model function
and we give the formulae of some of these maps (subsection 3.2).
In Section 4 we analyse  the application of some Newton-barycentric formulas to some numerical examples.
We finish in Section 5 with some conclusions.

\newsec{Recursive Families of Iterative Maps}\label{modelo}

\medskip

%Let us remark that the purpose of the class of methods we introduce is not to improve the efficiency, in the sense of minimizing the number of operations needed to obtain
% a certain accuracy. As we shall see, the Newton-barycentric methods are not optimal, in the sense of the Kung and Traub's conjecture, see \cite{KungTraub}.
%However their advantage comparing with Newton's method is that they frequently have a larger "attraction basin", that is, given a certain root $z$ there is a larger neighborhood
% $V(z)$ such that if $x_0 \in V(z)$ then the error of the first iterate of the Newton-barycentric
% method is sufficiently close to zero. In order to analyse this property, given a certain
% iterative map $t_k$, we introduce
% the concept of $\epsilon$-set $B_k(\epsilon,z)$: $x$ belongs to $B_k(\epsilon,z)$
% if and only if $|t_k(x)-z|<\epsilon$. Comparing different types of iterative functions, it turns out that in certain cases the Newton-barycentric methods have larger $\epsilon$-sets
% than other iterative methods of higher convergence order with similar complexity.

In  this section we describe a procedure for constructing recursive families 
of higher order iterative methods. 
 %%%%%%%%%%%%%%%%%%%%%%%
\begin{proposition}\label{prop1}
Let $z$ be a simple zero of a function  $f: D\subset \Rb\mapsto \Rb$ and $\phi$ a sufficiently smooth function in  a neighbourhood of 
$z$, such that its derivatives $\phi^{(i)}$
satisfy the  $j+1$ equalities
\begin{equation}\label{eq2}
\begin{array}{l}
 \phi^{(i)}(z)=\displaystyle{\frac{ f^{(i+1)}(z)}{i+1}},\qquad i=0,1,\ldots, j,
\end{array}
\end{equation}
for $j\geq 0$ a fixed integer.
Then, for any initial value $x_0$ sufficiently close to $z$, the iterative process $x_{k+1}=t(x_k)$, $k=0,1,\ldots$,  with
\begin{equation}\label{eq1}
t(x)=x-\phi^{-1} (x)\, f(x),
\end{equation}
 converges to  $z$  and its order of convergence  is at least $j+2$.
\end{proposition}

\begin{proof} 
From \eqref{eq1} it is obvious that the zero $z$ of $f$ is a fixed point of the map $t$ (that is, $t(z)=z$). Let us consider the function $\,\Delta t$ defined  by
% \begin{equation}\label{eq3}
$$ 
\Delta t(x)= t(x)-x.
$$
%\end{equation}
Note that  its derivatives are
$$\Delta^{(1)} t(x)= t^{(1)}(x)-1,\quad \mbox{and}\quad \Delta^{(i)} t(x)= t^{(i)}(x), \quad \mbox{for}\quad  i\geq 2.$$
We now use induction on $j$ to prove that the hypotheses in \eqref{eq2} imply that  $\Delta t(z)=0$, $\Delta^{(1)} t(z)= -1$, $\Delta^{(j)} t(z)= 0$, for $j\geq 2$, and consequently  $t$ has the referred order of convergence.

\medskip

Let $j=0$. Rewriting   \eqref{eq1} as
\begin{equation}\label{eq4}
\phi(x)\, \Delta t(x)= - f(x),
\end{equation}
and applying the derivative operator to this equation, we have
\begin{equation}\label{eq5}
\phi^{(1)} (x)\, \Delta t (x)+ \phi(x)\, \Delta ^{(1)}t(x)= - f^{(1)} (x).
\end{equation}
Since  $\Delta t (z)=0$, and $\phi$ satisfies  \eqref{eq2} with $i=0$,   it follows that
$
f^{(1)} (z)\, \Delta^{(1)} t (z)=-f^{(1)} (z).
$
As   $z$ is a simple zero for $f$, then 
$$
\Delta^{(1)} t (z)=-1\,\Longleftrightarrow\, t^{(1)}(z)=0,
$$
which means that the iterative process generated by   $t$ has local order of convergence $p$ at least $2$. That is, $p\geq j+2$.

\medskip

Let $j=1$. Differentiating  \eqref{eq5}, we get
$$
\phi^{(2)} (x)\, \Delta t (x)+2\,  \phi^{(1)}(x)\, \Delta ^{(1)}t(x)+\phi(x)\, \Delta^{(2)} t(x)= - f^{(2)} (x).
$$
  Since   $\Delta t(z)=0$ and $\Delta^{(1)} t(z)=-1$,   we obtain 
$$
-2\, \displaystyle{ \frac{f^{(2)}(z)}{2} } + f^{(1)} (z)\, \Delta^{(2)} t (z)= - f^{(2)} (z).
$$
Therefore $\Delta^{(2)} t (z)=t^{(2)}(z)=0$, and so the iterative process has local order of convergence at least $3=j+2$.

\medskip

For an integer $m\geq 2$, assume that
$$
\phi^{(j)} (z)=\displaystyle{\frac{f^{(j+1)} (z)}{j+1}   }, \quad \mbox{for}\quad j=0,1,\ldots,m,
$$
and
\begin{equation}\label{num1}
\Delta^{(1)} t(z)=-1, \quad  \Delta^{(j)} t(z)=0,  \quad \mbox{for}\quad j=2,3,\ldots,m.
\end{equation}
Let us show that $ \Delta^{(m+1)} t(z)=t^{(m+1)}(z)=0$.
From \eqref{eq4}  and the Leibniz's rule for the derivatives  of the product, we have
$$
\begin{array}{ll}
& \phi^{(m+1)} (x)\, \Delta t(x)+\binom{m+1}{1}  \phi^{(m)} (x)\, \Delta^{(1)} t(x)+\cdots+ \binom{m+1}{m}  \phi^{(1)} (x)\, \Delta^{(m)} t(x)+\\
&\hspace{2cm}+  \phi^{(0)} (x)\, \Delta^{(m+1)} t(x)= - f^{(m+1)}(x).
\end{array}
$$
Thus, by the induction hypotheses, we obtain
$$
-\binom{m+1}{1} \,\displaystyle{\frac{ f^{(m+1)} (z)}{m+1}}+ f^{(1)}(z)\, \Delta^{(m+1)} t (z)= - f^{(m+1)} (z) \, \Leftrightarrow \Delta^{(m+1)} t (z)=0.
$$
Hence the iterative map $t$ has local order of convergence $p\geq m+2$ and the proof is complete.
\end{proof}
\begin{remark}\label{remnovo}
The well-known result on the local order of convergence of  Newton's  map $t(x)=x-\left[ f^{(1)}(x)  \right]^{-1} f(x)$ follows immediately from  Proposition~\ref{prop1}. It is enough to see that \eqref{eq2} is verified for $j=0$, i.e. $\phi^{(0)}(z)=f^{(1)}(z)$, and so $t$ has local order of convergence at least 2.
\end{remark}


With the aim of analysing in detail the process of creating iterative maps, proposed in
Proposition~\ref{prop1}, we will now introduce the definitions of  model function
and  step function.


\begin{definition}\label{defstand}
Let $z$ be a simple zero of a function  $f: D\subset \Rb\mapsto \Rb$,  $h$ and $\phi$ sufficiently smooth functions in a neighbourhood of $z$, and $j\geq 0$ a fixed  integer.
\begin{itemize}
\item A function $\phi$ is called a model function of degree $j$, at $x=z$, if it satisfies the $j+1$ conditions \eqref{eq2}, but does not satisfy
a similar condition for $i=j+1$.
\item A function $h$  is called a  step function  of degree $j$ at $x=z$ (or simply a step function) if it satisfies the following $j+1$ equalities: 
 \begin{equation}\label{hs}
\begin{array}{l}
h(z)=0,\quad 
h^{(1)} (z)=-1\quad \mbox{and}\quad
h^{(i)} (z)=0, \quad \mbox{for}\quad i=2,3,\ldots, j,
\end{array}
\end{equation}
but does not satisfy the condition corresponding to $i=j+1$.
\end{itemize}
\end{definition}


{\bf Example 2.1} Under the conditions of Definition~\ref{defstand}, set
\begin{equation} \label{phi}
\phi(x) \equiv f'(x).
\end{equation}
Then we have $\phi(z)=f'(z)$ but $\phi^{(1)}(z)=f''(z) \ne f''(z)/2$.
Therefore, $\phi$ defined by \eqref{phi} is a model function of degree 0.


In the same way, let
\begin{equation} \label{h}
h(x) \equiv \phi^{-1}(x) f(x)= \left(f^{(1)}\right)^{-1}(x) f(x).
\end{equation}
In this case, $h(z)= f(z)/f'(z)=0$, but $h'(z)=1 - f(z)f''(z)/(f'(z))^2 =1 \ne -1$.
Hence $h$ defined by \eqref{h} is a step function of degree 0.

\medskip

The above defined model function $\phi$ and step function $h$ can be used
to define the map $t(x)=x-\left[ f^{(1)}(x)  \right]^{-1} f(x)$, which
coincides with Newton's method (see Remark~\ref{remnovo}).
    %===================================
%============================================================================
\newsec{The  Newton-barycentric  Maps}\label{subbar}
 We now consider a family of iterative maps, based on the model function 
$\phi_k$, defined
  as follows: $\phi_k$ is the inner product  of a constant vectorial function  $U_k$  and a  function  $V_k$ depending only on the first derivative $f^{(1)}$  evaluated at $x+ i\, h(x)$, for $i=0,\ldots, k$. Namely, we take
\begin{equation}\label{lc0}
U_k(x) = \left( a_0,a_1,a_2,\cdots, a_k  \right) =\mathbf{a},
\end{equation}
where the choice of ${\bf a}$ will be discussed below, and
\begin{equation}\label{lc1}
V_k(x)=\left( f^{(1)}(x),f^{(1)}(x+h(x)),\cdots, f^{(1)}(x+k\, h(x))  \right),
\end{equation}
where $h$ is a step function of degree $k$. 
If one proves that $\phi_k=\langle\mathbf{a}, V_k(x)\rangle$ is a model function
 of degree $k$ then, by Proposition~\ref{prop1},  the respective process $t_k(x)= x-\left[\phi_k(x)\right]^{-1} f(x)$ has  order of convergence at least $k+2$. 


\subsection{Choice of the model function}


\medskip

The next proposition shows that $\phi_k$ is a model function of degree $k$ if and only if $U_k(x)=\mathbf{a}$ is the unique  solution  of a non homogeneous linear system. Moreover,  this solution represents {\em the barycentric coordinates of   $\phi_k$ in a basis defined by the components of $V_k$}. The name {\em Newton-barycentric maps} reflects this
property, as well as the fact that the recursive process starts
with the Newton method.


\begin{proposition}\label{lemaC}
Let $f$  be a function satisfying the hypotheses of Proposition~\ref{prop1},  $h$ a  step function of degree $k$, $k\geq 0$ a fixed integer and
 $\phi_k=\langle U_k, V_k\rangle$, 
  with $U_k$ and $V_k$ defined by \eqref{lc0} and \eqref{lc1}. That is, 
  \begin{equation}\label{lc4}
\phi_k(x)=a_0 f^{(1)} (x)+ a_1 f^{(1)}( x+ h(x)) +\cdots + a_k f^{(1)}( x+k\, h(x)).
\end{equation}
Then, the derivative of order $k$  of $V_k$, evaluated at $x=z$,  is $V^{(k)}_k(z)=D_k \, R_k$,
%\begin{equation}\label{lc3}
%$
%\begin{array}{l}
%V^{(k)}_k(z)= M_k=D_k \, R_k,\\ 
%\end{array}
%$
%\end{equation}
where $D_k$ and $R_k$ are the following $(k+1)\times (k+1)$ matrices 
$$D_k=\operatorname{diag} \left( f^{(1)}(z), f^{(2)}(z),\cdots, f^{(k+1)}(z)\right),
$$
and
\begin{equation}\label{lc3a}
R_k=\begin{bmatrix}
1&1&1&1&\ldots&1\\
1&0&-1&-2&\ldots&-(k-1)\\
1&0&1&2^2&\ldots&(k-1)^2\\
\vdots&\vdots&\vdots&\vdots&\vdots&\vdots\\
1&0&(-1)^k &(-1)^k 2^k&\ldots&(-1)^k (k-1)^k\\
\end{bmatrix}.
\end{equation}
Furthermore,
\begin{itemize}
\item[(i)] The function $\phi_k$ is a model function of degree $k$
if and only if $$U_k=\mathbf{a}=\left( a_0,a_1,a_2,\cdots, a_k  \right) $$ is the (unique) solution of the  linear system
\begin{equation}\label{lc5}
R_k\, \mathbf{a} =\mathbf{b},\quad \text{with}\quad \mathbf{b}=\left(1,1/2,1/3,\cdots,1/(k+1)\right).
\end{equation}
In particular, this solution satisfies the equality 
\begin{equation}\label{lc8}
\sum_{i=0}^k a_i=1.
\end{equation}
\item[(ii)] If $f^{(i)}(z)\neq 0$ for $i=1, \ldots, k$, then the function $U_k=\mathbf{a}$  represents   the (normalized) barycentric coordinates of the model function  $\phi_k$ relative to the
basis,
%\begin{equation}\label{lc9}
$$
{\cal V}_k=\left\{  f^{(1)} (x),  f^{(1)}( x+ h(x)),\cdots,  f^{(1)}( x+k\, h(x))  \right\}.
$$
%\end{equation}
\end{itemize}
Moreover,  the iterative process generated by $t_k(x)= x-\left[\phi_k(x)\right]^{-1} f(x)$ has   order of convergence at least $k+2$.
\end{proposition}

\begin{proof}
For $i=0,1,\ldots,k$ the  derivatives of order $i$ of  $V_k$,  evaluated at  $x=z$, are:
\begin{align*}\nonumber
V_k^{(0)}& = f^{(1)} (z) \left(1,1,1,1,1,\cdots,1     \right)\\\nonumber
V_k^{(1)}&= f^{(2)} (z) \left(1,0,-1,-2,-3,\cdots,-(k-1)     \right)\\ \nonumber
V_k^{(2)}&= f^{(3)} (z) \left(1,0,1,2^2,3^2,\cdots,(k-1)^2     \right)\\\nonumber
\vdots \\
%\label{lc2}
V_k^{(k)}&= f^{(k+1)} (z) \left(1,0,(-1)^k, (-1)^k 2^k, (-1)^k 3^k, \cdots, (-1)^k (k-1)^k     \right).
\end{align*}
So, the matrix whose rows are $V^{(i)}_k(z)$, $i=0,\ldots,k$, equals $D_k \, R_k$.

\medskip

For (i), since $\phi_k=\langle \mathbf{a}, V_k(x)\rangle$,   it is straightforward to verify that the conditions \eqref{eq2} for $\phi_k$ to be a model function are equivalent to the system $R_k\,\mathbf{a} =\mathbf{b}$. So, $U_k=\mathbf{a}$ must be a solution of this system. As $R_k$ is nonsingular, this is the unique solution. Furthermore, since $z$ is a simple zero of $f$, the equality  \eqref{lc8} holds because it is just the first equation of the system $R_k\,\mathbf{a} =\mathbf{b}$.

\medskip
For (ii), we need to show that for $\mathbf{\alpha}=(\alpha_0, \alpha_1, \ldots, \alpha_k)\in \Rb^{k+1}$, such that
\begin{equation}\label{above}
\alpha_0 f^{(1)}(x)+ \alpha_1 f^{(1)}(x+h(x))+ \ldots+  \alpha_k f^{(1)}(x+k\, h(x))=\mathbf{0},
\end{equation}
the only solution is $ \mathbf{\alpha}=\mathbf{0}$. Differentiating  \eqref{above} and evaluating  at $x=z$, we obtain the homogeneous linear system
$$
\operatorname{diag}\left(f^{(1)} (z), f^{(2)} (z), \cdots, f^{(k+1)} (z)\right) R_k\, \mathbf{\alpha}= \mathbf{0},
$$
which admits only the   solution $\mathbf{\alpha}=\mathbf{0}$ since  both the diagonal matrix and $R_k$ are nonsingular.

\medskip

The last assertion follows from Proposition~\ref{prop1} since by item (i) $\phi_k$ is a model function of degree $k$.
\end{proof}

 

The expressions for the first five barycentric  maps are shown in Table \ref{tab2}.
    \begin{table}
$$
%\hspace{-1.9cm}
\small
\begin{array}{ |   l  | }
\hline
t_1(x)= x- \displaystyle{ \frac{2\, f(x)}{f^{(1)}(x)+ f^{(1)}(x+h_1(x))}   }\\
t_2(x)=  x- \displaystyle{ \frac{12\, f(x)}{5\, f^{(1)}(x)+8  f^{(1)}(x+h_2(x))-  f^{(1)}(x+2\,h_2(x))}   }\\
t_3(x)= x- \displaystyle{ \frac{24\, f(x)}{9\, f^{(1)}(x)+19  f^{(1)}(x+ h_3(x))-5\,  f^{(1)}(x+ 2\,h_3(x))+ f^{(1)}(x+3\, h_3(x))}   }\\
t_4(x)= x- \displaystyle{ \frac{720\, f(x)}{251\, f^{(1)}(x)+646  f^{(1)}(x+h_4(x))-264\,  f^{(1)}(x+2\, h_4(x))+106\, f^{(1)}(x+3\, h_4(x))-19  f^{(1)}(x+4\, h_4(x))}   }\\
t_5(x)= x- \displaystyle{ \frac{1440\, f(x)}{\sum_{i=0}^{5} \alpha_i\, f^{(1)}(x+i\, h_5(x))}   }, \quad \mbox{with}\quad \alpha_0=475, \, \alpha_1=1427,\, \alpha_2=-798\\
\hspace{6cm}\begin{array}{l}
\alpha_3=482, \, \alpha_4=-173,\, \alpha_5=27\\
\end{array}\\
\hline
  \end{array}
  $$
  \caption{ First five barycentric type maps. \label{tab2}}
  \end{table} 
  
\subsection{Construction of the recursive family of iterative maps}\label{secrec}


We recall that a model function $\phi$ depends on a  certain step function $h$. Now, for each model function $\phi_j$ included in the definition of the map $t_j=x-\left[ \phi_j(x) \right]^{-1} f(x)$, we use a step function which is defined recursively by $h_j(x)=t_{j-1}(x)-x$.  The  starter $t_0$ will be taken to be the Newton's map $t_0(x)=x-[f^{(1)}(x)]^{-1} f(x)$.
 The next proposition shows that the  iterative map $t_m$, defined recursively in \eqref{step1A},  has local order of convergence at least $m+2$.
\begin{proposition}\label{propD} Let $z$ be a simple zero of a given  function $f$ and   $t_0 $ the Newton's  map
$$t_0(x)=x-[f^{(1)}(x)]^{-1} f(x).$$
\medskip

For a given natural number $m\geq 1$, define recursively the step function $h_m$ and the iterative map $t_m$ by
\begin{equation}\label{step1A}
\begin{array}{l}
h_j(x)= t_{j-1}(x)-x\\
\hspace{5cm}\quad j=1,2,\cdots,m\\
t_j(x)= x- \left[ \phi_j(x)   \right]^{-1}\, f(x),
\end{array}
\end{equation}
where $\phi_j$ is the barycentric model function given by \eqref{lc4}--\eqref{lc3a}, constructed using $h_j$ as step function.  Then, the   map $t_m$ has local order  of convergence  at least $m+2$.
\end{proposition}

\begin{proof} It is only necessary to prove that each function $h_j$ is a step function of degree $j$ and the statement follows from Proposition~\ref{lemaC}. 

\vspace{2mm}

Let us apply   induction on the integer $m$.  
For $m=1$, we  have $h_1(x)=t_0(x)-x$ and so $h_1(z)=0$ and $h_1^{(1)} (z)=-1$.
\vspace{2mm}

Now let $m\geq 1$ be an integer and assume that $h_m$ is a step function of degree $m$. Then, by Proposition~\ref{lemaC}, the map $t_m$ has local order of convergence at least $m+2$, so that $t_m(z)=z$ and $t_m^{(i)}(z)=0$ for $i=1,\ldots,m+1$. Hence $h_{m+1}(x)=t_m(x)-x$ satisfies $h_{m+1}(z)=0$, $h_{m+1}^{(1)}(z)=-1$ and $h_{m+1}^{(i)}(z)=0$ for $2\leq i\leq m+1$, that is, $h_{m+1}$ is a step function of degree $m+1$. 
\end{proof}

\medskip
The {\it Newton-barycentric} maps $t_k$ of arbitrary  degree $k$ are thus completely defined by Proposition~\ref{propD}.
Let us remark that the same procedure to construct recursive families
of iterative methods can be applied using other types of model
and step functions.
Moreover the idea of this method is easily extendable
to the case of multivariate functions. Multidimensional analogs of the Newton-barycentric
maps have  been implemented and applied to the solution of systems of 
nonlinear equations and  the numerical results obtained
so far are promising. 
%==============================================

   
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 \newsec{Numerical examples}\label{secexemp}
Besides the theoretical interest of this new family of iterative functions, some of
the above described methods 
 are of practical interest. Though 
they are not optimal in the sense of the Kung and Traub's conjecture \cite{KungTraub},
they are more efficient than the Newton's method, in the sense that with
the same number of function evaluations a more accurate result can be produced.
Concerning for example the Newton's method (iterative function $t_0$)
and the method with the iterative function $t_1$, we know  that the first
one requires 2 function evaluations at each iteration, while the second 
one requires 3. This means that two iterations of the second method
require as many function evaluations (6) as three iterations of the first one.
However the result produced by two iterations of the second method
is in general much more accurate than the one produced by 3 iterations
of the Newton's method. This happens, because the second method has at 
least convergence order three, and it follows that $t_1\circ t_1$ (the composition
of $t_1$ with itself) has at least convergence order 9; on the other hand,
since the Newton method has in general convergence order 2, the method
$t_0 \circ t_0 \circ t_0$ has just convergence order 8.   
Concerning the method $t_2$  (fourth order of convergence) each iteration requires 5 function evaluations;
however as we shall see in the examples below, one single iteration
of this method is often sufficient to obtain a result with an error
less than  $10^{-10}$ (which can only be obtained with three iterates of the 
Newton's method).

\medskip
Let us present some numerical results that illustrate the above properties. In all the cases we have applied the methods with iterative functions
$t_0$, $t_1$ and $t_2$, starting with a certain initial approximation $x_0$
and performing enough iterations to obtain an approximation
with absolute error less than $10^{-10}$. 
Then we compare the error of the last approximation, the number of iterations
and the number of function evaluations by each method. 
Moreover we have obtained computational estimates of the convergence order, which 
confirm the theoretical predictions. All the computations were 
carried out in a personal computer using {\em Mathematica} \cite{wolfram}.
The results are displayed
in Tables 2, 3 and 4, and
they illustrate the advantage of using the methods with the iterative
functions $t_1$ and $t_2$ for approximating  the roots of real functions.
%%%% table 2
\begin{table}
\begin{displaymath}
\begin{array} {|c|c|c|c|}
\hline
\makebox{Iterative function} & \makebox{Error} &\makebox{ n. of. iter.} &\makebox{n. of f. eval.}\\
\hline
t_0 &  2.13 \times 10 ^{-11} &  3  & 6 \\
t_1 & -4.54 \times 10^{-17} &  2   & 6\\
t_2 & -4.54 \times 10^{-11} &  1   & 5 \\
\hline
\end{array}
\end{displaymath}
\caption{ $f(x)=  x ^3 + 4x^2 -10$, the initial approximation is $x_0=1.$, the exact
solution is $z=1.3652300134141$ (see this  example in \cite{weerakoon}). }
\end{table}
%%%%%%%%%%%%%%%%%%%%%  table 3
\begin{table}
\begin{displaymath}
\begin{array} {|c|c|c|c|}
\hline
\makebox{Iterative function} & \makebox{Error} &\makebox{ n. of. iter.} &\makebox{n. of f. eval.}\\
\hline
t_0 &  1.03 \times 10 ^{-11} &  3  & 6 \\
t_1 & 3.8 \times 10^{-23} &  2   & 6\\
t_2 & -3.3 \times 10^{-16} &  1   & 5 \\
\hline
\end{array}
\end{displaymath}
\caption{ $f(x)=  \cos(x)-x $, the initial approximation is $x_0=0.1$, the exact
solution is $z=0.739085133215$.}
\end{table}
%%%%%%%%%%%%%%%%%%%%%  table 4
\begin{table}
\begin{displaymath}
\begin{array} {|c|c|c|c|}
\hline
\makebox{Iterative function} & \makebox{Error} &\makebox{ n. of. iter.} &\makebox{n. of f. eval.}\\
\hline
t_0 &  2.3\times 10 ^{-13} &  4  & 8 \\
t_1 & 1.8\times 10^{-13} &  3   & 9\\
t_2 & 4.8 \times 10^{-19} &  2   & 10 \\
\hline
\end{array}
\end{displaymath}
\caption{ $f(x)=  \tanh(x-1) $, the initial approximation is $x_0=0.$, the exact
solution is $z=1$.}
\end{table}



\section{Conclusions}  
    In this work we have introduced a family of high order iterative methods
    for the numerical solution  of nonlinear equations.
    This family starts with the Newton's method as the basis of the recurrence process.
    Then each member of the family is built by a well defined procedure, forming
    a sequence of increasing convergence order.
		
		The numerical examples presented in Sec. 4 demonstrate that the first
		iterative functions of this family (in particular $t_1$ and $t_2$)
		offer a good alternative to the Newton's method, taking into account their
		accuracy and number of function evaluations. We remark that these
		methods can be easily combined with algorithms for the separation
		of real roots, such as recently described in \cite{graca2}, providing an effective tool for detection and computation of real roots.
		
\bigskip		
{\bf Resumo.} Com o fim de aproximar uma raiz simples de uma fun\c{c}\~ao real 
$f$, constr\'oi-se
uma fam\'ilia de aplica\c{c}\~oes iteradoras, que se designam fun\c{c}\~oes Newton-baric\^entricas, e analisa-se
a sua ordem de converg\^encia. O desempenho dos m\'etodos computacionais resultantes \'e ilustrado
atrav\'es de exemplos num\'ericos.
		
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{thebibliography}{99}
\bibitem{collatz} {L. Collatz},  Functional Analysis and Numerical Mathematics, {\it Academic Press}, New York, 1966.

\bibitem{Torregrosa} {A.~Cordero and J.~Torregrosa}, Variants of Newton's method using fifth-order
quadrature formulas, {\it Appl. Math. Comput.}, 190 (2007) 686-698.



\bibitem{graca}M. M. Gra\c{c}a and P. M. Lima,  Root finding by high order iterative methods based on quadratures,
 {\it Appl. Math. Comput.} 268 (2015) 466-482.

\bibitem{graca2}M. M. Gra\c{c}a, Maps for global separation of roots,
{\it Electronic Transactions on Numerical Analysis}
  45 (2016) 241-256.
 
  \bibitem{ham} {Y.~Ham, C.~Chun and S.--G. Lee},   Some higher\--order modifications of Newton's method for solving nonlinear equations, {\it J. Comp. Appl. Math.} 222 (2008) 477\--486.

\bibitem{householder} {A.~S. ~Householder}, The Numerical Treatment of a Single Nonlinear Equation. {\it McGraw\--Hill}, New York, 1970.

\bibitem{KungTraub} H.T.~Kung and J.F.~Traub, Optimal order of one-point and multipoint
iteration, {\it J. Assoc. Comput. Mach.}  21   (1974) 634--651.

\bibitem{labelle} {G.~Labelle},   On extensions of the Newton\--Raphson iterative scheme to arbitrary orders, {\it  Disc. Math Th. Comput Sc. (DMTCS)}, proc. AN, 845\--856, Nancy, France,  2010.


\bibitem{rheinboldt} {W.~C.~Rheinboldt},   Methods for Solving Systems of Nonlinear Equations.  2nd Ed.,  
{\it SIAM}, Philadelphia, 1998.
 
\bibitem{sebah} {P.~Sebah and X.~Gourdon},  {\it Newton's method and high order iterations}, 2001. Available from: http://numbers.computation.free.fr/Constants/constants.html.

\bibitem{torres} {G.~Fernandez\--Torres},  Derivative free iterative methods with memory of arbitrary high convergence order, {\it  Numer. Alg.} 67 (2014) 565--580.


\bibitem{traub} {J.~F.~Traub},   Iterative Methods for the Solution of Equations.  {\it Prentice\--Hall}, Englewood Cliffs, 1964.

\bibitem{weerakoon} {S.~Weerakoon, T.~G.~I.~Fernando}, A variant of Newton's method with accelerated third\--order convergence, {\it  App. Math. Lett.} 13 (2000) 87\--93.



 \bibitem{wolfram} S. Wolfram, \textit{The Mathematica Book}. 
{\it Wolfram Media,  fifth ed.}, 2003.
 \end{thebibliography}


\end{document}
% end of file template.tex



