Commit 78b4403

Finish basic Adams method

1 parent 2101757 commit 78b4403
File tree: 1 file changed, +113 -6 lines changed

nordsieck/nordsieck.tex

Lines changed: 113 additions & 6 deletions
@@ -85,14 +85,14 @@ \section{The General Form of Nordsieck Methods}
\end{equation}
While one can construct the Pascal matrix explicitly
\begin{lstlisting}
P(q) = [i>=j ? binomial(i-1,j-1) : 0 for i in 1:q+1, j in 1:q+1]
\end{lstlisting}
and perform a matrix multiplication to get $z_n$, a much more efficient
method is to write a \texttt{for} loop.
\begin{lstlisting}
for i in 1:q, j in q:-1:i
    z[j] = z[j] + z[j+1]
end
\end{lstlisting}
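As a quick sanity check (a minimal sketch; the order \texttt{q} and the
Nordsieck vector \texttt{z} below are illustrative), the loop agrees with
applying the Pascal matrix to \texttt{z}:
\begin{lstlisting}
q = 4
z = rand(q + 1)
# In-place prediction by repeated summation
z_loop = copy(z)
for i in 1:q, j in q:-1:i
    z_loop[j] = z_loop[j] + z_loop[j+1]
end
# Transposing the lower-triangular Pascal matrix gives the
# upper-triangular transform that acts on the column vector `z`
P = [i>=j ? binomial(i-1,j-1) : 0 for i in 1:q+1, j in 1:q+1]
@assert isapprox(transpose(P) * z, z_loop)
\end{lstlisting}
The loop performs the same update in place with $O(q^2)$ additions and no
multiplications.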

Now we have a formulation for the predictor that is based on Taylor expansion.
@@ -112,16 +112,123 @@ \subsubsection{Adams Methods} \label{subsec:adams}
$i=n-q+1,\cdots,n$.
Explicit Adams methods can be thought of as polynomial interpolation that
fulfills
\begin{align} \label{eq:adams1}
\pi_{n-1}'(t_{n-i})&=F_{n-i}\qq{where} i=1,2,\cdots,q \\
\pi_{n-1}(t_{n-1})&=u_{n-1},
\end{align}
while the implicit Adams methods arise in the same way, from polynomials
that fulfill
\begin{align} \label{eq:adams2}
\pi_{n}'(t_{n-i})&=F_{n-i}\qq{where} i=0,1,\cdots,q-1 \\
\pi_{n}(t_{n-1})&=u_{n-1} \\
\pi_{n}(t_{n})&=u_{n}.
\end{align}
Here we define $y'_n \equiv F_n$. Note that these conditions are equivalent
to the classical linear multistep form (written here for the implicit case)
\begin{equation}
u_n = u_{n-1} + h_n\sum_{i=0}^{q-1}\beta_{ni}y'_{n-i}.
\end{equation}
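For instance, with $q=1$ the polynomial $\pi_n$ in \cref{eq:adams2} is
linear, so its slope is $(u_n - u_{n-1})/h_n$, and the derivative condition
forces $u_n = u_{n-1} + h_n F_n$, which is the backward Euler method with
$\beta_{n0} = 1$.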
The coefficients $\beta_{ni}$ depend on the step sizes $h_j$ and the order
$q$. Now, define $\hat{y}_n = \pi_{n-1}(t_n)$ and $\hat{y}'_n =
\pi'_{n-1}(t_n)$. We have
\begin{equation} \label{eq:nl_adams}
hy'_n = hF_n = h\hat{y}'_n + l_1\Delta_n(t_n),
\end{equation}
where $\Delta_n = \pi_n - \pi_{n-1}$, $h = h_n$, and $l_1 =
\beta_{n0}^{-1}$. The vector $\Delta_n(t_n)$ in \cref{eq:nl_adams} can be
solved for by functional iteration.
\begin{lstlisting}
# Zero out the difference vector
Delta .= zero(eltype(Delta))
# `k` counts the iterations for the convergence and divergence tests
k = 0
# On entry, `ratetmp` is assumed to hold `f` evaluated at the predicted
# state, and `z` is the predicted Nordsieck vector; `tol`, `max_iter`,
# and `div_rate` are assumed to come from the enclosing scope
# Start the functional iteration & store the difference into `Delta`
while true
    # Fixed-point map: the new correction is (h*f(u) - z[2])/l[2]
    @. ratetmp = inv(l[2])*muladd(dt, ratetmp, -z[2])
    @. integrator.u = ratetmp + z[1]
    # Change of the correction in this iteration
    @. Delta = ratetmp - Delta
    del = norm(Delta)
    k += 1
    # Convergence test
    test_rate = del / tol
    test_rate <= one(test_rate) && return true
    # Divergence criteria
    (k == max_iter || (k >= 2 && del > div_rate * del_prev)) && return false
    del_prev = del
    # Save the current correction so the next `Delta` is a difference
    # of successive corrections
    copyto!(Delta, ratetmp)
    integrator.f(ratetmp, integrator.u, p, dt+t)
end
\end{lstlisting}
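As a minimal scalar illustration of this fixed-point map (the names here are
illustrative; $q=1$ gives $l = (1, 1)$, so the iteration reduces to backward
Euler on the test equation $u' = -u$):
\begin{lstlisting}
f(u) = -u

function solve_step(f, uprev, h; iters = 20)
    # Predicted Nordsieck vector z = [y_hat, h*y'_hat]
    z = (uprev + h*f(uprev), h*f(uprev))
    l = (1.0, 1.0)
    u = z[1]
    for _ in 1:iters
        corr = (h*f(u) - z[2]) / l[2]  # one functional-iteration sweep
        u = z[1] + corr
    end
    return u
end

# Backward Euler on u' = -u gives u_n = u_{n-1}/(1 + h)
@assert isapprox(solve_step(f, 1.0, 0.1), 1.0/1.1)
\end{lstlisting}
Functional iteration avoids forming a Jacobian, but the map only contracts
when $h\,\partial f/\partial u$ is small, which is why stiff problems use
Newton iteration instead.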
158+
From \cref{eq:adams1} and \cref{eq:adams2}, we see that
\begin{equation}
\Delta_n(t_n) = u_n - \hat{y}_n \equiv e_n,\quad \Delta_n(t_{n-1}) = 0,\quad
\Delta'_n(t_{n-i}) = 0, \qq{where} i=1,2,\cdots,q-1.
\end{equation}
Thus we have
\begin{equation}
\Delta_n(t) = \Delta_n(t_n+hx) = \Lambda_n(x)e_n, \quad x = (t-t_n)/h,
\end{equation}
where $\Lambda_n$ is a scalar polynomial that fulfills the conditions
\begin{equation}
\Lambda_n(0)=1,\; \Lambda_n(-1) = 0,\; \Lambda'_n(-\Xi_i) = 0,\qq{where}
i=1,2,\cdots,q-1,\; \Xi_i \equiv (t_n-t_{n-i})/h.
\end{equation}
Hence, we can write $\Lambda_n$ as
\begin{equation}
\Lambda_n(x) = \frac{\int_{-1}^x\prod_{i=1}^{q-1} (u+\Xi_i) \dd{u}}
{\underbrace{\int_{-1}^0\prod_{i=1}^{q-1} (u+\Xi_i)
\dd{u}}_\text{constant}}
= \sum_{j=0}^{q} l_jx^j.
\end{equation}
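These conditions can be checked numerically (a sketch assuming the
QuadGK.jl package, with $q=3$ and constant steps, so $\Xi = (1, 2)$):
\begin{lstlisting}
using QuadGK
p(u) = (u + 1)*(u + 2)  # prod of (u + Xi_i) for Xi = (1, 2)
denom = first(quadgk(p, -1, 0))
Lambda(x) = first(quadgk(p, -1, x)) / denom
@assert isapprox(Lambda(0), 1)
@assert abs(Lambda(-1)) < 1e-12
# Lambda'(x) = p(x)/denom vanishes at the nodes x = -Xi_i
@assert p(-1) == 0 && p(-2) == 0
\end{lstlisting}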
We can compute the coefficients $l_j$ by calculating the coefficients of
$\prod_{i=1}^{q-1} (u+\Xi_i)$ and integrating with the normalization above.
The implementation below uses Vieta's formulas,
\begin{equation}
\sum_{1\leq i_{1}<i_{2}<\cdots <i_{k}\leq n}x_{i_{1}}x_{i_{2}}\cdots
x_{i_{k}}=(-1)^{k}a_{n-k},
\end{equation}
where $x_j$ is the $j$-th root of a monic polynomial with coefficients
$a_j$.
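For concreteness, $(u+2)(u+3) = u^2 + 5u + 6$ has roots $x_1 = -2$ and
$x_2 = -3$, and indeed $x_1 + x_2 = -5 = (-1)^1 a_1$ while
$x_1 x_2 = 6 = (-1)^2 a_0$.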
\begin{lstlisting}
# Computes (-1)^k times the integral, from -1 to 0, of x^k * P(x), where
# `a` holds the coefficients of P(x) = a[1] + a[2]*x + ... + a[deg]*x^(deg-1)
function int01dx(a, deg, k)
    @inbounds begin
        int = zero(eltype(a))
        sign = one(eltype(a))
        for i in 1:deg
            int += sign * a[i]/(i+k)
            sign = -sign
        end
        return int
    end
end

function calc_coeff!(cache::T) where T
    @unpack m, l, tau = cache
    ZERO, ONE = zero(m[1]), one(m[1])
    dtsum = dt = tau[1]
    order = cache.step
    m[1] = ONE
    for i in 2:order+1
        m[i] = ZERO
    end
    # initialize Xi_inv
    Xi_inv = dt / dtsum
    # compute the coefficients of prod_{i=1}^{j} (1 + x/Xi_i)
    for j in 1:order-1
        Xi_inv = dt / dtsum
        for i in j:-1:1
            m[i+1] += m[i] * Xi_inv
        end
        dtsum += tau[j+1]
    end

    M0 = int01dx(m, order, 0)
    M1 = int01dx(m, order, 1)
    M0_inv = inv(M0)
    l[1] = ONE
    for i in 1:order
        l[i+1] = M0_inv * m[i] / i
    end
    # recompute Xi_inv with the fully accumulated step sum
    Xi_inv = dt / dtsum
    cache.tq = M1 * M0_inv * Xi_inv
end
\end{lstlisting}
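As a usage sketch, constant steps with $q=2$ recover the trapezoidal rule,
whose coefficient is $\beta_{n0} = 1/l_1 = 1/2$. The mutable cache type
below is hypothetical and only carries the fields \texttt{calc\_coeff!}
touches; the real integrator cache has more.
\begin{lstlisting}
using UnPack  # provides the `@unpack` used by `calc_coeff!`

mutable struct AdamsCache{T}
    m::Vector{T}
    l::Vector{T}
    tau::Vector{T}
    step::Int
    tq::T
end

cache = AdamsCache(zeros(3), zeros(3), [0.1, 0.1], 2, 0.0)
calc_coeff!(cache)
@assert cache.l == [1.0, 2.0, 1.0]  # l_1 = 2, so beta_{n0} = 1/2
\end{lstlisting}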

\subsubsection{BDF Methods} \label{subsec:bdf}
The backward differentiation formula methods are usually
