\documentclass{article}
\usepackage{preamble}
\usepackage{float}
\floatstyle{boxed}
\newfloat{algorithm}{t}{}
\floatname{algorithm}{Algorithm}
\title{Collocation matrices representing inverse operators - notes}
\author{Conor McCoid}
For the Wronskians of $E \setminus E_i$ we present the following lemma:
\begin{lem}[Wronskians of exponential functions]
Let $\set{\lambda_k}_{k=1}^m \in \mathbb{R}$ and $\Lambda$ the matrix defined earlier for such a set, then
\begin{equation*}
\W{\set{e^{\lambda_k x}}_{k=1}^m}{x} = \abs{\Lambda}
e^{x \sum_{k=1}^m \lambda_k} .
\end{equation*}
\label{lem:exp}
\end{lem}
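As a quick check with $m = 2$,
\begin{equation*}
\W{\set{e^{\lambda_1 x}, e^{\lambda_2 x}}}{x} = \begin{vmatrix} e^{\lambda_1 x} & e^{\lambda_2 x} \\ \lambda_1 e^{\lambda_1 x} & \lambda_2 e^{\lambda_2 x} \end{vmatrix} = (\lambda_2 - \lambda_1) e^{(\lambda_1 + \lambda_2) x} ,
\end{equation*}
which matches the lemma provided $\abs{\Lambda} = \lambda_2 - \lambda_1$, i.e.\ reading $\Lambda$ as the Vandermonde-type matrix defined earlier for this set.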
\begin{proof}
The case of any two $\lambda_k$ being equal is trivially true as both sides are necessarily zero.
The special case is equivalent to $M=1$ and $\lambda_1 = 0$.
As such, the set $E = \set{\Poly{j}}_{j=0}^{m-1}$.
The following lemma presents the Wronskians for such polynomials:
\begin{lem}[Wronskians of polynomials]
\begin{align*}
(i) && \Wpoly{k=0}{m} & = 1 \\
(ii) && \Wpoly{k=0,k \neq j}{m} & = \Wpoly{k=1}{m-j} \\
(iii) && \Wpoly{k=1}{m} & = \Poly{m}
\end{align*}
\label{lem:poly}
\end{lem}
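As a quick check of (iii), and assuming $\Poly{k}$ denotes the normalized monomial $x^k / k!$ (which is consistent with (i)), take $m = 2$:
\begin{equation*}
\W{\set{\Poly{1}, \Poly{2}}}{x} = \begin{vmatrix} x & x^2/2 \\ 1 & x \end{vmatrix} = \frac{x^2}{2} = \Poly{2} .
\end{equation*}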
\begin{proof}
\begin{description}
The case for one root with multiplicity $m$, represented by $M=1$, is a generalization of the previous case.
The set $E = E_1 = \set{\Poly{k} e^{\lambda_1 x}}_{k=1}^m$ which is the set $E$ from the previous case multiplied by $e^{\lambda_1 x}$.
The following lemma then makes the generalization simple.
\begin{lem}
\begin{equation*}
W(\{ f_k g \}_{k=1}^m ; x) = g^m W(\{ f_k \}_{k=1}^m ; x )
\end{equation*}
\label{lem:group}
\end{lem}
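For $m = 2$, for example,
\begin{equation*}
W(\{ f_1 g, f_2 g \}; x) = (f_1 g)(f_2 g)' - (f_2 g)(f_1 g)' = g^2 \left( f_1 f_2' - f_2 f_1' \right) = g^2 W(\{ f_1, f_2 \}; x),
\end{equation*}
since the $g g'$ cross terms cancel.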
\begin{proof}
It is trivially true for $m=1$.
where $\tilde{I}_k $ indicates the selection of those rows corresponding to $\lambda_k$.
Note that all $F_k^{-1}(x)$ are principal submatrices of $F_{k^*}^{-1}$, where $m_{k^*} \geq m_k$ for all $k=1,\dots,M$.
\section{Other attempts}
\newcommand{\Lcal}{\mathcal{L}}
\newcommand{\ddx}{\frac{d}{dx}}
Let $\hat{\Lcal}$ be the operator for the set $\set{\Poly{k} e^{\lambda_j x}}_{j \neq q}$ and $\tilde{\Lcal}$ the operator for $\set{\Poly{k} e^{\lambda_q x}}_{k \neq n}$; then
\begin{align*}
\Lcal & = \hat{\Lcal} \tilde{\Lcal} \\
& = \left [\left ( \ddx \right )^{m-l} +\sum_{j \neq q} \lambda_j \left ( \ddx \right )^{m-l-1} + \dots + \prod_{j \neq q} \lambda_j \right ]
\left [ \left ( \ddx \right )^l + r(x) \left ( \ddx \right )^{l-1} + \dots + s(x) \right ] \\
& = \left ( \ddx \right )^m + \left ( \sum_{j \neq q} \lambda_j + r(x) \right ) \left ( \ddx \right )^{m-1} + \dots
\end{align*}
but then $\Lcal$ is the operator for
\begin{itemize}
\item $\set{\Poly{k} e^{\lambda_j x} } \cup \set{\hat{\Lcal} \left ( \Poly{k} e^{\lambda_q x} \right )}_{k \neq n}$ or
\item $\set{\tilde{\Lcal} \left ( \Poly{k} e^{\lambda_j x} \right )}_{k,j \neq q} \cup \set{\Poly{k} e^{\lambda_q x}}_{k \neq n}$.
\end{itemize}
\begin{align*}
E & = \set{P_k(x)}_{k=1}^m \\
& = \set{\Poly{j} e^{\lambda_i x}}_{j=0, i=1}^{m_i-1,M} \\
& = \cup_{i=1}^M \set{\Poly{j} e^{\lambda_i x}}_{j=0}^{m_i-1} \\
& = \cup_{i=1}^M E_i
\end{align*}
\begin{itemize}
\item $E \to \Lcal$ such that $\forall f \in E$ $\Lcal f = 0$.
\item $\cup_{i \neq k} E_i \to \tilde{\Lcal}$ in the same way: $\tilde{\Lcal} f = f^{(m-m_k)}(x) + \sum_{i \neq k} m_i \lambda_i f^{(m - m_k - 1)}(x) + \dots$.
\item $E_k \setminus \set{\Poly{n} e^{\lambda_k x} } \to \hat{\Lcal}$, $\hat{\Lcal}f = f^{(m_k-1)}(x) + r(x) f^{(m_k-2)}(x) + \dots$.
\item $E \setminus \set{\Poly{n} e^{\lambda_k x} } \to \bar{\Lcal}$, $\bar{\Lcal} f = f^{(m-1)}(x) + \left ( r(x) + \sum_{i \neq k} m_i \lambda_i \right ) f^{(m-2)}(x) + \dots$.
\end{itemize}
We need $W \left ( \hat{\Lcal} \left ( E_k \setminus \set{\Poly{n} e^{\lambda_k x}} \right ) ; x \right )$.
\begin{align*}
\hat{\Lcal} \left ( E_k \setminus \set{\Poly{n} e^{\lambda_k x}} \right ) & = \prod_{i \neq k} \left ( \ddx - \lambda_i \right )^{m_i} \Poly{j} e^{\lambda_k x}, \quad j \neq n \\
& \neq \prod_{i \neq k} \left ( \ddx - \lambda_i \right )^{m_i - 1} \left ( \poly{j-1}+ \lambda_k \Poly{j} - \lambda_i \poly{j} \right ) e^{\lambda_k x}
\end{align*}
\begin{align*}
\tilde{\lambda}_l = \begin{cases} \lambda_1 & 1 \leq l \leq m_1 \\ \lambda_i & \sum_{j < i, j \neq k} m_j < l \leq \sum_{j \leq i,j \neq k} m_j \end{cases}
\end{align*}
\begin{align*}
\prod_{l=1}^{m-m_k} & \left ( \ddx - \tilde{\lambda}_l \right ) \Poly{j} e^{\lambda_k x} = \\
\prod_{l=2}^{m-m_k} & \left ( \ddx - \tilde{\lambda}_l \right ) \left ( \poly{j-1} + (\lambda_k - \tilde{\lambda}_1 ) \Poly{j} \right ) e^{\lambda_k x} = \\
\prod_{l=3}^{m-m_k} & \left ( \ddx - \tilde{\lambda}_l \right ) \left ( \poly{j-2} + (2 \lambda_k - \tilde{\lambda}_1 - \tilde{\lambda}_2 ) \poly{j-1} + (\lambda_k^2 - \lambda_k \tilde{\lambda}_1 - \tilde{\lambda}_2 \lambda_k + \tilde{\lambda}_1 \tilde{\lambda}_2 ) \Poly{j} \right ) e^{\lambda_k x}
\end{align*}
\begin{align*}
\Lcal P_k(x) & = P_k^{(m)}(x) - \sum_{i=1}^{M} m_i \lambda_i P_k^{(m-1)}(x) + \dots = 0 \\
\hat{\Lcal} P_k(x) & = P_k^{(m-1)}(x) + r(x) P_k^{(m-2)}(x) + \dots = 0 & \forall k \neq j \\
\tilde{\Lcal} P_j(x) & = P_j'(x) + q(x) P_j(x) = 0 \\
\bar{\Lcal} P_k(x) & = P_k^{(m-1)}(x) + p(x) P_k^{(m-2)}(x) + \dots & \text{such that } \Lcal P_k(x) = \bar{\Lcal} \tilde{\Lcal} P_k(x) \\
\breve{\Lcal} P_k(x) & = P_k'(x) + s(x) P_k(x) & \text{such that } \Lcal P_k(x) = \breve{\Lcal} \hat{\Lcal} P_k(x) \\
\implies -\sum_{i=1}^M m_i \lambda_i & = q(x) + p(x) = r(x) + s(x)
\end{align*}
$\Omega$ (in this document, pretty sure $\Lambda$ in the current version) has $\sum_{k=1}^M \left [ \frac{m_k (m_k+1)}{2} + (m-m_k)^2 \right ]$ elements.
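For example, with $M = 2$, $m_1 = 2$, and $m_2 = 1$ (so $m = 3$), this count is $\left [ \frac{2 \cdot 3}{2} + 1 \right ] + \left [ \frac{1 \cdot 2}{2} + 4 \right ] = 4 + 5 = 9$.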
To make $\Omega$, make each piece separately.
Separate into coefficients and roots.
\begin{algorithm}
Input: $\lambda_k$, $m_k$ for $k = 1, \dots, M$. \\
$\Omega_{\lambda_k} = \text{spdiags} \left ( 0:-1:1-m, \lambda_k^{(0:m-1)}, m, m_k \right )$; \\
$P_T$ (Pascal's triangle in lower triangular form, size $m \times m$); \\
$\Omega_k = P_T(:,1:m_k) \otimes \Omega_{\lambda_k}$; \quad ($\otimes$ is the Hadamard, i.e.\ elementwise, matrix product) \\
$\Omega = \begin{bmatrix} \Omega_1 & \Omega_2 & \dots & \Omega_M \end{bmatrix}$;
\caption{Algorithm to construct $\Omega$}
\end{algorithm}
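The following Python sketch is one possible reading of the algorithm above; it is illustrative only.
In particular, it assumes that the $\text{spdiags}$ call places $\lambda_k^{d}$ on diagonal $-d$ of an $m \times m_k$ block, and that $P_T(i,j) = \binom{i}{j}$ with zero-based indices; neither convention is pinned down above.
\begin{verbatim}
import numpy as np
from math import comb

def omega_block(lam, m, m_k):
    # m x m_k block for the root lam: entry (i, j) = C(i, j) * lam**(i - j)
    # for i >= j, i.e. the Hadamard product of the first m_k columns of the
    # lower-triangular Pascal matrix with the block of powers of lam.
    block = np.zeros((m, m_k))
    for i in range(m):
        for j in range(min(i + 1, m_k)):
            block[i, j] = comb(i, j) * lam ** (i - j)
    return block

def build_omega(lams, mults):
    # Omega = [Omega_1  Omega_2  ...  Omega_M], concatenated column blocks.
    m = sum(mults)
    return np.hstack([omega_block(lam, m, m_k)
                      for lam, m_k in zip(lams, mults)])

# Illustrative usage: two roots with multiplicities 2 and 1.
print(build_omega([2.0, -1.0], [2, 1]))
\end{verbatim}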
\end{document}
\usepackage{preamble}
\usepackage{tikz}
\usetikzlibrary{positioning,calc}
\usepackage{pgfplots}
\begin{document}
Define $\dagger \bbp = \bbp \cup \bbr_- \cup \set{-\infty}$.
\begin{lemma}
$(\dagger \bbp, \lor)$ is a commutative group.
\end{lemma}
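For reference, the inverse of $p$ under $\lor$ is $\dagger p = \frac{-p}{1-p}$ (taking $a \lor b = a + b - ab$, as used later in these notes), since
\begin{equation*}
p \lor \dagger p = p - \frac{p}{1-p} + \frac{p^2}{1-p} = \frac{p(1-p) - p + p^2}{1-p} = 0 .
\end{equation*}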
\subsection{The Probabilistic Concentric Ring??}
In addition to the OR operation, one can equip $\bbp$ with the AND operation.
This operation is equivalent to standard multiplication and shall be referred to as such.
One would like $(\bbp, \lor, \cdot)$ to be a commutative semiring.
Unfortunately, the OR and AND operations do not distribute, meaning they do not interact in any meaningful way: with $p = q = r = \tfrac{1}{2}$, for instance, $p \cdot (q \lor r) = 0.375$ while $(p \cdot q) \lor (p \cdot r) = 0.4375$.
Essentially, one can set up two groups that overlap in the region $\bbp$, with each operation acting only on its respective group.
We can call $(\bbp,\lor,\cdot)$ a concentric ring, though such an object has little use.

\subsection{The Probabilistic Field}
The probadd inverses have already been included in $\dagger \bbp$.
The multiplicative inverses that remain lie in $(1,\infty)$.
Taken together, this forms the extended real numbers, $\bbr^*$.
One might hope that $(\bbr^*, \lor, \cdot)$ is a field, but it is not: the operations still do not allow for distributivity.
\subsection{The Probabilistic Inner Product Space}
Since $(\bbr^*, \lor, \cdot)$ is not a field, the space $({\bbr^*}^n, \bbr^*, \lor, \cdot)$ is not a vector space.
Nonetheless, define the inner product over this space as
\begin{equation}
\langle \vec{p}, \vec{q} \rangle = p_1 q_1 \lor p_2 q_2 \lor \dots \lor p_n q_n .
\end{equation}
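For instance, with illustrative values $\vec{p} = (0.5, 0.2)$ and $\vec{q} = (0.3, 0.4)$, $\langle \vec{p}, \vec{q} \rangle = 0.15 \lor 0.08 = 0.15 + 0.08 - (0.15)(0.08) = 0.218$.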
\begin{lemma}
$({\bbr^*}^n, \bbr^*, \lor, \cdot, \langle \cdot, \cdot \rangle)$ is an inner product space.
\end{lemma}
\subsection{Probabilistic Matrices}
To represent the linear transformations of probabilistic vectors, one can construct matrices containing elements of $\bbr^*$.
This is made trivial by using the probabilistic inner product:
\begin{equation*}
A \vec{p} = \begin{bmatrix} \vec{a}_1^\top \\ \vdots \\ \vec{a}_n^\top \end{bmatrix} \vec{p} = \begin{bmatrix} \langle \vec{a}_1, \vec{p} \rangle \\ \vdots \\ \langle \vec{a}_n, \vec{p} \rangle \end{bmatrix} .
\end{equation*}
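A minimal Python sketch of this action (restricted to ordinary probabilities in $[0,1]$, with $a \lor b = a + b - ab$ as in the notes below; the function names are illustrative):
\begin{verbatim}
import numpy as np

def probsum(a, b):
    # OR of two probabilities: a OR b = a + b - a*b.
    return a + b - a * b

def prob_inner(p, q):
    # <p, q> = p_1 q_1 OR p_2 q_2 OR ... OR p_n q_n; 0 is the identity of OR.
    acc = 0.0
    for pi, qi in zip(p, q):
        acc = probsum(acc, pi * qi)
    return acc

def prob_matvec(A, p):
    # Apply the matrix row by row using the probabilistic inner product.
    return np.array([prob_inner(row, p) for row in A])

A = np.array([[0.5, 0.2], [0.1, 0.9]])
p = np.array([0.3, 0.4])
print(prob_matvec(A, p))
\end{verbatim}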
\begin{figure}
\centering
\begin{tikzpicture}
\begin{axis}[samples=500, domain=-4:4, restrict y to domain=-4:4,
xlabel={$p$},
ylabel={$\dagger p = \frac{-p}{1-p}$}]
\addplot[thick] {-x/(1-x)};
\addplot[dotted] coordinates {(-4,1) (4,1)};
\addplot[dotted] coordinates {(1,-4) (1,4)};
\end{axis}
\end{tikzpicture}
\caption{Visualization of the inverses $\dagger p$.}
\end{figure}
\section{Stochastic Networks}
We define a stochastic network as a graph with vertices taking on a discrete set of $k$ values.
% Table omitted here (TikZ graph diagrams of the example networks). Caption fragment: The paths follow the relationship $\mathcal{D} \supset \mathcal{C} \subset \mathcal{A} \subset \mathcal{B}$.
\subsection{Notes}
If two paths cross and one arrives at the intersection before the other, then the probability of infection is:
\begin{equation*}
p_1(t) + p_2(t) (1 - p_1(t)) = p_1(t) \lor p_2(t).
\end{equation*}
If two paths collide at the same time then the probability of infection is either
$$1 - (1 - p_1(t))(1-p_2(t)) = p_1(t) \lor p_2(t)$$
or
$$p_1(t) (1-p_2(t)) + p_2(t) (1-p_1(t)) = \left ( p_1(t) \lor p_2(t) \right ) - \left (p_1(t) \land p_2(t) \right ).$$
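For illustration, with $p_1(t) = 0.5$ and $p_2(t) = 0.3$, the first option gives $0.5 \lor 0.3 = 0.65$ while the second gives $0.65 - 0.15 = 0.5$.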
I think the first option makes more sense.
The total probability for a node is then the probsum of all paths that run through it.
Keywords: Bayesian networks, MCMC
\appendix
\section{Notes}