Commit 7e43e7ed authored by Conor McCoid

Tetra: finished proof of sign of denom. for higher dim.

By this corollary, if there is a change in sign of $\vec{h}(J | \Gamma) \cdot \vec{e}_\eta$ then the entire $m$--face of $X$ defined by the indices $J$ ends up on the other side of the $(n-m)$--face of $Y$ defined by $\Gamma \cup \set{\eta}$.
If the $J$--th $m$--face of $X$ does not have $m+1$ intersections then the signs of all existing intersections can be found using the intersections of the $(m-1)$--faces of $X$ with indices that are subsets of $J$.
For the sake of notation let
\begin{align*}
X_J = & \begin{bmatrix} \vec{x}_{i_0} & \dots & \vec{x}_{i_m} \end{bmatrix}, \quad &
I_\Gamma = & \begin{bmatrix} \vec{e}_{\gamma_1} & \dots & \vec{e}_{\gamma_m} \end{bmatrix}
\end{align*}
for $J = \set{i_j}_{j=0}^m$ and $\Gamma = \set{\gamma_j}_{j=1}^m$.
Then $$\vec{h}(J|\Gamma) \cdot \vec{e}_\eta = \frac{\begin{vmatrix} X_J^\top \vec{e}_\eta & X_J^\top I_\Gamma \end{vmatrix} }{ \begin{vmatrix} \vec{1} & X_J^\top I_\Gamma \end{vmatrix}}.$$
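For instance, for $m = 1$ with $J = \set{0,1}$, $\Gamma = \set{1}$, $\eta = 2$ and sample vertices $\vec{x}_0 = (1, 2)^\top$, $\vec{x}_1 = (-3, 4)^\top$, this gives
\begin{equation*}
\vec{h}(J|\Gamma) \cdot \vec{e}_2 = \frac{\begin{vmatrix} 2 & 1 \\ 4 & -3 \end{vmatrix}}{\begin{vmatrix} 1 & 1 \\ 1 & -3 \end{vmatrix}} = \frac{-10}{-4} = \frac{5}{2},
\end{equation*}
which is the $x_2$--coordinate of the point where the segment from $\vec{x}_0$ to $\vec{x}_1$ crosses the hyperplane $x_1 = 0$.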
\begin{lemma}
Suppose $\sign(\vec{h}(J \setminus \set{i} | \Gamma) \cdot \vec{e}_\eta) \neq \sign(\vec{h}(J \setminus \set{j} | \Gamma) \cdot \vec{e}_\eta)$ for distinct $i, j \in J$, where now $\Gamma = \set{\gamma_k}_{k=1}^{m-1}$ so that all determinants below are square. Then
\begin{equation*}
\sign \left ( \begin{vmatrix} \vec{1} & X_J^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix} \right ) =
\sign \left ( \begin{vmatrix} X_{J \setminus \set{i}}^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix}
\begin{vmatrix} X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix}
\begin{vmatrix} \vec{1} & X_{J \setminus \set{j}}^\top I_\Gamma \end{vmatrix} \right ).
\end{equation*}
\end{lemma}
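For $m = 1$ the lemma reduces to a familiar fact: here $\Gamma = \emptyset$, the notation above gives $\vec{h}(\set{j} | \emptyset) \cdot \vec{e}_\eta = \vec{x}_j^\top \vec{e}_\eta$, and the two determinants involving only $I_\Gamma$ are trivially $1$, so the claim becomes
\begin{equation*}
\sign \left ( \begin{vmatrix} 1 & \vec{x}_i^\top \vec{e}_\eta \\ 1 & \vec{x}_j^\top \vec{e}_\eta \end{vmatrix} \right ) = \sign \left ( \vec{x}_j^\top \vec{e}_\eta \right ),
\end{equation*}
that is, the difference of two quantities of opposite signs takes the sign of the minuend.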
\begin{proof}
Without loss of generality, suppose $\vec{x}_i$ is the first column of $X_{J \setminus \set{j}}$ and likewise $\vec{x}_j$ is the first column of $X_{J \setminus \set{i}}$.
Furthermore, to adhere to the notation already established, the first column of $I_{\Gamma \cup \set{\eta}}$ must be $\vec{e}_\eta$.
Since by assumption the two terms have opposite signs, the difference $\vec{h}(J \setminus \set{i} | \Gamma) \cdot \vec{e}_\eta - \vec{h}(J \setminus \set{j} | \Gamma) \cdot \vec{e}_\eta$ has the same sign as $\vec{h}(J \setminus \set{i} | \Gamma) \cdot \vec{e}_\eta$.
We begin by simplifying this expression:
\begin{align*}
\vec{h}(J \setminus \set{i} | \Gamma)\cdot \vec{e}_\eta & - \vec{h}(J \setminus \set{j} | \Gamma) \cdot \vec{e}_\eta =
\frac{ \begin{vmatrix} X_{J \setminus \set{i}}^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix}
}{ \begin{vmatrix} \vec{1} & X_{J \setminus \set{i}}^\top I_\Gamma \end{vmatrix}} -
\frac{ \begin{vmatrix} X_{J \setminus \set{j}}^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix}
}{ \begin{vmatrix} \vec{1} & X_{J \setminus \set{j}}^\top I_\Gamma \end{vmatrix}} \\
= & \frac{ \begin{vmatrix} \vec{1} & X_{J \setminus \set{j}}^\top I_\Gamma \end{vmatrix}
\begin{vmatrix} X_{J \setminus \set{i}}^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix} -
\begin{vmatrix} \vec{1} & X_{J \setminus \set{i}}^\top I_\Gamma \end{vmatrix}
\begin{vmatrix} X_{J \setminus \set{j}}^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix} }{
\begin{vmatrix} \vec{1} & X_{J \setminus \set{i}}^\top I_\Gamma \end{vmatrix}
\begin{vmatrix} \vec{1} & X_{J \setminus \set{j}}^\top I_\Gamma \end{vmatrix} }.
\end{align*}
We expand the numerator of this expression, first replacing $\begin{vmatrix} \vec{1} & X_{J \setminus \set{j}}^\top I_\Gamma \end{vmatrix}$ and $\begin{vmatrix} \vec{1} & X_{J \setminus \set{i}}^\top I_\Gamma \end{vmatrix}$ by their cofactor expansions along the rows containing $\vec{x}_i$ and $\vec{x}_j$ respectively:
\begin{align*}
& \left ( \begin{vmatrix} X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix} + \sum\limits_{k=1}^{m-1} (-1)^k \vec{x}_i^\top \vec{e}_{\gamma_k} \begin{vmatrix} \vec{1} & X_{J \setminus \set{i,j}}^\top I_{\Gamma \setminus \set{\gamma_k}} \end{vmatrix} \right ) \begin{vmatrix} X_{J \setminus \set{i}}^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix} \\
& - \left ( \begin{vmatrix} X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix} + \sum\limits_{k=1}^{m-1} (-1)^k \vec{x}_j^\top \vec{e}_{\gamma_k} \begin{vmatrix} \vec{1} & X_{J \setminus \set{i,j}}^\top I_{\Gamma \setminus \set{\gamma_k}} \end{vmatrix} \right ) \begin{vmatrix} X_{J \setminus \set{j}}^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix} \\
= & \begin{vmatrix} X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix}
\begin{vmatrix} 1 & \vec{x}_i^\top I_{\Gamma \cup \set{\eta}} \\ 1 & \vec{x}_j^\top I_{\Gamma \cup \set{\eta}} \\ \vec{0} & X_{J \setminus \set{i,j}}^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix}
+ \begin{vmatrix} \vec{x}_i^\top I_\Gamma \vec{w} & \vec{x}_i^\top I_{\Gamma \cup \set{\eta}} \\ \vec{x}_j^\top I_\Gamma \vec{w} & \vec{x}_j^\top I_{\Gamma \cup \set{\eta}} \\ \vec{0} & X_{J \setminus \set{i,j}}^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix} \\
= & \begin{vmatrix} X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix}
\begin{vmatrix} 1 & \vec{x}_i^\top I_{\Gamma \cup \set{\eta}} \\ 1 & \vec{x}_j^\top I_{\Gamma \cup \set{\eta}} \\ \vec{0} & X_{J \setminus \set{i,j}}^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix}
+ \begin{vmatrix} 0 & \vec{x}_i^\top I_{\Gamma \cup \set{\eta}} \\ 0 & \vec{x}_j^\top I_{\Gamma \cup \set{\eta}} \\ -X_{J \setminus \set{i,j}}^\top I_\Gamma \vec{w} & X_{J \setminus \set{i,j}}^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix} \\
= & \begin{vmatrix} X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix} \begin{vmatrix} \vec{1} & X_J^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix},
\end{align*}
where the vector $\vec{w}$ has entries $w_k = (-1)^k \begin{vmatrix} \vec{1} & X_{J \setminus \set{i,j}}^\top I_{\Gamma \setminus \set{\gamma_k}} \end{vmatrix}$ for $k = 1, \dots, m-1$.
The first equality follows by expanding both block determinants along their first columns; the second follows by subtracting from the first column of the final determinant the linear combination, with coefficients $w_k$, of the columns arising from $I_\Gamma$, which leaves the determinant unchanged.
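As a quick check of these expansions in a small case, take $m = 2$, $J \setminus \set{j} = \set{i, k}$ and $\Gamma = \set{\gamma_1}$: then
\begin{equation*}
\begin{vmatrix} \vec{1} & X_{J \setminus \set{j}}^\top I_\Gamma \end{vmatrix}
= \begin{vmatrix} 1 & \vec{x}_i^\top \vec{e}_{\gamma_1} \\ 1 & \vec{x}_k^\top \vec{e}_{\gamma_1} \end{vmatrix}
= \begin{vmatrix} X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix} - \vec{x}_i^\top \vec{e}_{\gamma_1} \begin{vmatrix} \vec{1} & X_{J \setminus \set{i,j}}^\top I_{\Gamma \setminus \set{\gamma_1}} \end{vmatrix}
= \vec{x}_k^\top \vec{e}_{\gamma_1} - \vec{x}_i^\top \vec{e}_{\gamma_1},
\end{equation*}
where the last determinant is that of the $1 \times 1$ matrix $\begin{pmatrix} 1 \end{pmatrix}$, in agreement with direct evaluation of the $2 \times 2$ determinant.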
To prove the last equality, note that each element of $X_{J \setminus \set{i,j}}^\top I_\Gamma \vec{w}$ is equal to the same value: writing $\vec{x}_l$ for the $l$-th column of $X_{J \setminus \set{i,j}}$,
\begin{align*}
\left ( X_{J \setminus \set{i,j}}^\top I_\Gamma \vec{w} \right )_l = &
\sum_{k=1}^{m-1} (-1)^k \vec{x}_l^\top \vec{e}_{\gamma_k} \begin{vmatrix} \vec{1} & X_{J \setminus \set{i,j}}^\top I_{\Gamma \setminus \set{\gamma_k}} \end{vmatrix} \\
= & \begin{vmatrix} 0 & \vec{x}_l^\top I_\Gamma \\ \vec{1} & X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix} \\
= & \begin{vmatrix} -1 & \vec{0}^\top \\ \vec{1} & X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix} \\
= & - \begin{vmatrix} X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix}.
\end{align*}
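In the $m = 2$ instance above, $\vec{w}$ has the single entry $w_1 = -\begin{vmatrix} 1 \end{vmatrix} = -1$, and indeed $X_{J \setminus \set{i,j}}^\top I_\Gamma \vec{w} = -\vec{x}_k^\top \vec{e}_{\gamma_1} = -\begin{vmatrix} X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix}$.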
Hence $X_{J \setminus \set{i,j}}^\top I_\Gamma \vec{w} = - \begin{vmatrix} X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix} \vec{1}$; factoring this scalar out of the first column of the second determinant and combining the two determinants, which differ only in their first columns, yields the last equality.
The simplified expression is then
\begin{equation*}
\vec{h}(J \setminus \set{i} | \Gamma)\cdot \vec{e}_\eta - \vec{h}(J \setminus \set{j} | \Gamma) \cdot \vec{e}_\eta =
\frac{ \begin{vmatrix} X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix}
\begin{vmatrix} \vec{1} & X_J^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix} }{
\begin{vmatrix} \vec{1} & X_{J \setminus \set{i}}^\top I_\Gamma \end{vmatrix}
\begin{vmatrix} \vec{1} & X_{J \setminus \set{j}}^\top I_\Gamma \end{vmatrix}} .
\end{equation*}
By assumption, the sign of this difference is that of $\vec{h}(J \setminus \set{i} | \Gamma) \cdot \vec{e}_\eta$, which is in turn the product of the signs of its numerator and denominator. Therefore, the sign of $\begin{vmatrix} \vec{1} & X_J^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix}$ is
\begin{equation*}
\sign \left ( \begin{vmatrix} \vec{1} & X_J^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix} \right ) =
\sign \left ( \begin{vmatrix} X_{J \setminus \set{i}}^\top I_{\Gamma \cup \set{\eta}} \end{vmatrix}
\begin{vmatrix} X_{J \setminus \set{i,j}}^\top I_\Gamma \end{vmatrix}
\begin{vmatrix} \vec{1} & X_{J \setminus \set{j}}^\top I_\Gamma \end{vmatrix} \right ).
\end{equation*}
Note that since both $J \setminus \set{i}$ and $J \setminus \set{j}$ are indices of intersections, their common parent $J \setminus \set{i,j}$ also indexes an intersection.
Thus all factors on the right-hand side were calculated in the previous step of the algorithm.
\end{proof}
\subsection{Algorithm for the intersection of n-dimensional simplices}