Fix more typos
Zentrik committed Jun 4, 2024
1 parent 40b6295 commit f3cba13
Showing 11 changed files with 50 additions and 50 deletions.
Binary file modified Applied Probability/AppliedProbSousi.pdf
Binary file not shown.
20 changes: 10 additions & 10 deletions CodingAndCryptography/02_noisy_channels.tex
@@ -37,8 +37,8 @@ \subsection{Decoding rules}
\item If $p < \frac{1}{2}$, then the maximum likelihood and minimum distance decoding rules agree.
\end{enumerate}
\end{lemma}
Note that the hypothesis in part (i) is reasonable if we first encode a message using noiseless coding.
The hypothesis in part (ii) is reasonable, since a channel with $p = \frac{1}{2}$ can carry no information, and a channel with $p > \frac{1}{2}$ can be used as a channel with probability $1 - p$ by inverting its outputs.
Note that the hypothesis in part (1) is reasonable if we first encode a message using noiseless coding.
The hypothesis in part (2) is reasonable, since a channel with $p = \frac{1}{2}$ can carry no information, and a channel with $p > \frac{1}{2}$ can be used as a channel with error probability $1 - p$ by inverting its outputs.
Channels with $p = 0$ are called \vocab{lossless channels}, and channels with $p = \frac{1}{2}$ are called \vocab{useless channels}.
\begin{proof}
\emph{Part (1).}
@@ -136,7 +136,7 @@ \subsection{Error detection and correction}
We could write $d(x,y)$ as $\sum d_1(x_i,y_i)$ where $d_1$ is the discrete metric on $\mathbb F_2$.
\end{remark}

\subsection{Minimum distance}
\subsection{Minimum Distance}
\begin{definition}[Minimum Distance]
The \vocab{minimum distance} of a code is the minimum value of $d(c_1, c_2)$ for codewords $c_1 \neq c_2$.
\end{definition}
@@ -186,14 +186,14 @@ \subsection{Minimum distance}
The minimum distance can easily be shown to be exactly 3 as $0000000, 1110000$ are codewords, so it is a $[7,16,3]$-code.
\end{example}

\subsection{Covering estimates}
\subsection{Covering Estimates}
\begin{definition}[Closed Hamming Ball]
Let $x \in \mathbb F_2^n$ and $r \geq 0$.
Then, we denote the \vocab{closed Hamming ball} with centre $x$ and radius $r$ by $B(x,r)$.
We write $V(n,r) = \abs{B(x,r)} = \sum_{i=0}^r \binom{n}{i}$ for the \vocab{volume} of this ball.
\end{definition}

\begin{lemma}[Hamming's bound; sphere packing bound]
\begin{lemma}[Hamming's Bound; Sphere Packing Bound]
An $e$-error correcting code $C$ of length $n$ has
\begin{align*}
\abs{C} \leq \frac{2^n}{V(n,e)}
@@ -215,7 +215,7 @@ \subsection{Covering estimates}

\begin{remark}
Equivalently, a code is perfect if for all $x \in \mathbb F_2^n$, $\exists! \; c \in C$ s.t. $d(x,c) \leq e$.
Alternatively, $\mathbb F_2^n$ is a union of disjoint balls $B(c,e)$ for all $c \in C$, or that any collection of $e + 1$ will cause the message to be decoded incorrectly.
Alternatively, $\mathbb F_2^n$ is a union of disjoint balls $B(c,e)$ for all $c \in C$, or that any $e + 1$ errors will cause the message to be decoded incorrectly.
\end{remark}
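For concreteness, here is a minimal Python sketch (an illustration, not part of the notes) computing $V(n,r)$ and the resulting Hamming bound; the case $n = 7$, $e = 1$ gives $2^7/V(7,1) = 16$, so the $[7,16,3]$ Hamming code above meets the bound with equality and is perfect.
\begin{verbatim}
from math import comb

def V(n, r):
    # volume |B(x, r)| of a closed Hamming ball in F_2^n
    return sum(comb(n, i) for i in range(r + 1))

# Hamming's bound: an e-error correcting code of length n has |C| <= 2^n / V(n, e).
n, e = 7, 1
print(V(n, e), 2**n // V(n, e))   # prints: 8 16
\end{verbatim}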

\begin{example}
@@ -297,7 +297,7 @@ \subsection{Covering estimates}

\subsection{Asymptotics}
We study the information rate $\frac{\log A(n,\floor*{n\delta})}{n}$ as $n \to \infty$ to see how large the information rate can be for a fixed error rate.
\begin{proposition}
\begin{proposition} \label{prp:aym}
Let $0 < \delta < \frac{1}{2}$.
Then,
\begin{enumerate}
@@ -308,7 +308,7 @@ \subsection{Asymptotics}
\end{proposition}
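As a numerical sanity check (an illustrative sketch, with $H$ the binary entropy), one can compare the rate guaranteed by the GSV bound with $1 - H(\delta)$ for a fixed $\delta$:
\begin{verbatim}
from math import comb, log2, floor

def V(n, r):
    return sum(comb(n, i) for i in range(r + 1))

def H(x):
    # binary entropy
    return -x * log2(x) - (1 - x) * log2(1 - x)

delta = 0.1
for n in (50, 200, 1000):
    d = floor(n * delta)
    # (1/n) log2 of the GSV lower bound 2^n / V(n, d - 1)
    gsv_rate = (n - log2(V(n, d - 1))) / n
    print(n, round(gsv_rate, 4), round(1 - H(delta), 4))
# each printed rate is at least 1 - H(delta), and approaches it as n grows
\end{verbatim}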

\begin{proof}
\emph{(i) implies (ii).}
\emph{(1) implies (2).}
By the GSV bound, we find
\begin{align*}
A(n,\floor*{n\delta}) \geq \frac{2^n}{V(n,\floor*{n\delta} - 1)} \geq \frac{2^n}{V(n,\floor*{n\delta})}
@@ -317,7 +317,7 @@ \subsection{Asymptotics}
\begin{align*}
\frac{1}{n}\log A(n,\floor*{n\delta}) \geq 1 - \frac{\log V(n,\floor*{n\delta})}{n} \geq 1 - H(\delta)
\end{align*}
\emph{Part (i).}
\emph{Part (1).}
$H(\delta)$ is increasing for $\delta < \frac{1}{2}$.
Therefore, wlog, we may assume $n\delta$ is an integer.
Now, as $\frac{\delta}{1-\delta} < 1$,
@@ -352,7 +352,7 @@ \subsection{Asymptotics}
\end{align*}
Recall Stirling's formula: $\ln n! = n \ln n - n + O(\log n)$.
\begin{align*}
\ln \binom{n}{r} &= (n \ln n - n) - (r \ln r - r) \\ &- \qty((n - r) \log(n - r) - (n - r)) + O(\log n) \\
\ln \binom{n}{r} &= (n \ln n - n) - (r \ln r - r) \\ &- \qty((n - r) \ln(n - r) - (n - r)) + O(\log n) \\
\log \binom{n}{r} &= -r \log \frac{r}{n} - (n - r) \log \frac{n - r}{n} + O(\log n) \\
&= n H\qty(\frac{r}{n}) + O(\log n).
\intertext{By $(\ast)$}
9 changes: 5 additions & 4 deletions CodingAndCryptography/03_information_theory.tex
@@ -149,7 +149,7 @@ \subsection{Asymptotic equipartition property}
We define
\begin{align*}
T_n &= \qty{(x_1, \dots, x_n) \midd \abs{-\frac{1}{n} \log p(x_1, \dots, x_n) - H} \leq \varepsilon} \\
&= \qty{(x_1, \dots, x_n) \mid \text{condition (ii) holds}}
&= \qty{(x_1, \dots, x_n) \mid \text{condition (2) holds}}
\end{align*}
For the converse,
\begin{align*}
@@ -237,11 +237,11 @@ \subsection{Capacity}

\begin{proof}
Let $\delta$ be s.t. $2p < \delta < \frac{1}{2}$.
We claim that we can reliably transmit at rate $R = 1 - H(\delta)\footnote{$-\delta \log \delta$} > 0$.
We claim that we can reliably transmit at rate $R = 1 - H(\delta) > 0$.
Let $C_n$ be a code of length $n$, and suppose it has minimum distance $\floor*{n\delta}$ and it's of maximal size.
Then, by the GSV bound,
Then, by \cref{prp:aym},
\begin{align*}
\abs{C_n} = A(n, \floor*{n\delta}) \geq \frac{2^n}{V(n, \floor*{n\delta} - 1)} \geq \frac{2^n}{2^{-n\delta \log \delta\footnote{$-\log \delta > 1$.}}} = 2^{n(1 - H(\delta))} = 2^{nR}
\abs{C_n} = A(n, \floor*{n\delta}) \geq 2^n(1-H(\delta)) = 2^{nR}
\end{align*}
Replacing $C_n$ with a subcode if necessary, we can assume $\abs{C_n} = \floor*{2^{nR}}$, with minimum distance at least $\floor*{n\delta}$.
Using minimum distance decoding,
@@ -451,6 +451,7 @@ \subsection{Shannon's second coding theorem}
Therefore,
\begin{align*}
I(X_1, \dots, X_n; Y_1, \dots, Y_n) &= H(Y_1, \dots, Y_n) - H(Y_1, \dots, Y_n \mid X_1, \dots, X_n) \\
&= H(Y_1, \dots, Y_n) - \sum_{i=1}^n H(Y_i \mid X_i) \\
&\leq \sum_{i=1}^n H(Y_i) - \sum_{i=1}^n H(Y_i \mid X_i) \\
&= \sum_{i=1}^n \qty[H(Y_i) - H(Y_i \mid X_i)] \\
&= \sum_{i=1}^n I(X_i;Y_i) \leq nC
38 changes: 19 additions & 19 deletions CodingAndCryptography/04_algebraic_coding_theory.tex
@@ -284,16 +284,16 @@ \subsection{Reed--Muller codes}
\end{theorem}

\begin{proof}
\emph{Part (i).}
\emph{Part (1).}
There are $\sum_{s=0}^d \binom{d}{s} = 2^d = n$ vectors listed, so it suffices to show they are a spanning set, or equivalently $RM(d,d)$ is the trivial code.
Let $p \in X$, and let $y_i$ be $v_i$ if $p_i = 0$ and $v_0 + v_i$ if $p_i = 1$.
Then $1_{\qty{p}} = y_1 \wedge \dots \wedge y_d$.
Expanding this using the distributive law, $1_{\qty{p}} \in RM(d,d)$.
But the set of $1_{\qty{p}}$ for $p \in X$ spans $\mathbb F_2^n$, as required.

\emph{Part (ii).}
\emph{Part (2).}
$RM(d,r)$ is spanned by $v_{i_1} \wedge \dots \wedge v_{i_s}$ where $i_1 < \dots < i_s$ and $0 \leq s \leq r$.
Since these are linearly independent by (i), so a basis.
These are linearly independent by (1), so they form a basis.
Hence the rank of $RM(d,r)$ is the number of such vectors, which is $\sum_{s=0}^r \binom{d}{s}$.
\end{proof}
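The construction can be checked directly for small parameters.  The following Python sketch (illustrative only, taking $v_i$ to be the indicator vector of $\qty{p : p_i = 1}$) builds the wedge products for $d = 4$, $r = 2$ and verifies that all $\sum_{s=0}^{2} \binom{4}{s} = 11$ of them are linearly independent over $\mathbb F_2$:
\begin{verbatim}
from itertools import combinations, product
from math import comb

d, r = 4, 2
points = list(product([0, 1], repeat=d))        # X = F_2^d, n = 2^d points
v = [[p[i] for p in points] for i in range(d)]  # v_i = indicator of {p : p_i = 1}

def wedge(vs):
    # pointwise product (logical AND); the empty wedge is the all-ones vector v_0
    out = [1] * len(points)
    for w in vs:
        out = [a & b for a, b in zip(out, w)]
    return out

gens = [wedge([v[i] for i in S])
        for s in range(r + 1) for S in combinations(range(d), s)]

def rank_gf2(rows):
    # Gaussian elimination over F_2 (standard xor-basis trick), rows packed into ints
    basis = []
    for x in (int("".join(map(str, row)), 2) for row in rows):
        for b in basis:
            x = min(x, x ^ b)
        if x:
            basis.append(x)
    return len(basis)

print(len(gens), rank_gf2(gens), sum(comb(d, s) for s in range(r + 1)))  # 11 11 11
\end{verbatim}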

Expand All @@ -314,13 +314,13 @@ \subsection{New codes from old (again)}
\end{lemma}

\begin{proof}
\emph{Part (i).}
\emph{Part (1).}
If $C_1$ has basis $x_1, \dots, x_k$ and $C_2$ has basis $y_1, \dots, y_\ell$, then $C_1 \mid C_2$ has basis
\begin{align*}
\qty{(x_i \mid x_i) : 1 \leq i \leq k} \cup \qty{(0 \mid y_i) : 1 \leq i \leq \ell}
\end{align*}

\emph{Part (ii).}
\emph{Part (2).}
Let $0 \neq (x \mid x + y)\footnote{$x \in C_1, y \in C_2$ not both $0$.} \in C_1 \mid C_2$.
If $y \neq 0$, then $w(x \mid x + y) = w(x) + w(x + y) \geq w(y) \geq d(C_2)$.
If $y = 0$, then $w(x \mid x + y) = w(x \mid x) = 2w(x) \geq 2d(C_1)$.
@@ -338,25 +338,25 @@ \subsection{New codes from old (again)}
\end{theorem}

\begin{proof}
\emph{Part (i).}
\emph{Part (1).}
$RM(d-1, r-1) \subseteq RM(d-1, r)$, so the bar product is defined.
Order the elements of $X = \mathbb{F}_2^d$ s.t. $v_d = (\underbracket{0, \dots, 0}_{2^{d-1}} \mid \underbracket{1, \dots, 1}_{2^{d-1}})$ and $v_i = (v_i' \mid v_i')$ ($1 \leq i \leq d - 1$).
If $z \in RM(d, r)$ then $z$ is sum of wedge products of $v_1, \dots, v_d$.
Write $z = x + (y \wedge v_d)$ for $x, y$ sums of wedge products of $v_1, \dots, v_{d-1}$.
Then $x = (x' \mid x')$\footnote{Note $x'$ is the vector containing the first $2^{d-1}$ components of $x$. Similarly for $y$.}, some $x' \in RM(d-1, r)$ and $y = (y' \mid y')$, some $y' \in RM(d-1, r-1)$.
Then $z = x + (y \wedge v_d) = $
Then
\begin{align*}
z = x + (y \wedge v_d) &= (x' \mid x') + (y' \mid y') \wedge (0, \dots, 0 \mid 1, \dots, 1) \\
&= (x' \mid x' + y') \in RM(d-1, r) \mid RM(d-1, r-1).
\end{align*}

\emph{Part (ii).}
\emph{Part (2).}
If $r = 0$, then $RM(d,r)$ is the repetition code of length $2^d$, which has min distance $2^d$. \\
If $r = d$, $RM(d,r)$ is the trivial code of length $2^d$, which has min distance $1 = 2^{d-d}$. \\
We prove the remaining cases by induction on $d$.
From part (i), $RM(d,r) = RM(d-1,r) \mid RM(d-1,r-1)$.
From part (1), $RM(d,r) = RM(d-1,r) \mid RM(d-1,r-1)$.
By induction, the min distance of $RM(d-1,r)$ is $2^{d-1-r}$ and the min distance of $RM(d-1,r-1)$ is $2^{d-r}$.
By part (ii) of \cref{lem:12.6}, the min distance of $RM(d,r)$ is $\min\qty{2\cdot 2^{d-1-r}, 2^{d-r}} = 2^{d-r}$.
By part (2) of \cref{lem:12.6}, the min distance of $RM(d,r)$ is $\min\qty{2\cdot 2^{d-1-r}, 2^{d-r}} = 2^{d-r}$.
\end{proof}
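Part (1) also gives a direct way to enumerate $RM(d,r)$ for small $d$.  The sketch below (illustrative only) builds the codes via the bar product recursion and confirms the minimum distance $2^{d-r}$, using that the minimum distance of a linear code is its minimum nonzero weight:
\begin{verbatim}
from itertools import product

def rm(d, r):
    # RM(d, r) as a set of tuples via RM(d, r) = RM(d-1, r) | RM(d-1, r-1);
    # exponential in d, so small parameters only
    n = 2 ** d
    if r <= 0:
        return {tuple([0] * n), tuple([1] * n)}   # repetition code
    if r >= d:
        return set(product([0, 1], repeat=n))     # trivial code F_2^n
    C1, C2 = rm(d - 1, r), rm(d - 1, r - 1)
    return {tuple(x) + tuple(a ^ b for a, b in zip(x, y)) for x in C1 for y in C2}

for d, r in [(3, 1), (4, 2)]:
    C = rm(d, r)
    dist = min(sum(c) for c in C if any(c))       # minimum weight
    print(d, r, len(C), dist, 2 ** (d - r))       # dist equals 2^(d-r)
\end{verbatim}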

\begin{remark}
@@ -435,7 +435,7 @@ \subsection{Cyclic Codes}
Note (3)' is the special case $f(X) = X$ of (3).
In general, $f(X) = \sum a_i X^i$ so
\begin{align*}
f(X) g(X) &= \sum_i a_i \underbracket{X^i g(X)}_{\in \mathcal{C} \text{ by (iii)}} \in \mathcal{C} \text{ by (2)}
f(X) g(X) &= \sum_i a_i \underbracket{X^i g(X)}_{\in \mathcal{C} \text{ by (3)}} \in \mathcal{C} \text{ by (2)}
\end{align*}
\end{proof}

@@ -465,8 +465,8 @@ \subsection{Cyclic Codes}
Then, $r = p - qg \in C$ as $C$ is an ideal.
But $\deg r < \deg g$, so $r = 0$.
Hence, $g \mid p$.
This shows $C \subseteq (g)$ in (i). \\
For part (ii), let $p(X) = X^n - 1$, giving $g \mid X^n - 1$.
This shows $C \subseteq (g)$ in (1). \\
For part (2), let $p(X) = X^n - 1$, giving $g \mid X^n - 1$.

Now we show uniqueness.
Suppose $C = (g_1) = (g_2)$.
@@ -682,17 +682,17 @@ \subsubsection{Decoding BCH Codes}
By definition of $C$, we have $c(\alpha^j) = 0$ for all $1 \leq j \leq \delta - 1$.
Hence $c(\alpha^j) = 0$ for $1 \leq j \leq 2t$.
As $r = c + e$, $r(\alpha^j) = e(\alpha^j)$ for all $1 \leq j \leq 2t$, hence $\sigma(X) \sum_{j=1}^{2t} r(\alpha^j) X^j = \omega(X)$ mod $X^{2t+1}$.
This verifies (i) and (ii) for this choice of $\omega$.
This verifies (1) and (2) for this choice of $\omega$.
$\omega(X) = -X\sigma'(X)$ so $\deg \omega = \deg \sigma = \abs{\mathcal E} \leq t$.

For uniqueness, suppose there exist $\widetilde \sigma, \widetilde \omega \in K[X]$ with the properties (i), (ii).
For uniqueness, suppose there exist $\widetilde \sigma, \widetilde \omega \in K[X]$ with the properties (1), (2).
WLOG, we can assume $\deg \widetilde \sigma \leq \deg \sigma$.
$\sigma(X)$ has distinct nonzero roots, so $\omega(X) = -X\sigma'(X)$ is nonzero at these roots.
Hence $\sigma, \omega$ are coprime. \\
By (ii), $\widetilde \sigma(X) \omega(X) = \sigma(X) \widetilde \omega(X)$ mod $X^{2t+1}$.
By (2), $\widetilde \sigma(X) \omega(X) = \sigma(X) \widetilde \omega(X)$ mod $X^{2t+1}$.
But the degrees of $\sigma, \widetilde \sigma, \omega, \widetilde \omega$ are at most $t$, so this congruence is an equality.
But $\sigma(X)$ and $\omega(X)$ are coprime, so $\sigma \mid \widetilde \sigma$, but $\deg \widetilde \sigma \leq \deg \sigma$ by assumption, so $\widetilde \sigma = \lambda \sigma$ for some $\lambda \in K$.
By (i), $\sigma(0) = \widetilde\sigma(0)$ hence $\lambda = 1$, giving $\widetilde \sigma = \sigma$.
By (1), $\sigma(0) = \widetilde\sigma(0)$ hence $\lambda = 1$, giving $\widetilde \sigma = \sigma$.
\end{proof}

\underline{Decoding Algorithm} \\
@@ -786,7 +786,7 @@ \subsection{Shift registers}
\underline{Over $\mathbb{C}$}: The general solution to a recurrence relation is a linear combination of $\alpha^n, n \alpha^n, \dots, n^{t-1} \alpha^n$ for $\alpha$ a root of $P(X)$ with multiplicity $t$.

\underline{Over $\mathbb{F}_2$}: However, we have a recurrence relation over $\mathbb{F}_2$ and $n^2 \equiv n \mod 2$ so we only get two solutions, which is not enough.
We resolve this by replacing $n^j \alpha^n$ by $\binom{n}{j} \alpha^n$, i.e. the general solution if a linear combination of $\binom{n}{0} \alpha^n, \binom{n}{1} \alpha^n, \dots$
We resolve this by replacing $n^j \alpha^n$ by $\binom{n}{j} \alpha^n$, i.e. the general solution is a linear combination of $\binom{n}{0} \alpha^n, \binom{n}{1} \alpha^n, \dots$

\begin{definition}[Feedback Polynomial]
The \vocab{feedback polynomial} is $\check{P}(X) = a_0 X^d + \dots + a_{d-1} X + 1 = \sum_{i=0}^d a_{d-i} X^i$.
@@ -817,7 +817,7 @@ \subsection{Shift registers}

\subsection{The Berlekamp--Massey method}
Let $(x_n)_{n \geq 0}$ be the output of a (binary) LFSR.
We wish to find the unknown length $d$ and values $a_0 (=1), a_1, \dots, a_{d-1}, a_d(=1)$ s.t. $a_0 x_n + \sum_{i=1}^d a_{d-i} x_{n-i} = 0$ for all $n \geq d$.
We wish to find the unknown length $d$ and values $a_0 (=1), a_1, \dots, a_{d-1}, a_d(=1)$ s.t. $a_d x_n + \sum_{i=1}^d a_{d-i} x_{n-i} = 0$ for all $n \geq d$.
We have
\begin{align*}
\underbrace{\begin{pmatrix}
27 changes: 13 additions & 14 deletions CodingAndCryptography/05_cryptography.tex
@@ -45,7 +45,7 @@ \subsection{Breaking cryptosystems}
We model the keys and messages as independent r.v.s $K, M$ taking values in $\mathcal K, \mathcal M$.
The ciphertext r.v. is $C = e(M, K) \in \mathcal C$.

\begin{definition}[Perfect Accuracy]
\begin{definition}[Perfect Secrecy]
A cryptosystem $(\mathcal M, \mathcal K, \mathcal C)$ has \vocab{perfect secrecy} if $H(M \mid C) = H(M)$, or equivalently, $M$ and $C$ are independent, or $I(M;C) = 0$.
\end{definition}
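As a small illustration (not from the notes), a one-time pad with a uniformly random key has perfect secrecy; the sketch below verifies $P(M = m, C = c) = P(M = m)P(C = c)$ exhaustively for two-bit messages:
\begin{verbatim}
from itertools import product
from collections import Counter
from fractions import Fraction

msgs = {(0, 0): Fraction(1, 2), (0, 1): Fraction(1, 4),
        (1, 0): Fraction(1, 8), (1, 1): Fraction(1, 8)}   # any message distribution
keys = list(product([0, 1], repeat=2))                    # uniform key, prob 1/4 each

joint = Counter()
for m, pm in msgs.items():
    for k in keys:
        c = tuple(a ^ b for a, b in zip(m, k))            # c = e(m, k) = m xor k
        joint[(m, c)] += pm * Fraction(1, 4)

p_c = Counter()
for (m, c), pr in joint.items():
    p_c[c] += pr

print(all(joint[(m, c)] == msgs[m] * p_c[c] for m in msgs for c in p_c))  # True
\end{verbatim}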

@@ -301,12 +301,12 @@ \subsection{Rabin cryptosystem}
\begin{proof}
\emph{Part (1).}
If there is a solution $x$, $-x$ also works.
If $x, y$ are solutions, then $x^2 \equiv y^2$ mod $p$ so $p \mid (x^2 - y^2) = (x-y)(x+y)$, so either $p \mid x-y$ or $p \mid x+y$, so $x = \pm y$.
If $x, y$ are solutions, then $x^2 \equiv y^2$ mod $p$ so $p \mid (x^2 - y^2) = (x-y)(x+y)$, so either $p \mid x-y$ or $p \mid x+y$, so $x = \pm y \mod p$.

\emph{Part (2).}
If $x_0$ is a solution, then by the Chinese remainder theorem, there exist solutions $x$ with $x \equiv \pm x_0$ mod $p$ and $x \equiv \pm x_0$ mod $q$.
This gives four solutions as required.
By (i), these are the only possible solutions.
By (1), these are the only possible solutions.
\end{proof}

Hence, to decrypt the Rabin cipher, we must find all four solutions to $x^2 \equiv c$ mod $N$.
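A sketch of this decryption step (illustrative only, under the common extra assumption $p \equiv q \equiv 3 \pmod 4$, so that a square root of $c$ mod $p$ is $c^{(p+1)/4}$; the four roots mod $N$ are then combined by the Chinese remainder theorem as in part (2)):
\begin{verbatim}
def sqrt_mod_prime(c, p):
    # assumes p % 4 == 3 and c is a square mod p
    return pow(c, (p + 1) // 4, p)

def rabin_roots(c, p, q):
    N = p * q
    rp, rq = sqrt_mod_prime(c, p), sqrt_mod_prime(c, q)
    u, v = pow(p, -1, q), pow(q, -1, p)     # CRT coefficients
    return {(sp * v * q + sq * u * p) % N
            for sp in (rp, p - rp) for sq in (rq, q - rq)}

p, q, m = 7, 11, 5                          # toy primes, both 3 mod 4
c = m * m % (p * q)                         # ciphertext c = m^2 mod N
print(sorted(rabin_roots(c, p, q)))         # the four roots; m = 5 is among them
\end{verbatim}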
@@ -520,20 +520,19 @@ \subsection{Elgamal Signature Scheme}
Let $g$ be a primitive root mod $p$.
The public key is $p, g, y = g^u \text{ mod } p$.
The private key is $u$.
Let $h \colon \mathcal M \to \qty{1, \dots, p-1}$ be a collision-resistant hash function.

To send a message $m$ with $0 \leq m \leq p-1$, Alice randomly chooses $k$ with $1 \leq k \leq p-2$ coprime to $p-1$.
She computes $r, s$ with $1 \leq r \leq p-1$ and $1 \leq s \leq p-2$ satisfying
\begin{align*}
r \equiv g^k \mod p;\quad h(m) \equiv ur + ks \mod (p-1)
r \equiv g^k \mod p;\quad m \equiv ur + ks \mod (p-1)
\end{align*}
Since $k$ is coprime to $p-1$, the congruence for $s$ always has a solution.
Alice signs the message $m$ with the signature $(r, s)$.
Now,
\begin{align*}
g^{h(m)} \equiv g^{ur + ks} \equiv (g^u)^r (g^k)^s \equiv y^r r^s \mod p
g^{m} \equiv g^{ur + ks} \equiv (g^u)^r (g^k)^s \equiv y^r r^s \mod p
\end{align*}
Bob accepts a signature if $g^{h(m)} \equiv y^r r^s$ mod $p$.
Bob accepts a signature if $g^{m} \equiv y^r r^s$ mod $p$.
To forge a signature, obvious attacks involve the discrete logarithm problem, finding $u$ from $y = g^u$.
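A toy implementation of the scheme (an illustrative sketch with small made-up parameters, signing $m$ itself rather than a hash):
\begin{verbatim}
from math import gcd

p, g = 467, 2                       # small prime; 2 is a primitive root mod 467
u = 127                             # Alice's private key
y = pow(g, u, p)                    # public key

def sign(m, k):
    assert gcd(k, p - 1) == 1
    r = pow(g, k, p)
    s = (m - u * r) * pow(k, -1, p - 1) % (p - 1)   # solves m = u r + k s mod (p-1)
    return r, s

def verify(m, r, s):
    return pow(g, m, p) == pow(y, r, p) * pow(r, s, p) % p

m, k = 100, 213                     # k chosen coprime to p - 1
print(verify(m, *sign(m, k)))       # True
\end{verbatim}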

\begin{lemma}
@@ -552,19 +551,19 @@ \subsection{Elgamal Signature Scheme}
Suppose she sends $m_1, m_2$ using the same value of $k$.
Denote the signatures $(r, s_1)$ and $(r, s_2)$; note that $r$ depends only on $k$ and is hence fixed.
\begin{align*}
h(m_1) \equiv ur + ks_1 \mod (p-1);\quad h(m_2) \equiv ur + ks_2 \mod (p-1)
m_1 \equiv ur + ks_1 \mod (p-1);\quad m_2 \equiv ur + ks_2 \mod (p-1)
\end{align*}
Hence,
\begin{align*}
h(m_1) - h(m_2) \equiv k(s_1 - s_2) \mod (p-1)
m_1 - m_2 \equiv k(s_1 - s_2) \mod (p-1)
\end{align*}
By the previous lemma, there are $d = \gcd(p-1, s_1 - s_2)$ solutions for $k$ modulo $p-1$.
Choose the solution that gives the correct value in the first congruence $r \equiv g^k$ mod $p$.
Then,
\begin{align*}
s_1 \equiv \frac{h(m_1) - ur}{k} \mod (p-1)
s_1 \equiv \frac{m_1 - ur}{k} \mod (p-1)
\end{align*}
This gives $ur \equiv h(m_1) - ks_1$.
This gives $ur \equiv m_1 - ks_1$.
Hence, using the lemma again, there are $\gcd(p-1, r)$ solutions for $u$.
Choose the solution for $u$ that gives $y \equiv g^u$.
This allows us to deduce Alice's private key $u$, as well as the exponent $k$ used in both messages.
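The attack can be made concrete with the toy parameters of the previous sketch (again illustrative only; the helper below simply solves the two linear congruences as described, trying each of the $\gcd$ many candidates):
\begin{verbatim}
from math import gcd

def recover(p, g, y, m1, r, s1, m2, s2):
    n = p - 1
    dd = (s1 - s2) % n
    d = gcd(dd, n)
    # k (s1 - s2) = m1 - m2 mod n has d solutions; pick the one with g^k = r
    k0 = ((m1 - m2) % n // d) * pow(dd // d, -1, n // d) % (n // d)
    for t in range(d):
        k = k0 + t * (n // d)
        if pow(g, k, p) == r:
            break
    # u r = m1 - k s1 mod n; same method, pick the u with g^u = y
    c, e = r % n, (m1 - k * s1) % n
    d2 = gcd(c, n)
    u0 = (e // d2) * pow(c // d2, -1, n // d2) % (n // d2)
    for t in range(d2):
        u = u0 + t * (n // d2)
        if pow(g, u, p) == y:
            return k, u

p, g, u, k = 467, 2, 127, 213
y, r = pow(g, u, p), pow(g, k, p)
s = lambda m: (m - u * r) * pow(k, -1, p - 1) % (p - 1)
print(recover(p, g, y, 100, r, s(100), 313, s(313)))   # (213, 127)
\end{verbatim}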
@@ -574,7 +573,7 @@ \subsection{Elgamal Signature Scheme}
In practice, this is stopped by signing a hash value of the message instead of the message itself.
\end{remark}

\subsection{The digital signature algorithm}
\subsection{The digital signature algorithm - Not Lectured}
The digital signature algorithm is a variant of the Elgamal signature scheme developed by the NSA.
The public key is $(p, q, g)$ constructed as follows.
% TODO: verify previous line
@@ -629,7 +628,7 @@ \subsubsection{Using a Public Key Cryptosystem}
To reveal her choice, Alice sends her private key to Bob, who can then use it to decipher the message $d_A(c) = d_A(e_A(m)) = m$.
He can also check that $d_A, e_A$ are inverse functions and thus ensure that Alice sent the correct private key.

\subsubsection{Using Coding Theory}
\subsubsection{Using Coding Theory - Not Lectured}
Alternatively, suppose that Alice has two ways to communicate to Bob: a clear channel which transmits with no errors, and a binary symmetric channel with error prob $p$.
Suppose $0 < p < \frac{1}{2}$, and the noisy channel corrupts bits independently of any action of Alice or Bob, so neither can affect its behaviour.

@@ -651,7 +650,7 @@ \subsubsection{Using Coding Theory}
If she were to send $c' \neq c$, she must ensure that $d(r,c') \approx Np$, but the prob that this happens is small unless she chooses $c'$ very close to $c$.
But any two distinct codewords have distance at least $d$, so she \underline{cannot} cheat.

\subsection{Secret sharing schemes - Non Examinable?}
\subsection{Secret sharing schemes - Non Examinable}
% This has been made nonexaminable.
Suppose that the CMS is attacked by the MIO.
% (mathematical institute, oxford)
Binary file modified CodingAndCryptography/cc.pdf
Binary file not shown.
Binary file modified Graph_Theory.pdf
Binary file not shown.
2 changes: 1 addition & 1 deletion ProbAndMeasure/01_measures.tex
@@ -476,7 +476,7 @@ \subsection{Lebesgue measure}
\end{align*}
\end{proof}

\begin{definition}[Lebesgue null set]
\begin{definition}[Lebesgue Null Set]
A Borel set $B \in \mathcal B$ is called a \vocab{Lebesgue null set} if $\lambda(B) = 0$ where $\lambda$ is the Lebesgue measure.
\end{definition}

