diff --git a/src/qailo/mps/__init__.py b/src/qailo/mps/__init__.py
index ef155df..2050f86 100644
--- a/src/qailo/mps/__init__.py
+++ b/src/qailo/mps/__init__.py
@@ -1,7 +1,8 @@
 from .apply import apply
-from .mps import MPS
+from .mps_c import MPS_C
+from .mps_p import MPS_P
 from .norm import norm
-from .product_state import product_state
+from .product_state import product_state, tensor_decomposition
 from .projector import projector
 from .state_vector import state_vector
 from .svd import compact_svd, tensor_svd
@@ -9,9 +10,11 @@
 __all__ = [
     apply,
-    MPS,
+    MPS_C,
+    MPS_P,
     norm,
     product_state,
+    tensor_decomposition,
     projector,
     state_vector,
     compact_svd,
diff --git a/src/qailo/mps/apply.py b/src/qailo/mps/apply.py
index d0d0b6c..bbec1b3 100644
--- a/src/qailo/mps/apply.py
+++ b/src/qailo/mps/apply.py
@@ -18,12 +18,12 @@ def _swap_tensors(m, s, maxdim=None):
 
 def _move_qubit(m, p, s, maxdim=None):
     if m.q2t[p] != s:
-        # print(f"moving qubit {p} at {m.q2t[p]} to {s}")
+        print(f"moving qubit {p} at {m.q2t[p]} to {s}")
         for u in range(m.q2t[p], s):
-            # print(f"swap tensors {u} and {u+1}")
+            print(f"swap tensors {u} and {u+1}")
            _swap_tensors(m, u, maxdim=maxdim)
         for u in range(m.q2t[p], s, -1):
-            # print(f"swap tensors {u-1} and {u}")
+            print(f"swap tensors {u-1} and {u}")
             _swap_tensors(m, u - 1, maxdim=maxdim)
diff --git a/src/qailo/mps/mps.py b/src/qailo/mps/mps_c.py
similarity index 97%
rename from src/qailo/mps/mps.py
rename to src/qailo/mps/mps_c.py
index c11a846..df67963 100644
--- a/src/qailo/mps/mps.py
+++ b/src/qailo/mps/mps_c.py
@@ -1,10 +1,12 @@
+from copy import deepcopy
+
 import numpy as np
 
 from ..operator import type as op
 from .svd import tensor_svd
 
 
-class MPS:
+class MPS_C:
     """
     MPS representation of quantum pure state
 
@@ -20,7 +22,7 @@ class MPS:
     """
 
     def __init__(self, tensors):
-        self.tensors = tensors
+        self.tensors = deepcopy(tensors)
         n = len(self.tensors)
         self.q2t = list(range(n))
         self.t2q = list(range(n))
diff --git a/src/qailo/mps/mps_p.py b/src/qailo/mps/mps_p.py
new file mode 100644
index 0000000..6b4ca4f
--- /dev/null
+++ b/src/qailo/mps/mps_p.py
@@ -0,0 +1,113 @@
+from copy import deepcopy
+
+import numpy as np
+
+from ..operator import type as op
+from .projector import projector
+from .svd import tensor_svd
+
+
+class MPS_P:
+    """
+    MPS representation of quantum pure state
+
+    shape of tensors: [du, dp, dl]
+    du: dimension of upper leg (1 for top tensor)
+    dp: dimension of physical leg (typically 2)
+    dl: dimension of lower leg (1 for bottom tensor)
+
+    canonical position: cp in range(n)
+    0 <= cp(0) <= cp(1) < n
+    tensors [0...cp(0)-1]: top canonical
+    tensors [cp(1)+1...n-1]: bottom canonical
+    """
+
+    def __init__(self, tensors):
+        self.tensors = deepcopy(tensors)
+        n = len(self.tensors)
+        self.q2t = list(range(n))
+        self.t2q = list(range(n))
+        self.cp = [0, n - 1]
+        # canonicalization matrices
+        # put sentinels (1x1 identities) at t = 0 and t = n
+        self.cmat = [np.identity(1)] + [None] * (n - 1) + [np.identity(1)]
+
+    def _canonicalize(self, p0, p1=None):
+        p1 = p0 if p1 is None else p1
+        n = len(self.tensors)
+        assert 0 <= p0 and p0 <= p1 and p1 < n
+        if self.cp[0] < p0:
+            for t in range(self.cp[0], p0):
+                A = np.einsum(self.cmat[t], [0, 3], self.tensors[t], [3, 1, 2])
+                _, self.cmat[t + 1] = tensor_svd(A, [[0, 1], [2]], "left")
+            self.cp[0] = p0
+            self.cp[1] = max(p0, self.cp[1])
+        if self.cp[1] > p1:
+            for t in range(self.cp[1], p1, -1):
+                A = np.einsum(self.tensors[t], [0, 1, 3], self.cmat[t + 1], [3, 2])
+                self.cmat[t], _ = tensor_svd(A, [[0], [1, 2]], "right")
+            self.cp[1] = p1
+
+    def _is_canonical(self):
+        # tensor shape
+        n = len(self.tensors)
+        dims = []
+        assert self.tensors[0].shape[0] == 1
+        dims.append(self.tensors[0].shape[0])
+        for t in range(1, n - 1):
+            dims.append(self.tensors[t].shape[0])
+            assert self.tensors[t].shape[0] == self.tensors[t - 1].shape[2]
+            assert self.tensors[t].shape[2] == self.tensors[t + 1].shape[0]
+        assert self.tensors[n - 1].shape[2] == 1
+        dims.append(self.tensors[n - 1].shape[0])
+        dims.append(self.tensors[n - 1].shape[2])
+
+        # qubit <-> tensor mapping
+        for q in range(n):
+            assert self.t2q[self.q2t[q]] == q
+        for t in range(n):
+            assert self.q2t[self.t2q[t]] == t
+
+        # canonicality
+        assert self.cp[0] in range(n)
+        assert self.cp[1] in range(n)
+        A = np.identity(1)
+        for t in range(0, self.cp[0]):
+            A = np.einsum(A, [0, 3], self.tensors[t], [3, 1, 2])
+            A = np.einsum(A, [2, 3, 1], self.tensors[t].conj(), [2, 3, 0])
+            B = np.einsum(self.cmat[t + 1], [2, 1], self.cmat[t + 1].conj(), [2, 0])
+            assert A.shape == B.shape
+            assert np.allclose(A, B)
+        A = np.identity(1)
+        for t in range(n - 1, self.cp[1], -1):
+            A = np.einsum(self.tensors[t], [0, 1, 3], A, [3, 2])
+            A = np.einsum(self.tensors[t].conj(), [1, 2, 3], A, [0, 2, 3])
+            B = np.einsum(self.cmat[t], [0, 2], self.cmat[t].conj(), [1, 2])
+            assert np.allclose(A, B)
+        return True
+
+    def _apply_one(self, p, s):
+        assert op.num_qubits(p) == 1
+        self.tensors[s] = np.einsum(self.tensors[s], [0, 3, 2], p, [1, 3])
+        self.cp[0] = min(self.cp[0], s)
+        self.cp[1] = max(self.cp[1], s)
+
+    def _apply_two(self, p, s, maxdim=None, reverse=False):
+        """
+        apply 2-qubit operator on neighboring tensors, s and s+1
+        """
+        self._canonicalize(s, s + 1)
+        p0, p1 = tensor_svd(p, [[0, 2], [1, 3]])
+        t0 = self.tensors[s]
+        t1 = self.tensors[s + 1]
+        if not reverse:
+            t0 = np.einsum(t0, [0, 4, 3], p0, [1, 4, 2])
+            t1 = np.einsum(t1, [0, 4, 3], p1, [1, 2, 4])
+        else:
+            t0 = np.einsum(t0, [0, 4, 3], p1, [2, 1, 4])
+            t1 = np.einsum(t1, [0, 4, 3], p0, [2, 4, 1])
+        tt0 = np.einsum(self.cmat[s], [0, 4], t0, [4, 1, 2, 3])
+        tt1 = np.einsum(t1, [0, 1, 2, 4], self.cmat[s + 2], [4, 3])
+        WL, WR = projector(tt0, [0, 1, 4, 5], tt1, [5, 4, 2, 3], nkeep=maxdim)
+        self.tensors[s] = np.einsum(t0, [0, 1, 3, 4], WR, [3, 4, 2])
+        self.tensors[s + 1] = np.einsum(WL.conj(), [3, 4, 0], t1, [4, 3, 1, 2])
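
The new MPS_P keeps the [du, dp, dl] tensor layout of MPS_C but additionally caches the canonicalization matrices cmat that bracket the canonical window cp; _apply_two reuses cmat[s] and cmat[s + 2] when compressing a two-site update. A minimal construction sketch (assuming the package is imported as q, i.e. `import qailo as q`, as the tests further down appear to do):

import qailo as q

n = 4
m = q.mps.MPS_P(q.mps.product_state(n))  # tensors are deep-copied on construction
assert q.mps.is_canonical(m)
assert m.cp == [0, n - 1]                # nothing has been canonicalized yet
print(q.mps.norm(m))                     # expected to be 1 for a product state

m._canonicalize(1, 2)                    # left-sweep up to 1, right-sweep down to 2
assert m.cp == [1, 2]                    # cmat[1] and cmat[3] now hold cached matrices
assert q.mps.is_canonical(m)
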
diff --git a/src/qailo/mps/norm.py b/src/qailo/mps/norm.py
index 596c447..ff3bdc2 100644
--- a/src/qailo/mps/norm.py
+++ b/src/qailo/mps/norm.py
@@ -4,7 +4,7 @@
 
 
 def norm(m):
-    A = np.identity(2)
+    A = np.identity(1)
     for t in range(num_qubits(m)):
         A = np.einsum("ij,jkl->ikl", A, m.tensors[t])
         A = np.einsum("ijk,ijl->kl", A, m.tensors[t].conj())
diff --git a/src/qailo/mps/product_state.py b/src/qailo/mps/product_state.py
index 2ed0994..f255175 100644
--- a/src/qailo/mps/product_state.py
+++ b/src/qailo/mps/product_state.py
@@ -1,5 +1,9 @@
 import numpy as np
 
+from ..state_vector.type import num_qubits
+from ..state_vector.vector import vector
+from .svd import tensor_svd
+
 
 def product_state(n, c=0):
     assert n > 0
@@ -9,3 +13,16 @@ def product_state(n, c=0):
         tensor[0, (c >> (n - t - 1)) & 1, 0] = 1
         tensors.append(tensor)
     return tensors
+
+
+def tensor_decomposition(v, nkeep=None, tol=1e-12):
+    n = num_qubits(v)
+    vv = vector(v).reshape((1, 2**n))
+    tensors = []
+    for t in range(n - 1):
+        dims = vv.shape
+        vv = vv.reshape(dims[0], 2, dims[1] // 2)
+        t, vv = tensor_svd(vv, [[0, 1], [2]], "left", nkeep, tol)
+        tensors.append(t)
+    tensors.append(vv.reshape(vv.shape + (1,)))
+    return tensors
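
tensor_decomposition converts a dense state vector into MPS tensors by peeling off one qubit at a time with left-canonical tensor_svd splits, optionally truncating each bond to nkeep. A small round-trip sketch (assuming `import qailo as q`; the (2,)*n + (1,) input shape follows test_canonical.py below):

import numpy as np
import qailo as q

n = 4
v = np.random.random(2**n).reshape((2,) * n + (1,))
v /= np.linalg.norm(v)

tensors = q.mps.tensor_decomposition(v)           # exact decomposition
m = q.mps.MPS_P(tensors)
assert abs(q.sv.fidelity(q.mps.state_vector(m), v) - 1) < 1e-8

tensors = q.mps.tensor_decomposition(v, nkeep=2)  # truncate every bond to <= 2
assert all(t.shape[0] <= 2 and t.shape[2] <= 2 for t in tensors)
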
diff --git a/src/qailo/mps/projector.py b/src/qailo/mps/projector.py
index 92876ff..0b3ea83 100644
--- a/src/qailo/mps/projector.py
+++ b/src/qailo/mps/projector.py
@@ -11,20 +11,16 @@ def normalize_s(s0, s1):
         if s0[k] not in s1:
             ss0[k] = i
             i += 1
-    print(s0, s1, ss0, ss1)
     for k in range(len(s1)):
         if s1[k] not in s0:
             ss1[k] = i
             i += 1
-    print(s0, s1, ss0, ss1)
     for k in range(len(s0)):
         if s0[k] in s1:
             ss0[k] += i
-    print(s0, s1, ss0, ss1)
     for k in range(len(s1)):
         if s1[k] in s0:
             ss1[k] += i
-    print(s0, s1, ss0, ss1)
     return ss0, ss1
@@ -55,12 +51,12 @@ def projector(T0, s0, T1, s1, nkeep=None, tol=1e-12):
     dim1R = np.prod([T1.shape[i] for i in legs1R])
     assert len(dims0R) == len(dims1L)
     TT0 = np.einsum(T0, ss0).reshape([dim0L] + dims0R)
-    TT1 = np.einsum(T1, ss1).reshape([dim1R] + dims1L)
+    TT1 = np.einsum(T1, ss1).reshape([dim1R] + dims0R)
     ss_sum = list(range(2, len(dims0R) + 2))
     A = np.einsum(TT0, [0] + ss_sum, TT1, [1] + ss_sum)
     S, U, V = compact_svd(A, nkeep=nkeep, tol=tol)
     U = np.einsum(U, [0, 1], np.sqrt(1 / S), [1], [0, 1])
     V = np.einsum(V, [0, 1], np.sqrt(1 / S), [1], [0, 1])
-    WL = np.einsum(TT0, [0] + ss_sum, U, [0, max(ss_sum) + 1])
+    WL = np.einsum(TT0.conj(), [0] + ss_sum, U, [0, max(ss_sum) + 1])
     WR = np.einsum(TT1, [0] + ss_sum, V, [0, max(ss_sum) + 1])
     return WL, WR
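
The projector changes pair with MPS_P._apply_two above: projector(T0, s0, T1, s1) now conjugates TT0 when building WL, and the returned pair is bi-orthogonal on the new bond, so inserting WR followed by WL.conj() on the shared legs reproduces the original contraction when nothing is truncated; passing nkeep caps the rank of that pair, which is how _apply_two enforces maxdim. A small sketch of that contract, mirroring test_projector.py below (assuming `import qailo as q`):

import numpy as np
import qailo as q

# two tensors sharing a pair of bond legs (labels 4 and 5), as in test_projector.py
T0 = np.random.random((3, 2, 5, 4))   # legs [0, 1, 4, 5]
T1 = np.random.random((4, 5, 2, 3))   # legs [5, 4, 2, 3]
WL, WR = q.mps.projector(T0, [0, 1, 4, 5], T1, [5, 4, 2, 3])

# bi-orthogonality: WL^dagger WR is the identity on the new bond
assert np.allclose(
    np.einsum(WL.conj(), [2, 3, 0], WR, [2, 3, 1]), np.identity(WL.shape[2])
)

# inserting the pair on the shared legs reproduces the full contraction of T0 and T1
A = np.einsum(T0, [0, 1, 4, 5], WR, [4, 5, 6], WL.conj(), [7, 8, 6], T1, [8, 7, 2, 3])
B = np.einsum(T0, [0, 1, 4, 5], T1, [5, 4, 2, 3])
assert np.allclose(A, B)
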
diff --git a/test/mps/test_apply.py b/test/mps/test_apply.py
index 22e805c..995abda 100644
--- a/test/mps/test_apply.py
+++ b/test/mps/test_apply.py
@@ -4,11 +4,13 @@
 from pytest import approx
 
 
-def apply(op, m0, m1, v, pos, maxdim=None):
+def apply(op, m0, m1, m2, m3, v, pos, maxdim=None):
     m0 = q.mps.apply(m0, op, pos)
-    m1 = q.mps.apply(m1, op, pos, maxdim)
+    m1 = q.mps.apply(m1, op, pos)
+    m2 = q.mps.apply(m2, op, pos, maxdim)
+    m3 = q.mps.apply(m3, op, pos, maxdim)
     v = q.sv.apply(v, op, pos)
-    return m0, m1, v
+    return m0, m1, m2, m3, v
 
 
 def test_apply():
@@ -16,14 +18,16 @@
     p = 64
     maxdim = 2
 
-    m0 = q.mps.MPS(q.mps.product_state(n))
-    m1 = q.mps.MPS(q.mps.product_state(n))
+    m0 = q.mps.MPS_C(q.mps.product_state(n))
+    m1 = q.mps.MPS_P(q.mps.product_state(n))
+    m2 = q.mps.MPS_C(q.mps.product_state(n))
+    m3 = q.mps.MPS_P(q.mps.product_state(n))
     v = q.sv.state_vector(n)
 
     i = 4
     j = 0
     print("apply cz on {} and {}".format(i, j))
-    m0, m1, v = apply(q.op.cz(), m0, m1, v, [i, j], maxdim)
+    m0, m1, m2, m3, v = apply(q.op.cz(), m0, m1, m2, m3, v, [i, j], maxdim)
 
     for _ in range(p):
         i = random.randrange(n)
@@ -32,26 +36,41 @@
             t = random.randrange(3)
             if t == 0:
                 print("apply h on {}".format(i))
-                m0, m1, v = apply(q.op.h(), m0, m1, v, [i], maxdim)
+                m0, m1, m2, m3, v = apply(q.op.h(), m0, m1, m2, m3, v, [i], maxdim)
             elif t == 1:
                 print("apply x on {}".format(i))
-                m0, m1, v = apply(q.op.s(), m0, m1, v, [i], maxdim)
+                m0, m1, m2, m3, v = apply(q.op.s(), m0, m1, m2, m3, v, [i], maxdim)
             elif t == 2:
                 print("apply s on {}".format(i))
-                m0, m1, v = apply(q.op.t(), m0, m1, v, [i], maxdim)
+                m0, m1, m2, m3, v = apply(q.op.t(), m0, m1, m2, m3, v, [i], maxdim)
         else:
             t = random.randrange(2)
             if t == 0:
                 print("apply cx on {} and {}".format(i, j))
-                m0, m1, v = apply(q.op.cx(), m0, m1, v, [i, j], maxdim)
+                m0, m1, m2, m3, v = apply(q.op.cx(), m0, m1, m2, m3, v, [i, j], maxdim)
             elif t == 1:
                 print("apply cz on {} and {}".format(i, j))
-                m0, m1, v = apply(q.op.cz(), m0, m1, v, [i, j], maxdim)
+                m0, m1, m2, m3, v = apply(q.op.cz(), m0, m1, m2, m3, v, [i, j], maxdim)
+        m0._is_canonical()
+        m1._is_canonical()
+        m2._is_canonical()
+        m3._is_canonical()
+        f0 = q.sv.fidelity(q.mps.state_vector(m0), v)
+        f1 = q.sv.fidelity(q.mps.state_vector(m1), v)
+        f2 = q.sv.fidelity(q.mps.state_vector(m2), v)
+        f3 = q.sv.fidelity(q.mps.state_vector(m3), v)
+        print("fidelity = {} {} {} {}".format(f0, f1, f2, f3))
+        assert f0 == approx(1)
+        assert f1 == approx(1)
 
     f0 = q.sv.fidelity(q.mps.state_vector(m0), v)
     f1 = q.sv.fidelity(q.mps.state_vector(m1), v)
-    print("final fidelity = {} and {}".format(f0, f1))
+    f2 = q.sv.fidelity(q.mps.state_vector(m2), v)
+    f3 = q.sv.fidelity(q.mps.state_vector(m3), v)
+    print("final fidelity = {} {} {} {}".format(f0, f1, f2, f3))
     assert f0 == approx(1)
+    assert f1 == approx(1)
+    assert f2 == approx(f3)
 
 
 if __name__ == "__main__":
diff --git a/test/mps/test_canonical.py b/test/mps/test_canonical.py
index 16a6a78..1741aa1 100644
--- a/test/mps/test_canonical.py
+++ b/test/mps/test_canonical.py
@@ -14,21 +14,56 @@
         tensors.append(np.random.random((d, 2, dn)))
         d = dn
     tensors.append(np.random.random((d, 2, 1)))
-    mps = q.mps.MPS(tensors)
-    norm = q.mps.norm(mps)
-    assert q.mps.is_canonical(mps)
+    m0 = q.mps.MPS_C(tensors)
+    m1 = q.mps.MPS_P(tensors)
+    norm = q.mps.norm(m0)
+    assert q.mps.norm(m0) == approx(norm)
+    assert q.mps.norm(m1) == approx(norm)
+    assert q.mps.is_canonical(m0)
+    assert q.mps.is_canonical(m1)
 
     for _ in range(n):
         p = np.random.randint(n)
-        mps._canonicalize(p)
-        assert q.mps.norm(mps) == approx(norm)
-        assert q.mps.is_canonical(mps)
+        m0._canonicalize(p)
+        m1._canonicalize(p)
+        assert q.mps.norm(m0) == approx(norm)
+        assert q.mps.norm(m1) == approx(norm)
+        assert q.mps.is_canonical(m0)
+        assert q.mps.is_canonical(m1)
 
     for _ in range(n):
         p = np.random.randint(n - 1)
-        mps._canonicalize(p, p + 1)
-        assert q.mps.norm(mps) == approx(norm)
-        assert q.mps.is_canonical(mps)
+        m0._canonicalize(p, p + 1)
+        m1._canonicalize(p, p + 1)
+        assert q.mps.norm(m0) == approx(norm)
+        assert q.mps.norm(m1) == approx(norm)
+        assert q.mps.is_canonical(m0)
+        assert q.mps.is_canonical(m1)
+
+    v = np.random.random(2**n).reshape((2,) * n + (1,))
+    v /= np.linalg.norm(v)
+    tensors = q.mps.tensor_decomposition(v, maxdim)
+    m0 = q.mps.MPS_C(tensors)
+    m1 = q.mps.MPS_P(tensors)
+    norm = q.mps.norm(m0)
+
+    for _ in range(n):
+        p = np.random.randint(n)
+        m0._canonicalize(p)
+        m1._canonicalize(p)
+        assert q.mps.norm(m0) == approx(norm)
+        assert q.mps.norm(m1) == approx(norm)
+        assert q.mps.is_canonical(m0)
+        assert q.mps.is_canonical(m1)
+
+    for _ in range(n):
+        p = np.random.randint(n - 1)
+        m0._canonicalize(p, p + 1)
+        m1._canonicalize(p, p + 1)
+        assert q.mps.norm(m0) == approx(norm)
+        assert q.mps.norm(m1) == approx(norm)
+        assert q.mps.is_canonical(m0)
+        assert q.mps.is_canonical(m1)
 
 
 if __name__ == "__main__":
diff --git a/test/mps/test_move.py b/test/mps/test_move.py
index f023101..4856568 100644
--- a/test/mps/test_move.py
+++ b/test/mps/test_move.py
@@ -5,6 +5,7 @@
 
 
 def test_swap():
+    np.random.seed(1234)
     n = 12
     maxdim = 4
     tensors = []
@@ -15,27 +16,42 @@
     tensors.append(np.random.random((d, 2, dn)))
     d = dn
     tensors.append(np.random.random((d, 2, 1)))
-    m = q.mps.MPS(tensors)
-    q.mps.is_canonical(m)
-    norm = q.mps.norm(m)
-    v0 = q.sv.vector(q.mps.state_vector(m))
+    m0 = q.mps.MPS_C(tensors)
+    m1 = q.mps.MPS_P(tensors)
+    q.mps.is_canonical(m0)
+    q.mps.is_canonical(m1)
+    norm = q.mps.norm(m0)
+    v = q.sv.vector(q.mps.state_vector(m0))
 
     for _ in range(64):
         s = np.random.randint(n - 1)
         print(f"swap tensors at {s} and {s+1}")
-        m._canonicalize(s)
-        _swap_tensors(m, s)
-        assert q.mps.norm(m) == approx(norm)
+        m0._canonicalize(s)
+        m1._canonicalize(s)
+        _swap_tensors(m0, s)
+        _swap_tensors(m1, s)
+        print(q.sv.vector(q.mps.state_vector(m0)))
+        print(q.sv.vector(q.mps.state_vector(m1)))
+        q.mps.is_canonical(m0)
+        q.mps.is_canonical(m1)
+        assert q.mps.norm(m0) == approx(norm)
+        assert q.mps.norm(m1) == approx(norm)
 
-    v1 = q.sv.vector(q.mps.state_vector(m))
-    assert len(v0) == len(v1)
+    v0 = q.sv.vector(q.mps.state_vector(m0))
+    v1 = q.sv.vector(q.mps.state_vector(m1))
+    assert len(v) == len(v0)
+    assert len(v) == len(v1)
+    print(v)
     print(v0)
     print(v1)
-    assert q.sv.is_close(v0, v1)
+    assert q.sv.is_close(v, v0)
+    assert q.sv.is_close(v, v1)
 
 
 def test_move():
-    n = 12
+    np.random.seed(1234)
+    # n = 12
+    n = 4
     maxdim = 4
     tensors = []
@@ -45,22 +61,39 @@
     tensors.append(np.random.random((d, 2, dn)))
     d = dn
     tensors.append(np.random.random((d, 2, 1)))
-    m = q.mps.MPS(tensors)
-    q.mps.is_canonical(m)
-    norm = q.mps.norm(m)
-    v0 = q.sv.vector(q.mps.state_vector(m))
+    m0 = q.mps.MPS_C(tensors)
+    m1 = q.mps.MPS_P(tensors)
+    q.mps.is_canonical(m0)
+    q.mps.is_canonical(m1)
+    norm = q.mps.norm(m0)
+    v = q.sv.vector(q.mps.state_vector(m0))
+    print(q.sv.vector(q.mps.state_vector(m0)))
+    print(q.sv.vector(q.mps.state_vector(m1)))
 
     for _ in range(16):
         p = np.random.randint(n)
         s = np.random.randint(n)
-        _move_qubit(m, p, s)
-        assert q.mps.norm(m) == approx(norm)
+        print(f"move qubit at {p} to {s}")
+        q.mps.is_canonical(m0)
+        q.mps.is_canonical(m1)
+        _move_qubit(m0, p, s)
+        _move_qubit(m1, p, s)
+        print(q.sv.vector(q.mps.state_vector(m0)))
+        print(q.sv.vector(q.mps.state_vector(m1)))
+        q.mps.is_canonical(m0)
+        q.mps.is_canonical(m1)
+        assert q.mps.norm(m0) == approx(norm)
+        assert q.mps.norm(m1) == approx(norm)
 
-    v1 = q.sv.vector(q.mps.state_vector(m))
-    assert len(v0) == len(v1)
+    v0 = q.sv.vector(q.mps.state_vector(m0))
+    v1 = q.sv.vector(q.mps.state_vector(m1))
+    assert len(v) == len(v0)
+    assert len(v) == len(v1)
+    print(v)
     print(v0)
     print(v1)
-    assert q.sv.is_close(v0, v1)
+    assert q.sv.is_close(v, v0)
+    assert q.sv.is_close(v, v1)
 
 
 if __name__ == "__main__":
diff --git a/test/mps/test_mps.py b/test/mps/test_mps.py
index fca49d8..3d6f010 100644
--- a/test/mps/test_mps.py
+++ b/test/mps/test_mps.py
@@ -5,15 +5,12 @@
 def test_mps():
     n = 4
     c = q.util.str2binary("1100")
-    mps = q.mps.MPS(q.mps.product_state(n, c))
-    print(n, c, mps)
-    sv = q.mps.state_vector(mps)
-    print(sv)
-    print(q.sv.state_vector(n, c))
-    v = q.sv.vector(sv)
-    print(v)
-
-    assert np.allclose(sv, q.sv.state_vector(n, c))
+    m0 = q.mps.MPS_C(q.mps.product_state(n, c))
+    m1 = q.mps.MPS_P(q.mps.product_state(n, c))
+    v0 = q.mps.state_vector(m0)
+    v1 = q.mps.state_vector(m1)
+    assert np.allclose(v0, q.sv.state_vector(n, c))
+    assert np.allclose(v1, q.sv.state_vector(n, c))
 
 
 if __name__ == "__main__":
diff --git a/test/mps/test_projector.py b/test/mps/test_projector.py
index d8a540c..858b38e 100644
--- a/test/mps/test_projector.py
+++ b/test/mps/test_projector.py
@@ -21,21 +21,45 @@
     for _ in range(nt):
         n0, n1, n2, n3, n4, n5, d = np.random.randint(2, maxn, size=(7,))
         T0 = np.random.random((n0, n1, n2, n3))
-        T1 = np.random.random((n2, n3, n4, n5))
-        WL, WR = q.mps.projector(T0, [0, 1, 4, 5], T1, [4, 5, 2, 3], nkeep=d)
+        T1 = np.random.random((n3, n2, n4, n5))
+        WL, WR = q.mps.projector(T0, [0, 1, 4, 5], T1, [5, 4, 2, 3], nkeep=d)
         assert WL.shape[2] <= d and WR.shape[2] <= d
         assert WL.shape[0] == n2 and WL.shape[1] == n3
         assert WR.shape[0] == n2 and WR.shape[1] == n3
         A = np.einsum(
-            T0, [0, 1, 4, 5], WR, [4, 5, 6], WL.conj(), [7, 8, 6], T1, [7, 8, 2, 3]
+            T0, [0, 1, 4, 5], WR, [4, 5, 6], WL.conj(), [7, 8, 6], T1, [8, 7, 2, 3]
         )
         L, R = q.mps.tensor_svd(
-            np.einsum(T0, [0, 1, 4, 5], T1, [4, 5, 2, 3]), [[0, 1], [2, 3]], nkeep=d
+            np.einsum(T0, [0, 1, 4, 5], T1, [5, 4, 2, 3]), [[0, 1], [2, 3]], nkeep=d
         )
         B = np.einsum(L, [0, 1, 4], R, [4, 2, 3])
         assert np.allclose(A, B)
 
+    for _ in range(nt):
+        w0 = np.random.random((4, 4)) + 1.0j * np.random.random((4, 4))
+        t0 = np.random.random((4, 2, 3)) + 1.0j * np.random.random((4, 2, 3))
+        t1 = np.random.random((3, 2, 2)) + 1.0j * np.random.random((3, 2, 2))
+        w2 = np.random.random((2, 2)) + 1.0j * np.random.random((2, 2))
+        p = np.random.random((2, 2, 2, 2)) + 1.0j * np.random.random((2, 2, 2, 2))
+        B = np.einsum(t0, [0, 4, 6], t1, [6, 5, 3], p, [1, 2, 4, 5])
+        p0, p1 = q.mps.svd.tensor_svd(p, [[0, 2], [1, 3]])
+        assert np.allclose(np.einsum(p0, [0, 2, 4], p1, [4, 1, 3]), p)
+        t0 = np.einsum(t0, [0, 4, 3], p0, [1, 4, 2])
+        t1 = np.einsum(t1, [0, 4, 3], p1, [1, 2, 4])
+        assert np.allclose(np.einsum(t0, [0, 1, 4, 5], t1, [5, 4, 2, 3]), B)
+        tt0 = np.einsum(w0, [0, 4], t0, [4, 1, 2, 3])
+        tt1 = np.einsum(t1, [0, 1, 2, 4], w2, [4, 3])
+        WL, WR = q.mps.projector(tt0, [0, 1, 4, 5], tt1, [5, 4, 2, 3])
+        assert np.allclose(
+            np.einsum(WL.conj(), [2, 3, 0], WR, [2, 3, 1]), np.identity(WL.shape[2])
+        )
+        tt0 = np.einsum(t0, [0, 1, 3, 4], WR, [3, 4, 2])
+        tt1 = np.einsum(WL.conj(), [3, 4, 0], t1, [4, 3, 1, 2])
+        A = np.einsum(tt0, [0, 1, 4], tt1, [4, 2, 3])
+        print(np.linalg.norm(A - B))
+        assert np.allclose(A, B)
+
 
 if __name__ == "__main__":
     test_projector()
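
test_svd.py below pins down the compact_svd convention that the rest of the diff relies on: the factors satisfy A ≈ U diag(S) V†, so reconstruction needs V.conj(), and this now also holds for complex input. A minimal check of that contract (assuming `import qailo as q`):

import numpy as np
import qailo as q

A = np.random.random((5, 3)) + 1j * np.random.random((5, 3))
S, U, V = q.mps.compact_svd(A)
assert np.allclose(A, U @ np.diag(S) @ V.conj().T)       # A = U diag(S) V^dagger
assert np.allclose(U.conj().T @ U, np.identity(len(S)))  # isometric factor

S, U, V = q.mps.compact_svd(A, nkeep=2)                  # keep the two largest singular values
assert S.shape == (2,) and U.shape == (5, 2) and V.shape == (3, 2)
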
diff --git a/test/mps/test_svd.py b/test/mps/test_svd.py
index 0ee7fe5..4db8a9b 100644
--- a/test/mps/test_svd.py
+++ b/test/mps/test_svd.py
@@ -9,16 +9,31 @@
         m, n, d = np.random.randint(2, maxn, size=(3,))
         A = np.random.random((m, n))
         S, U, V = q.mps.compact_svd(A)
-        assert np.allclose(A, np.einsum("k,ik,jk->ij", S, U, V))
+        assert np.allclose(A, np.einsum("k,ik,jk->ij", S, U, V.conj()))
         S, U, V = q.mps.compact_svd(A, nkeep=d)
         assert S.shape[0] == U.shape[1]
         assert S.shape[0] == V.shape[1]
         assert U.shape[0] == m
         assert V.shape[0] == n
         if d >= min(m, n):
-            assert np.allclose(A, np.einsum("k,ik,jk->ij", S, U, V))
+            assert np.allclose(A, np.einsum("k,ik,jk->ij", S, U, V.conj()))
         else:
-            assert not np.allclose(A, np.einsum("k,ik,jk->ij", S, U, V))
+            assert not np.allclose(A, np.einsum("k,ik,jk->ij", S, U, V.conj()))
+
+    for _ in range(nt):
+        m, n, d = np.random.randint(2, maxn, size=(3,))
+        A = np.random.random((m, n)) + 1j * np.random.random((m, n))
+        S, U, V = q.mps.compact_svd(A)
+        assert np.allclose(A, np.einsum("k,ik,jk->ij", S, U, V.conj()))
+        S, U, V = q.mps.compact_svd(A, nkeep=d)
+        assert S.shape[0] == U.shape[1]
+        assert S.shape[0] == V.shape[1]
+        assert U.shape[0] == m
+        assert V.shape[0] == n
+        if d >= min(m, n):
+            assert np.allclose(A, np.einsum("k,ik,jk->ij", S, U, V.conj()))
+        else:
+            assert not np.allclose(A, np.einsum("k,ik,jk->ij", S, U, V.conj()))
 
 
 def test_svd_left():
@@ -47,6 +62,29 @@
         else:
             assert not np.allclose(T, np.einsum("ijl,lk->ijk", L, R))
 
+    for _ in range(nt):
+        m, n, p, d = np.random.randint(2, maxn, size=(4,))
+        T = np.random.random((m, n, p)) + 1j * np.random.random((m, n, p))
+        L, R = q.mps.tensor_svd(T, [[0, 1], [2]], "left")
+        assert len(L.shape) == 3
+        assert len(R.shape) == 2
+        assert L.shape[0] == m
+        assert L.shape[1] == n
+        assert L.shape[2] == R.shape[0]
+        assert R.shape[1] == p
+        assert np.allclose(T, np.einsum("ijl,lk->ijk", L, R))
+        L, R = q.mps.tensor_svd(T, [[0, 1], [2]], "left", nkeep=d)
+        assert len(L.shape) == 3
+        assert len(R.shape) == 2
+        assert L.shape[0] == m
+        assert L.shape[1] == n
+        assert L.shape[2] == R.shape[0]
+        assert R.shape[1] == p
+        if d >= min(m * n, p):
+            assert np.allclose(T, np.einsum("ijl,lk->ijk", L, R))
+        else:
+            assert not np.allclose(T, np.einsum("ijl,lk->ijk", L, R))
+
 
 def test_svd_right():
     maxn = 16
@@ -74,6 +112,29 @@
         else:
             assert not np.allclose(T, np.einsum("il,ljk->ijk", L, R))
 
+    for _ in range(nt):
+        m, n, p, d = np.random.randint(2, maxn, size=(4,))
+        T = np.random.random((m, n, p)) + 1j * np.random.random((m, n, p))
+        L, R = q.mps.tensor_svd(T, [[0], [1, 2]], "right")
+        assert len(L.shape) == 2
+        assert len(R.shape) == 3
+        assert L.shape[0] == m
+        assert L.shape[1] == R.shape[0]
+        assert R.shape[1] == n
+        assert R.shape[2] == p
+        assert np.allclose(T, np.einsum("il,ljk->ijk", L, R))
+        L, R = q.mps.tensor_svd(T, [[0], [1, 2]], "right", nkeep=d)
+        assert len(L.shape) == 2
+        assert len(R.shape) == 3
+        assert L.shape[0] == m
+        assert L.shape[1] == R.shape[0]
+        assert R.shape[1] == n
+        assert R.shape[2] == p
+        if d >= min(m, n * p):
+            assert np.allclose(T, np.einsum("il,ljk->ijk", L, R))
+        else:
+            assert not np.allclose(T, np.einsum("il,ljk->ijk", L, R))
+
 
 def test_svd_two():
     maxn = 8
@@ -103,6 +164,31 @@
         else:
             assert not np.allclose(T, np.einsum("ijm,mkl->ijkl", L, R))
 
+    for _ in range(nt):
+        m, n, p, r, d = np.random.randint(2, maxn, size=(5,))
+        T = np.random.random((m, n, p, r)) + 1j * np.random.random((m, n, p, r))
+        L, R = q.mps.tensor_svd(T, [[0, 1], [2, 3]])
+        assert len(L.shape) == 3
+        assert len(R.shape) == 3
+        assert L.shape[0] == m
+        assert L.shape[1] == n
+        assert L.shape[2] == R.shape[0]
+        assert R.shape[1] == p
+        assert R.shape[2] == r
+        assert np.allclose(T, np.einsum("ijm,mkl->ijkl", L, R))
+        L, R = q.mps.tensor_svd(T, [[0, 1], [2, 3]], nkeep=d)
+        assert len(L.shape) == 3
+        assert len(R.shape) == 3
+        assert L.shape[0] == m
+        assert L.shape[1] == n
+        assert L.shape[2] == R.shape[0]
+        assert R.shape[1] == p
+        assert R.shape[2] == r
+        if d >= min(m * n, p * r):
+            assert np.allclose(T, np.einsum("ijm,mkl->ijkl", L, R))
+        else:
+            assert not np.allclose(T, np.einsum("ijm,mkl->ijkl", L, R))
+
 
 if __name__ == "__main__":
     test_compact_svd()
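
Taken together, both backends are driven through the same q.mps.apply front end and validated against the dense state-vector simulator, as test_apply.py above does with random circuits. A condensed sketch of that workflow (assuming `import qailo as q`; gates and state-vector helpers as used in the tests):

import qailo as q

n = 5
mc = q.mps.MPS_C(q.mps.product_state(n))  # canonical-form MPS (renamed from MPS)
mp = q.mps.MPS_P(q.mps.product_state(n))  # projector-based MPS added in this diff
v = q.sv.state_vector(n)

for op, pos in [(q.op.h(), [0]), (q.op.cx(), [0, 3]), (q.op.cz(), [4, 1])]:
    mc = q.mps.apply(mc, op, pos)  # pass maxdim as a 4th argument to truncate
    mp = q.mps.apply(mp, op, pos)
    v = q.sv.apply(v, op, pos)

fc = q.sv.fidelity(q.mps.state_vector(mc), v)
fp = q.sv.fidelity(q.mps.state_vector(mp), v)
assert abs(fc - 1) < 1e-8 and abs(fp - 1) < 1e-8  # exact while no bond is truncated
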