vae.py
import torch
import torch.nn as nn

from ..utils import export, load_from_local_or_url
from typing import Any, Optional


@export
class VAE(nn.Module):
    def __init__(
        self,
        image_size,
        nz: int = 100,
        **kwargs: Any
    ):
        super().__init__()

        self.image_size = image_size
        self.nz = nz

        # Q(z|X): the encoder maps a flattened image to the concatenated
        # mean and log-variance of the approximate posterior over z.
        self.encoder = nn.Sequential(
            nn.Flatten(1),
            nn.Linear(self.image_size ** 2, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, self.nz * 2)
        )

        # P(X|z): the decoder maps a latent code back to an image in [0, 1].
        self.decoder = nn.Sequential(
            nn.Linear(self.nz, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, self.image_size ** 2),
            nn.Sigmoid(),
            nn.Unflatten(1, (1, image_size, image_size))
        )

    def sample_z(self, mu, logvar):
        # Reparameterization trick: z = mu + sigma * eps, with eps ~ N(0, I).
        eps = torch.randn_like(logvar)
        return mu + eps * torch.exp(0.5 * logvar)

    def forward(self, x):
        mu, logvar = torch.chunk(self.encoder(x), 2, dim=1)
        z = self.sample_z(mu, logvar)
        x = self.decoder(z)
        return x, mu, logvar


@export
def vae(
    pretrained: bool = False,
    pth: Optional[str] = None,
    progress: bool = True,
    **kwargs: Any
):
    model = VAE(**kwargs)
    if pretrained:
        load_from_local_or_url(model, pth, kwargs.get('url', None), progress)
    return model
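
This file defines the model only. Below is a minimal usage sketch, assuming 28x28 inputs in [0, 1], a random batch in place of a real dataloader, and the standard VAE objective: pixel-wise binary cross-entropy for reconstruction plus the closed-form KL divergence between Q(z|X) and the unit Gaussian prior. The training names below are illustrative assumptions, not part of this repository.

# Usage sketch (assumed, not part of vae.py): one optimization step of the VAE objective.
import torch
import torch.nn.functional as F

model = vae(image_size=28)                                  # factory defined above; nz defaults to 100
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

x = torch.rand(16, 1, 28, 28)                               # dummy batch of 16 images in [0, 1]
x_hat, mu, logvar = model(x)

# Reconstruction term: the decoder ends in Sigmoid, so per-pixel binary cross-entropy applies.
recon = F.binary_cross_entropy(x_hat, x, reduction='sum')

# KL(Q(z|X) || N(0, I)) in closed form.
kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())

loss = recon + kld
optimizer.zero_grad()
loss.backward()
optimizer.step()

# After training, new images can be drawn by decoding samples from the prior.
with torch.no_grad():
    samples = model.decoder(torch.randn(16, model.nz))      # shape (16, 1, 28, 28)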