positional_encoding.py
"""
Reference: https://github.com/full-stack-deep-learning/fsdl-text-recognizer-2021-labs/blob/main/lab9/text_recognizer/models/transformer_util.py
"""
import math

import torch
import torch.nn as nn
from torch import Tensor


class PositionalEncodingImage(nn.Module):
    """
    Adds 2-D positional encodings to the feature map produced by the encoder.

    The first d_model // 2 channels encode the row (height) position and the
    remaining channels encode the column (width) position, each using the 1-D
    sinusoidal scheme from PositionalEncoding below.

    Following https://arxiv.org/abs/2103.06450 by Sumeet Singh.
    """

    def __init__(self, d_model: int, max_h: int = 2000, max_w: int = 2000) -> None:
        super().__init__()
        self.d_model = d_model
        assert d_model % 2 == 0, f"Embedding depth {d_model} is not even"
        pe = self.make_pe(d_model=d_model, max_h=max_h, max_w=max_w)  # (d_model, max_h, max_w)
        self.register_buffer("pe", pe)

    @staticmethod
    def make_pe(d_model: int, max_h: int, max_w: int) -> torch.Tensor:
        """Build the 2-D encoding by stacking a per-row and a per-column 1-D encoding."""
        pe_h = PositionalEncoding.make_pe(d_model=d_model // 2, max_len=max_h)  # (max_h, 1, d_model // 2)
        pe_h = pe_h.permute(2, 0, 1).expand(-1, -1, max_w)  # (d_model // 2, max_h, max_w)

        pe_w = PositionalEncoding.make_pe(d_model=d_model // 2, max_len=max_w)  # (max_w, 1, d_model // 2)
        pe_w = pe_w.permute(2, 1, 0).expand(-1, max_h, -1)  # (d_model // 2, max_h, max_w)

        pe = torch.cat([pe_h, pe_w], dim=0)  # (d_model, max_h, max_w)
        return pe

    def forward(self, x: Tensor) -> Tensor:
        """Add positional encodings to x of shape (B, d_model, H, W)."""
        assert x.shape[1] == self.pe.shape[0]  # type: ignore
        x = x + self.pe[:, : x.size(2), : x.size(3)]  # type: ignore
        return x
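
# A hypothetical usage sketch (not part of the original file): in an
# image-to-sequence model, a CNN encoder's feature map would typically be
# passed through PositionalEncodingImage before being flattened into a
# sequence for the transformer, e.g.
#     x = pos_encoding_image(feature_map)   # (B, d_model, H, W)
#     x = x.flatten(2).permute(2, 0, 1)     # (H*W, B, d_model)
# The variable names here are illustrative, not from the original repo.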


class PositionalEncoding(torch.nn.Module):
    """Classic Attention-is-all-you-need positional encoding."""

    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000) -> None:
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = self.make_pe(d_model, max_len)  # (max_len, 1, d_model)
        self.register_buffer("pe", pe)

    @staticmethod
    def make_pe(d_model: int, max_len: int) -> Tensor:
        """Compute the sinusoidal positional encoding table.

        PE(pos, 2i)     = sin(pos / 10000^(2i / d_model))
        PE(pos, 2i + 1) = cos(pos / 10000^(2i / d_model))
        """
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)  # (max_len, 1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even indices
        pe[:, 1::2] = torch.cos(position * div_term)  # odd indices
        pe = pe.unsqueeze(1)  # (max_len, 1, d_model)
        return pe

    def forward(self, x: Tensor) -> Tensor:
        """Add positional encodings to x of shape (S, B, d_model) and apply dropout."""
        assert x.shape[2] == self.pe.shape[2]  # type: ignore
        x = x + self.pe[: x.size(0)]  # type: ignore
        return self.dropout(x)
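

# A minimal smoke test, not in the original file; the small max_h / max_w
# values are chosen only to keep the demo buffers tiny (the defaults of 2000
# would allocate a d_model x 2000 x 2000 tensor).
if __name__ == "__main__":
    d_model = 64

    # 2-D case: feature map of shape (B, d_model, H, W)
    pe_2d = PositionalEncodingImage(d_model=d_model, max_h=100, max_w=100)
    feature_map = torch.zeros(2, d_model, 10, 12)
    out_2d = pe_2d(feature_map)
    assert out_2d.shape == (2, d_model, 10, 12)

    # 1-D case: token sequence of shape (S, B, d_model)
    pe_1d = PositionalEncoding(d_model=d_model)
    tokens = torch.zeros(5, 2, d_model)
    out_1d = pe_1d(tokens)
    assert out_1d.shape == (5, 2, d_model)

    print("positional encoding shapes OK")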