Add SuperTransformerEncoder

lib/xlayers/super_attention.py
@@ -29,8 +29,8 @@ class SuperAttention(SuperModule):
         proj_dim: IntSpaceType,
         num_heads: IntSpaceType,
         qkv_bias: BoolSpaceType = False,
-        attn_drop: float = 0.0,
-        proj_drop: float = 0.0,
+        attn_drop: Optional[float] = None,
+        proj_drop: Optional[float] = None,
     ):
         super(SuperAttention, self).__init__()
         self._input_dim = input_dim
@@ -45,9 +45,9 @@ class SuperAttention(SuperModule):
         self.k_fc = SuperLinear(input_dim, input_dim, bias=qkv_bias)
         self.v_fc = SuperLinear(input_dim, input_dim, bias=qkv_bias)
 
-        self.attn_drop = nn.Dropout(attn_drop)
+        self.attn_drop = nn.Dropout(attn_drop or 0.0)
         self.proj = SuperLinear(input_dim, proj_dim)
-        self.proj_drop = nn.Dropout(proj_drop)
+        self.proj_drop = nn.Dropout(proj_drop or 0.0)
 
     @property
     def num_heads(self):
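
The commit switches the dropout arguments to the `Optional[float] = None` convention, with `None` meaning "no dropout": `attn_drop or 0.0` normalizes `None` to `0.0` before the layer is built. A minimal sketch of the pattern (the helper name below is illustrative, not from the repository):

from typing import Optional
import torch.nn as nn

def make_dropout(p: Optional[float] = None) -> nn.Dropout:
    # `None` and 0.0 both yield an inactive Dropout layer.
    return nn.Dropout(p or 0.0)
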

lib/xlayers/__init__.py
@@ -4,5 +4,7 @@
 from .super_module import SuperRunMode
 from .super_module import SuperModule
 from .super_linear import SuperLinear
-from .super_linear import SuperMLP
+from .super_linear import SuperMLPv1, SuperMLPv2
+from .super_norm import SuperLayerNorm1D
 from .super_attention import SuperAttention
+from .super_transformer import SuperTransformerEncoderLayer

lib/xlayers/super_linear.py
@@ -113,7 +113,7 @@ class SuperLinear(SuperModule):
         )
 
 
-class SuperMLP(SuperModule):
+class SuperMLPv1(SuperModule):
     """An MLP layer: FC -> Activation -> Drop -> FC -> Drop."""
 
     def __init__(
@@ -124,7 +124,7 @@ class SuperMLP(SuperModule):
         act_layer: Callable[[], nn.Module] = nn.GELU,
         drop: Optional[float] = None,
     ):
-        super(SuperMLP, self).__init__()
+        super(SuperMLPv1, self).__init__()
         self._in_features = in_features
         self._hidden_features = hidden_features
         self._out_features = out_features
@@ -146,20 +146,17 @@ class SuperMLP(SuperModule):
         return root_node
 
     def apply_candidate(self, abstract_child: spaces.VirtualNode):
-        super(SuperMLP, self).apply_candidate(abstract_child)
+        super(SuperMLPv1, self).apply_candidate(abstract_child)
         if "fc1" in abstract_child:
             self.fc1.apply_candidate(abstract_child["fc1"])
         if "fc2" in abstract_child:
             self.fc2.apply_candidate(abstract_child["fc2"])
 
     def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
-        return self._unified_forward(input)
+        return self.forward_raw(input)
 
     def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
-        return self._unified_forward(input)
-
-    def _unified_forward(self, x):
-        x = self.fc1(x)
+        x = self.fc1(input)
         x = self.act(x)
         x = self.drop(x)
         x = self.fc2(x)
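
The refactor above collapses `_unified_forward` into `forward_raw` and makes `forward_candidate` delegate to it. `super_module.py` itself is not part of this diff; the following is a hypothetical sketch of the dispatch those two methods serve (the class name and the boolean flag are assumptions; the repository uses a `SuperRunMode` enum per the imports in `__init__.py`):

import torch
import torch.nn as nn

class SuperModuleSketch(nn.Module):
    """Hypothetical sketch: route forward() by the current run mode."""

    def __init__(self):
        super().__init__()
        self._candidate_mode = False  # stand-in for the repo's SuperRunMode state

    def forward_raw(self, input):
        raise NotImplementedError  # run with the maximal (super) architecture

    def forward_candidate(self, input):
        raise NotImplementedError  # run with the currently applied candidate

    def forward(self, input):
        fwd = self.forward_candidate if self._candidate_mode else self.forward_raw
        return fwd(input)
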
@@ -173,3 +170,137 @@ class SuperMLP(SuperModule):
             self._out_features,
             self._drop_rate,
         )
+
+
+class SuperMLPv2(SuperModule):
+    """An MLP layer: FC -> Activation -> Drop -> FC -> Drop."""
+
+    def __init__(
+        self,
+        in_features: IntSpaceType,
+        hidden_multiplier: IntSpaceType,
+        out_features: IntSpaceType,
+        act_layer: Callable[[], nn.Module] = nn.GELU,
+        drop: Optional[float] = None,
+    ):
+        super(SuperMLPv2, self).__init__()
+        self._in_features = in_features
+        self._hidden_multiplier = hidden_multiplier
+        self._out_features = out_features
+        self._drop_rate = drop
+        self._params = nn.ParameterDict({})
+
+        self._create_linear(
+            "fc1", self.in_features, int(self.in_features * self.hidden_multiplier)
+        )
+        self._create_linear(
+            "fc2", int(self.in_features * self.hidden_multiplier), self.out_features
+        )
+        self.act = act_layer()
+        self.drop = nn.Dropout(drop or 0.0)
+        self.reset_parameters()
+
+    @property
+    def in_features(self):
+        return spaces.get_max(self._in_features)
+
+    @property
+    def hidden_multiplier(self):
+        return spaces.get_max(self._hidden_multiplier)
+
+    @property
+    def out_features(self):
+        return spaces.get_max(self._out_features)
+
+    def _create_linear(self, name, inC, outC):
+        self._params["{:}_super_weight".format(name)] = torch.nn.Parameter(
+            torch.Tensor(outC, inC)
+        )
+        self._params["{:}_super_bias".format(name)] = torch.nn.Parameter(
+            torch.Tensor(outC)
+        )
+
+    def reset_parameters(self) -> None:
+        nn.init.kaiming_uniform_(self._params["fc1_super_weight"], a=math.sqrt(5))
+        nn.init.kaiming_uniform_(self._params["fc2_super_weight"], a=math.sqrt(5))
+        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(
+            self._params["fc1_super_weight"]
+        )
+        bound = 1 / math.sqrt(fan_in)
+        nn.init.uniform_(self._params["fc1_super_bias"], -bound, bound)
+        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(
+            self._params["fc2_super_weight"]
+        )
+        bound = 1 / math.sqrt(fan_in)
+        nn.init.uniform_(self._params["fc2_super_bias"], -bound, bound)
+
+    @property
+    def abstract_search_space(self):
+        root_node = spaces.VirtualNode(id(self))
+        if not spaces.is_determined(self._in_features):
+            root_node.append(
+                "_in_features", self._in_features.abstract(reuse_last=True)
+            )
+        if not spaces.is_determined(self._hidden_multiplier):
+            root_node.append(
+                "_hidden_multiplier", self._hidden_multiplier.abstract(reuse_last=True)
+            )
+        if not spaces.is_determined(self._out_features):
+            root_node.append(
+                "_out_features", self._out_features.abstract(reuse_last=True)
+            )
+        return root_node
+
+    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
+        # check inputs ->
+        if not spaces.is_determined(self._in_features):
+            expected_input_dim = self.abstract_child["_in_features"].value
+        else:
+            expected_input_dim = spaces.get_determined_value(self._in_features)
+        if input.size(-1) != expected_input_dim:
+            raise ValueError(
+                "Expect the input dim of {:} instead of {:}".format(
+                    expected_input_dim, input.size(-1)
+                )
+            )
+        # create the weight and bias matrix for fc1
+        # (the multiplier alone is read here; the input dim is applied once below)
+        if not spaces.is_determined(self._hidden_multiplier):
+            hmul = self.abstract_child["_hidden_multiplier"].value
+        else:
+            hmul = spaces.get_determined_value(self._hidden_multiplier)
+        hidden_dim = int(expected_input_dim * hmul)
+        _fc1_weight = self._params["fc1_super_weight"][:hidden_dim, :expected_input_dim]
+        _fc1_bias = self._params["fc1_super_bias"][:hidden_dim]
+        x = F.linear(input, _fc1_weight, _fc1_bias)
+        x = self.act(x)
+        x = self.drop(x)
+        # create the weight and bias matrix for fc2
+        if not spaces.is_determined(self._out_features):
+            out_dim = self.abstract_child["_out_features"].value
+        else:
+            out_dim = spaces.get_determined_value(self._out_features)
+        _fc2_weight = self._params["fc2_super_weight"][:out_dim, :hidden_dim]
+        _fc2_bias = self._params["fc2_super_bias"][:out_dim]
+        x = F.linear(x, _fc2_weight, _fc2_bias)
+        x = self.drop(x)
+        return x
+
+    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
+        x = F.linear(
+            input, self._params["fc1_super_weight"], self._params["fc1_super_bias"]
+        )
+        x = self.act(x)
+        x = self.drop(x)
+        x = F.linear(
+            x, self._params["fc2_super_weight"], self._params["fc2_super_bias"]
+        )
+        x = self.drop(x)
+        return x
+
+    def extra_repr(self) -> str:
+        return "in_features={:}, hidden_multiplier={:}, out_features={:}, drop={:}, fc1 -> act -> drop -> fc2 -> drop,".format(
+            self._in_features,
+            self._hidden_multiplier,
+            self._out_features,
+            self._drop_rate,
+        )
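
The core trick in SuperMLPv2 above: allocate each weight once at its maximal size, then slice the top-left sub-matrix matching the sampled candidate inside forward_candidate. A self-contained illustration of that slicing in plain PyTorch (the dimensions are made up and the `spaces` bookkeeping is omitted):

import torch
import torch.nn.functional as F

max_in, max_mul = 64, 4
super_weight = torch.randn(max_in * max_mul, max_in)  # fc1 at maximal size
super_bias = torch.randn(max_in * max_mul)

# a sampled candidate: smaller input dim and multiplier
in_dim, mul = 32, 2
hidden_dim = int(in_dim * mul)

x = torch.randn(8, in_dim)
w = super_weight[:hidden_dim, :in_dim]  # top-left slice of the super weight
b = super_bias[:hidden_dim]
y = F.linear(x, w, b)
print(y.shape)  # torch.Size([8, 64])

Because every candidate reads a slice of the same tensors, all candidates share (and jointly train) the super network's parameters.
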

lib/xlayers/super_norm.py (new file, 82 lines)
@@ -0,0 +1,82 @@
+#####################################################
+# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
+#####################################################
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import math
+from typing import Optional, Callable
+
+import spaces
+from .super_module import SuperModule
+from .super_module import IntSpaceType
+from .super_module import BoolSpaceType
+
+
+class SuperLayerNorm1D(SuperModule):
+    """Super Layer Norm."""
+
+    def __init__(
+        self, dim: IntSpaceType, eps: float = 1e-5, elementwise_affine: bool = True
+    ) -> None:
+        super(SuperLayerNorm1D, self).__init__()
+        self._in_dim = dim
+        self._eps = eps
+        self._elementwise_affine = elementwise_affine
+        if self._elementwise_affine:
+            self.weight = nn.Parameter(torch.Tensor(self.in_dim))
+            self.bias = nn.Parameter(torch.Tensor(self.in_dim))
+        else:
+            self.register_parameter("weight", None)
+            self.register_parameter("bias", None)
+        self.reset_parameters()
+
+    @property
+    def in_dim(self):
+        return spaces.get_max(self._in_dim)
+
+    @property
+    def eps(self):
+        return self._eps
+
+    def reset_parameters(self) -> None:
+        if self._elementwise_affine:
+            nn.init.ones_(self.weight)
+            nn.init.zeros_(self.bias)
+
+    @property
+    def abstract_search_space(self):
+        root_node = spaces.VirtualNode(id(self))
+        if not spaces.is_determined(self._in_dim):
+            root_node.append("_in_dim", self._in_dim.abstract(reuse_last=True))
+        return root_node
+
+    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
+        # check inputs ->
+        if not spaces.is_determined(self._in_dim):
+            expected_input_dim = self.abstract_child["_in_dim"].value
+        else:
+            expected_input_dim = spaces.get_determined_value(self._in_dim)
+        if input.size(-1) != expected_input_dim:
+            raise ValueError(
+                "Expect the input dim of {:} instead of {:}".format(
+                    expected_input_dim, input.size(-1)
+                )
+            )
+        if self._elementwise_affine:
+            weight = self.weight[:expected_input_dim]
+            bias = self.bias[:expected_input_dim]
+        else:
+            weight, bias = None, None
+        return F.layer_norm(input, (expected_input_dim,), weight, bias, self.eps)
+
+    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
+        return F.layer_norm(input, (self.in_dim,), self.weight, self.bias, self.eps)
+
+    def extra_repr(self) -> str:
+        return "{in_dim}, eps={eps}, elementwise_affine={elementwise_affine}".format(
+            in_dim=self._in_dim,
+            eps=self._eps,
+            elementwise_affine=self._elementwise_affine,
+        )
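
SuperLayerNorm1D applies the same slicing idea to the affine parameters: weight and bias are allocated at the maximal dimension and truncated to the candidate dimension before the F.layer_norm call. A plain-PyTorch illustration (dimensions are made up):

import torch
import torch.nn.functional as F

max_dim = 64
weight = torch.ones(max_dim)   # affine scale at maximal size
bias = torch.zeros(max_dim)    # affine shift at maximal size

cand_dim = 48                  # sampled candidate dimension
x = torch.randn(8, cand_dim)
y = F.layer_norm(x, (cand_dim,), weight[:cand_dim], bias[:cand_dim], 1e-5)
print(y.shape)  # torch.Size([8, 48])
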

lib/xlayers/super_transformer.py (new file, 100 lines)
@@ -0,0 +1,100 @@
+#####################################################
+# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
+#####################################################
+from __future__ import division
+from __future__ import print_function
+
+import math
+from functools import partial
+from typing import Optional, Callable
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import spaces
+from .super_module import IntSpaceType
+from .super_module import BoolSpaceType
+from .super_module import SuperModule
+from .super_linear import SuperMLPv2
+from .super_norm import SuperLayerNorm1D
+from .super_attention import SuperAttention
+
+
+class SuperTransformerEncoderLayer(SuperModule):
+    """TransformerEncoderLayer is made up of self-attention and a feed-forward network.
+    This is a super model for TransformerEncoderLayer that supports search over the transformer encoder layer.
+
+    Reference:
+      - Paper: Attention Is All You Need, NeurIPS 2017
+      - PyTorch Implementation: https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer
+
+    Details:
+      MHA -> residual -> norm -> MLP -> residual -> norm
+    """
+
+    def __init__(
+        self,
+        input_dim: IntSpaceType,
+        output_dim: IntSpaceType,
+        num_heads: IntSpaceType,
+        qkv_bias: BoolSpaceType = False,
+        mlp_hidden_multiplier: IntSpaceType = 4,
+        drop: Optional[float] = None,
+        act_layer: Callable[[], nn.Module] = nn.GELU,
+    ):
+        super(SuperTransformerEncoderLayer, self).__init__()
+        self.mha = SuperAttention(
+            input_dim,
+            input_dim,
+            num_heads=num_heads,
+            qkv_bias=qkv_bias,
+            attn_drop=drop,
+            proj_drop=drop,
+        )
+        self.drop1 = nn.Dropout(drop or 0.0)
+        self.norm1 = SuperLayerNorm1D(input_dim)
+        self.mlp = SuperMLPv2(
+            input_dim,
+            hidden_multiplier=mlp_hidden_multiplier,
+            out_features=output_dim,
+            act_layer=act_layer,
+            drop=drop,
+        )
+        self.drop2 = nn.Dropout(drop or 0.0)
+        self.norm2 = SuperLayerNorm1D(output_dim)
+
+    @property
+    def abstract_search_space(self):
+        root_node = spaces.VirtualNode(id(self))
+        xdict = dict(
+            mha=self.mha.abstract_search_space,
+            norm1=self.norm1.abstract_search_space,
+            mlp=self.mlp.abstract_search_space,
+            norm2=self.norm2.abstract_search_space,
+        )
+        for key, space in xdict.items():
+            if not spaces.is_determined(space):
+                root_node.append(key, space)
+        return root_node
+
+    def apply_candidate(self, abstract_child: spaces.VirtualNode):
+        super(SuperTransformerEncoderLayer, self).apply_candidate(abstract_child)
+        valid_keys = ["mha", "norm1", "mlp", "norm2"]
+        for key in valid_keys:
+            if key in abstract_child:
+                getattr(self, key).apply_candidate(abstract_child[key])
+
+    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
+        return self.forward_raw(input)
+
+    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
+        # multi-head attention, with a residual connection from the input
+        x = self.mha(input)
+        x = input + self.drop1(x)
+        x = self.norm1(x)
+        # feed-forward layer (residual add assumes output_dim matches input_dim)
+        y = self.mlp(x)
+        x = x + self.drop2(y)
+        x = self.norm2(x)
+        return x
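
For reference, a sketch of the same post-norm dataflow (MHA -> residual -> norm -> MLP -> residual -> norm) in plain PyTorch, with nn.MultiheadAttention standing in for SuperAttention and fixed rather than searchable dimensions; the class name and default sizes below are illustrative:

import torch
import torch.nn as nn

class EncoderLayerSketch(nn.Module):
    """Post-norm encoder layer mirroring the dataflow above (fixed dims)."""

    def __init__(self, dim: int = 64, num_heads: int = 4, drop: float = 0.0):
        super().__init__()
        self.mha = nn.MultiheadAttention(dim, num_heads, dropout=drop, batch_first=True)
        self.drop1 = nn.Dropout(drop)
        self.norm1 = nn.LayerNorm(dim)
        self.mlp = nn.Sequential(
            nn.Linear(dim, 4 * dim), nn.GELU(), nn.Dropout(drop),
            nn.Linear(4 * dim, dim), nn.Dropout(drop),
        )
        self.drop2 = nn.Dropout(drop)
        self.norm2 = nn.LayerNorm(dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        attn_out, _ = self.mha(x, x, x)                # self-attention
        x = self.norm1(x + self.drop1(attn_out))       # residual -> norm
        x = self.norm2(x + self.drop2(self.mlp(x)))    # residual -> norm
        return x

x = torch.randn(2, 16, 64)                             # (batch, seq, dim)
print(EncoderLayerSketch()(x).shape)                   # torch.Size([2, 16, 64])
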