Update xlayers

This commit is contained in:
D-X-Y
2021-05-07 10:26:35 +08:00
parent f6a024a6ff
commit 80aaac4dfa
9 changed files with 333 additions and 83 deletions

View File

@@ -34,3 +34,4 @@ def get_model(config: Dict[Text, Any], **kwargs):
    else:
        raise TypeError("Unknown model type: {:}".format(model_type))
    return model

View File

@@ -31,6 +31,9 @@ class SuperReLU(SuperModule):
    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        return F.relu(input, inplace=self._inplace)

    def forward_with_container(self, input, container, prefix=[]):
        return self.forward_raw(input)

    def extra_repr(self) -> str:
        return "inplace=True" if self._inplace else ""
@@ -53,6 +56,29 @@ class SuperLeakyReLU(SuperModule):
    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        return F.leaky_relu(input, self._negative_slope, self._inplace)

    def forward_with_container(self, input, container, prefix=[]):
        return self.forward_raw(input)

    def extra_repr(self) -> str:
        inplace_str = "inplace=True" if self._inplace else ""
        return "negative_slope={}{}".format(self._negative_slope, inplace_str)


class SuperTanh(SuperModule):
    """Applies the Tanh function element-wise."""

    def __init__(self) -> None:
        super(SuperTanh, self).__init__()

    @property
    def abstract_search_space(self):
        return spaces.VirtualNode(id(self))

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        return self.forward_raw(input)

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        return torch.tanh(input)

    def forward_with_container(self, input, container, prefix=[]):
        return self.forward_raw(input)
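Since these activations hold no parameters, forward_with_container simply delegates to forward_raw and never touches the container. A minimal sketch, assuming SuperTanh is imported from this module:

import torch

act = SuperTanh()  # parameter-free, so the container argument is ignored
x = torch.randn(2, 3)
y = act.forward_with_container(x, container=None)
assert torch.equal(y, torch.tanh(x))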

View File

@@ -111,3 +111,10 @@ class SuperSequential(SuperModule):
        for module in self:
            input = module(input)
        return input

    def forward_with_container(self, input, container, prefix=[]):
        for index, module in enumerate(self):
            input = module.forward_with_container(
                input, container, prefix + [str(index)]
            )
        return input
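The prefix argument threads a dotted path down to each child: containers append their element index, and leaf layers append a parameter name. A small illustration of the naming scheme (the helper is ours, for illustration only):

def make_name(prefix, leaf):
    # mirrors ".".join(prefix + [leaf]) as used by the leaf layers
    return ".".join(prefix + [leaf])

# the first child of a SuperSequential queries, e.g.:
assert make_name(["0"], "_super_weight") == "0._super_weight"
# a SuperSequential nested at index 2 prepends its own index:
assert make_name(["2", "0"], "_super_bias") == "2.0._super_bias"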

View File

@@ -27,8 +27,13 @@ from .super_transformer import SuperTransformerEncoderLayer
from .super_activations import SuperReLU
from .super_activations import SuperLeakyReLU
from .super_activations import SuperTanh
super_name2activation = {
    "relu": SuperReLU,
    "leaky_relu": SuperLeakyReLU,
    "tanh": SuperTanh,
}
from .super_trade_stem import SuperAlphaEBDv1

View File

@@ -115,6 +115,16 @@ class SuperLinear(SuperModule):
            self._in_features, self._out_features, self._bias
        )

    def forward_with_container(self, input, container, prefix=[]):
        super_weight_name = ".".join(prefix + ["_super_weight"])
        super_weight = container.query(super_weight_name)
        super_bias_name = ".".join(prefix + ["_super_bias"])
        if container.has(super_bias_name):
            super_bias = container.query(super_bias_name)
        else:
            super_bias = None
        return F.linear(input, super_weight, super_bias)


class SuperMLPv1(SuperModule):
    """An MLP layer: FC -> Activation -> Drop -> FC -> Drop."""

View File

@@ -39,6 +39,41 @@ class TensorContainer:
        self._param_or_buffers = []
        self._name2index = dict()

    def additive(self, tensors):
        result = TensorContainer()
        for index, name in enumerate(self._names):
            new_tensor = self._tensors[index] + tensors[index]
            result.append(name, new_tensor, self._param_or_buffers[index])
        return result

    def no_grad_clone(self):
        result = TensorContainer()
        with torch.no_grad():
            for index, name in enumerate(self._names):
                result.append(
                    name, self._tensors[index].clone(), self._param_or_buffers[index]
                )
        return result

    @property
    def tensors(self):
        return self._tensors

    def flatten(self, tensors=None):
        if tensors is None:
            tensors = self._tensors
        tensors = [tensor.view(-1) for tensor in tensors]
        return torch.cat(tensors)

    def unflatten(self, tensor):
        tensors, s = [], 0
        for raw_tensor in self._tensors:
            length = raw_tensor.numel()
            x = torch.reshape(tensor[s : s + length], shape=raw_tensor.shape)
            tensors.append(x)
            s += length
        return tensors

    def append(self, name, tensor, param_or_buffer):
        if not isinstance(tensor, torch.Tensor):
            raise TypeError(
@@ -54,6 +89,23 @@ class TensorContainer:
            )
        self._name2index[name] = len(self._names) - 1

    def query(self, name):
        if not self.has(name):
            raise ValueError(
                "The {:} is not in {:}".format(name, list(self._name2index.keys()))
            )
        index = self._name2index[name]
        return self._tensors[index]

    def has(self, name):
        return name in self._name2index

    def has_prefix(self, prefix):
        for name, idx in self._name2index.items():
            if name.startswith(prefix):
                return name
        return False

    def numel(self):
        total = 0
        for tensor in self._tensors:
@@ -181,3 +233,6 @@ class SuperModule(abc.ABC, nn.Module):
                )
            )
        return outputs

    def forward_with_container(self, inputs, container, prefix=[]):
        raise NotImplementedError
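A hedged usage sketch of TensorContainer: populate it from a module's parameters, round-trip through flatten/unflatten, and build a shifted copy with additive. The import path of TensorContainer is not shown in this diff, and we assume the third argument of append flags parameters (True) versus buffers (False), per the field name _param_or_buffers:

import torch
import torch.nn as nn
# from <this module> import TensorContainer  # path not shown in the diff

model = nn.Linear(3, 4)
container = TensorContainer()
for name, param in model.named_parameters():
    container.append(name, param, True)  # True: a parameter, not a buffer

flat = container.flatten()            # one 1-D tensor over all entries
restored = container.unflatten(flat)  # list of tensors, original shapes
assert all(a.shape == b.shape for a, b in zip(container.tensors, restored))

# additive() returns a new container with element-wise shifted tensors:
delta = [torch.zeros_like(t) for t in container.tensors]
shifted = container.additive(delta)
assert shifted.has("weight") and shifted.has("bias")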

View File

@@ -161,6 +161,21 @@ class SuperSimpleLearnableNorm(SuperModule):
            mean, std = torch.unsqueeze(mean, dim=0), torch.unsqueeze(std, dim=0)
        return tensor.sub_(mean).div_(std)

    def forward_with_container(self, input, container, prefix=[]):
        if not self._inplace:
            tensor = input.clone()
        else:
            tensor = input
        mean_name = ".".join(prefix + ["_mean"])
        std_name = ".".join(prefix + ["_std"])
        mean, std = (
            container.query(mean_name).to(tensor.device),
            torch.abs(container.query(std_name).to(tensor.device)) + self._eps,
        )
        while mean.ndim < tensor.ndim:
            mean, std = torch.unsqueeze(mean, dim=0), torch.unsqueeze(std, dim=0)
        return tensor.sub_(mean).div_(std)

    def extra_repr(self) -> str:
        return "mean={mean}, std={std}, inplace={inplace}".format(
            mean=self._mean.item(), std=self._std.item(), inplace=self._inplace
@@ -191,3 +206,6 @@ class SuperIdentity(SuperModule):
    def extra_repr(self) -> str:
        return "inplace={inplace}".format(inplace=self._inplace)

    def forward_with_container(self, input, container, prefix=[]):
        return self.forward_raw(input)
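The container path repeats the normalization forward_raw applies: subtract the queried mean, divide by the absolute value of the queried std plus eps. A small numeric check of that formula:

import torch

x = torch.tensor([1.0, 2.0, 3.0])
mean, std, eps = torch.tensor(2.0), torch.tensor(-0.5), 1e-6
out = x.sub(mean).div(torch.abs(std) + eps)
print(out)  # approximately tensor([-2., 0., 2.])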

View File

@@ -0,0 +1,120 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# DISABLED / NOT-FINISHED
#####################################################
import math
from typing import Optional, Callable

import numpy as np  # needed: np.ones is used below
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal  # needed: Normal is used below

import spaces
from .super_module import SuperModule  # needed: SuperModule is the base class below
from .super_container import SuperSequential
from .super_linear import SuperLinear
class SuperActor(SuperModule):
    """An Actor in RL."""

    def _distribution(self, obs):
        raise NotImplementedError

    def _log_prob_from_distribution(self, pi, act):
        raise NotImplementedError

    def forward_candidate(self, **kwargs):
        return self.forward_raw(**kwargs)

    def forward_raw(self, obs, act=None):
        # Produce action distributions for given observations, and
        # optionally compute the log likelihood of given actions under
        # those distributions.
        pi = self._distribution(obs)
        logp_a = None
        if act is not None:
            logp_a = self._log_prob_from_distribution(pi, act)
        return pi, logp_a
class SuperLfnaMetaMLP(SuperModule):
    def __init__(self, obs_dim, hidden_sizes, act_cls):
        super(SuperLfnaMetaMLP, self).__init__()
        self.delta_net = SuperSequential(
            SuperLinear(obs_dim, hidden_sizes[0]),
            act_cls(),
            SuperLinear(hidden_sizes[0], hidden_sizes[1]),
            act_cls(),
            SuperLinear(hidden_sizes[1], 1),
        )
# NOTE: this second SuperLfnaMetaMLP definition shadows the one above;
# the file is explicitly marked DISABLED / NOT-FINISHED.
class SuperLfnaMetaMLP(SuperModule):
    def __init__(self, obs_dim, act_dim, hidden_sizes, act_cls):
        super(SuperLfnaMetaMLP, self).__init__()
        log_std = -0.5 * np.ones(act_dim, dtype=np.float32)
        self.log_std = torch.nn.Parameter(torch.as_tensor(log_std))
        self.mu_net = SuperSequential(
            SuperLinear(obs_dim, hidden_sizes[0]),
            act_cls(),
            SuperLinear(hidden_sizes[0], hidden_sizes[1]),
            act_cls(),
            SuperLinear(hidden_sizes[1], act_dim),
        )

    def _distribution(self, obs):
        mu = self.mu_net(obs)
        std = torch.exp(self.log_std)
        return Normal(mu, std)

    def _log_prob_from_distribution(self, pi, act):
        return pi.log_prob(act).sum(axis=-1)

    def forward_candidate(self, **kwargs):
        return self.forward_raw(**kwargs)

    def forward_raw(self, obs, act=None):
        # Produce action distributions for given observations, and
        # optionally compute the log likelihood of given actions under
        # those distributions.
        pi = self._distribution(obs)
        logp_a = None
        if act is not None:
            logp_a = self._log_prob_from_distribution(pi, act)
        return pi, logp_a
class SuperMLPGaussianActor(SuperModule):
    def __init__(self, obs_dim, act_dim, hidden_sizes, act_cls):
        super(SuperMLPGaussianActor, self).__init__()
        log_std = -0.5 * np.ones(act_dim, dtype=np.float32)
        self.log_std = torch.nn.Parameter(torch.as_tensor(log_std))
        self.mu_net = SuperSequential(
            SuperLinear(obs_dim, hidden_sizes[0]),
            act_cls(),
            SuperLinear(hidden_sizes[0], hidden_sizes[1]),
            act_cls(),
            SuperLinear(hidden_sizes[1], act_dim),
        )

    def _distribution(self, obs):
        mu = self.mu_net(obs)
        std = torch.exp(self.log_std)
        return Normal(mu, std)

    def _log_prob_from_distribution(self, pi, act):
        return pi.log_prob(act).sum(axis=-1)

    def forward_candidate(self, **kwargs):
        return self.forward_raw(**kwargs)

    def forward_raw(self, obs, act=None):
        # Produce action distributions for given observations, and
        # optionally compute the log likelihood of given actions under
        # those distributions.
        pi = self._distribution(obs)
        logp_a = None
        if act is not None:
            logp_a = self._log_prob_from_distribution(pi, act)
        return pi, logp_a
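A hedged usage sketch of SuperMLPGaussianActor. It assumes the super().__init__ fixes above, that SuperMLPGaussianActor and SuperTanh are importable, and it should be read with the file's DISABLED / NOT-FINISHED caveat in mind:

import torch

actor = SuperMLPGaussianActor(
    obs_dim=8, act_dim=2, hidden_sizes=[32, 32], act_cls=SuperTanh
)
obs = torch.randn(4, 8)
pi, _ = actor.forward_raw(obs)            # distribution only
act = pi.sample()
pi, logp_a = actor.forward_raw(obs, act)  # also the log-likelihood of act
print(logp_a.shape)                       # torch.Size([4])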