add autodl

mhz
2024-08-25 18:02:31 +02:00
parent 192f286cfb
commit a0a25f291c
431 changed files with 50646 additions and 8 deletions

View File

@@ -0,0 +1,121 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest tests/test_basic_space.py -s #
#####################################################
import random
import unittest

from xautodl.spaces import Categorical
from xautodl.spaces import Continuous
from xautodl.spaces import Integer
from xautodl.spaces import is_determined
from xautodl.spaces import get_min
from xautodl.spaces import get_max


class TestBasicSpace(unittest.TestCase):
    """Test the basic search spaces."""

    def test_categorical(self):
        space = Categorical(1, 2, 3, 4)
        for i in range(4):
            self.assertEqual(space[i], i + 1)
        self.assertEqual(
            "Categorical(candidates=[1, 2, 3, 4], default_index=None)", str(space)
        )

    def test_integer(self):
        space = Integer(lower=1, upper=4)
        for i in range(4):
            self.assertEqual(space[i], i + 1)
        self.assertEqual("Integer(lower=1, upper=4, default=None)", str(space))
        self.assertEqual(get_max(space), 4)
        self.assertEqual(get_min(space), 1)

    def test_continuous(self):
        random.seed(999)
        space = Continuous(0, 1)
        self.assertGreaterEqual(space.random().value, 0)
        self.assertGreaterEqual(1, space.random().value)

        lower, upper = 1.5, 4.6
        space = Continuous(lower, upper, log=False)
        values = []
        for i in range(1000000):
            x = space.random(reuse_last=False).value
            self.assertGreaterEqual(x, lower)
            self.assertGreaterEqual(upper, x)
            values.append(x)
        self.assertAlmostEqual((lower + upper) / 2, sum(values) / len(values), places=2)
        self.assertEqual(
            "Continuous(lower=1.5, upper=4.6, default_value=None, log_scale=False)",
            str(space),
        )

    def test_determined_and_has(self):
        # Test non-nested spaces
        space = Categorical(1, 2, 3, 4)
        self.assertFalse(space.determined)
        self.assertTrue(space.has(2))
        self.assertFalse(space.has(6))
        space = Categorical(4)
        self.assertTrue(space.determined)
        space = Continuous(0.11, 0.12)
        self.assertTrue(space.has(0.115))
        self.assertFalse(space.has(0.1))
        self.assertFalse(space.determined)
        space = Continuous(0.11, 0.11)
        self.assertTrue(space.determined)
        # Test nested spaces
        space_1 = Categorical(1, 2, 3, 4)
        space_2 = Categorical(1)
        nested_space = Categorical(space_1)
        self.assertFalse(nested_space.determined)
        self.assertTrue(nested_space.has(4))
        nested_space = Categorical(space_2)
        self.assertTrue(nested_space.determined)
        # Test a deeper nested space
        nested_space = Categorical(
            Categorical(1, 2, 3),
            Categorical(4, Categorical(5, 6, 7, Categorical(8, 9), 10), 11),
            12,
        )
        print("\nThe nested search space:\n{:}".format(nested_space))
        for i in range(1, 13):
            self.assertTrue(nested_space.has(i))
        # Test simple ops
        self.assertTrue(is_determined(1))
        self.assertFalse(is_determined(nested_space))

    def test_duplicate(self):
        space = Categorical(1, 2, 3, 4)
        x = space.random()
        for _ in range(100):
            self.assertEqual(x, space.random(reuse_last=True))


class TestAbstractSpace(unittest.TestCase):
    """Test the abstract search spaces."""

    def test_continuous(self):
        print("")
        space = Continuous(0, 1)
        self.assertEqual(space, space.abstract())
        print("The abstract search space for Continuous: {:}".format(space.abstract()))

        space = Categorical(1, 2, 3)
        self.assertEqual(len(space.abstract()), 3)
        print(space.abstract())

        nested_space = Categorical(
            Categorical(1, 2, 3),
            Categorical(4, Categorical(5, 6, 7, Categorical(8, 9), 10), 11),
            12,
        )
        abstract_nested_space = nested_space.abstract()
        print("The abstract nested search space:\n{:}".format(abstract_nested_space))

View File

@@ -0,0 +1,21 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest ./tests/test_import.py #
#####################################################
def test_import():
    from xautodl import config_utils
    from xautodl import datasets
    from xautodl import log_utils
    from xautodl import models
    from xautodl import nas_infer_model
    from xautodl import procedures
    from xautodl import trade_models
    from xautodl import utils
    from xautodl import xlayers
    from xautodl import xmisc
    from xautodl import xmodels
    from xautodl import spaces

    print("Check all imports done")

View File

@@ -0,0 +1,29 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest tests/test_loader.py -s #
#####################################################
import unittest
import tempfile

import torch

from xautodl.datasets import get_datasets


def test_simple():
    xdir = tempfile.mkdtemp()
    train_data, valid_data, xshape, class_num = get_datasets("cifar10", xdir, -1)
    print(train_data)
    print(valid_data)

    xloader = torch.utils.data.DataLoader(
        train_data, batch_size=256, shuffle=True, num_workers=4, pin_memory=True
    )
    print(xloader)
    print(next(iter(xloader)))

    for i, data in enumerate(xloader):
        print(i)


# module-level call: the loader smoke test also runs when this file is imported or
# executed directly with `python`
test_simple()

View File

@@ -0,0 +1,32 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest tests/test_math_static.py -s #
#####################################################
import unittest

from xautodl.datasets.math_core import QuadraticSFunc
from xautodl.datasets.math_core import ConstantFunc


class TestConstantFunc(unittest.TestCase):
    """Test the constant function."""

    def test_simple(self):
        function = ConstantFunc(0.1)
        for i in range(100):
            assert function(i) == 0.1


class TestQuadraticSFunc(unittest.TestCase):
    """Test the quadratic function."""

    def test_simple(self):
        function = QuadraticSFunc({0: 1, 1: 2, 2: 1})
        print(function)
        for x in (0, 0.5, 1):
            print("f({:})={:}".format(x, function(x)))
        thresh = 1e-7
        self.assertTrue(abs(function(0) - 1) < thresh)
        self.assertTrue(abs(function(0.5) - 0.5 * 0.5 - 2 * 0.5 - 1) < thresh)
        self.assertTrue(abs(function(1) - 1 - 2 - 1) < thresh)
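For reference, the coefficient dict above appears to map degree to coefficient, i.e. f(x) = 1 + 2x + x^2 = (x + 1)^2, which is exactly what the three assertions check:

# Plain-Python check of the arithmetic asserted above (no xautodl dependency);
# assumes {0: 1, 1: 2, 2: 1} means degree -> coefficient.
f = lambda x: 1 + 2 * x + 1 * x ** 2  # = (x + 1) ** 2
assert abs(f(0) - 1) < 1e-7
assert abs(f(0.5) - 2.25) < 1e-7
assert abs(f(1) - 4) < 1e-7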

View File

@@ -0,0 +1,73 @@
####################################################
# Copyright (c) Facebook, Inc. and its affiliates. #
####################################################
# Inspired from https://github.com/facebookresearch/detectron2/blob/master/tests/test_scheduler.py
####################################################
import math
import numpy as np
from unittest import TestCase

import torch

from xautodl.xmisc.scheduler_utils import CosineParamScheduler, MultiStepParamScheduler
from xautodl.xmisc.scheduler_utils import LRMultiplier, WarmupParamScheduler


class TestScheduler(TestCase):
    """Test the scheduler."""

    def test_warmup_multistep(self):
        p = torch.nn.Parameter(torch.zeros(0))
        opt = torch.optim.SGD([p], lr=5)

        multiplier = WarmupParamScheduler(
            MultiStepParamScheduler(
                [1, 0.1, 0.01, 0.001],
                milestones=[10, 15, 20],
                num_updates=30,
            ),
            0.001,
            5 / 30,
        )
        sched = LRMultiplier(opt, multiplier, 30)
        # This is an equivalent of:
        # sched = WarmupMultiStepLR(
        #     opt, milestones=[10, 15, 20], gamma=0.1, warmup_factor=0.001, warmup_iters=5)

        p.sum().backward()
        opt.step()

        lrs = [0.005]
        for _ in range(30):
            sched.step()
            lrs.append(opt.param_groups[0]["lr"])
        self.assertTrue(np.allclose(lrs[:5], [0.005, 1.004, 2.003, 3.002, 4.001]))
        self.assertTrue(np.allclose(lrs[5:10], 5.0))
        self.assertTrue(np.allclose(lrs[10:15], 0.5))
        self.assertTrue(np.allclose(lrs[15:20], 0.05))
        self.assertTrue(np.allclose(lrs[20:], 0.005))

    def test_warmup_cosine(self):
        p = torch.nn.Parameter(torch.zeros(0))
        opt = torch.optim.SGD([p], lr=5)
        multiplier = WarmupParamScheduler(
            CosineParamScheduler(1, 0),
            0.001,
            5 / 30,
        )
        sched = LRMultiplier(opt, multiplier, 30)

        p.sum().backward()
        opt.step()
        self.assertEqual(opt.param_groups[0]["lr"], 0.005)

        lrs = [0.005]
        for _ in range(30):
            sched.step()
            lrs.append(opt.param_groups[0]["lr"])
        for idx, lr in enumerate(lrs):
            expected_cosine = 2.5 * (1.0 + math.cos(math.pi * idx / 30))
            if idx >= 5:
                self.assertAlmostEqual(lr, expected_cosine)
            else:
                self.assertNotAlmostEqual(lr, expected_cosine)
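For reference, the values asserted for lrs[:5] in test_warmup_multistep are consistent with a linear warmup of the multiplier from warmup_factor (0.001) to 1 over the first 5 of 30 updates, applied to the base learning rate of 5. A plain-Python sketch of that arithmetic (the warmup rule here is an assumption that matches the asserted numbers):

# Sketch of the linear-warmup arithmetic behind lrs[:5] above.
base_lr, warmup_factor, warmup_iters = 5.0, 0.001, 5
warmup = [
    base_lr * (warmup_factor + (1.0 - warmup_factor) * step / warmup_iters)
    for step in range(warmup_iters)
]
print([round(v, 3) for v in warmup])  # -> [0.005, 1.004, 2.003, 3.002, 4.001]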

View File

@@ -0,0 +1,67 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest ./tests/test_super_att.py -s #
#####################################################
import random
import unittest
from parameterized import parameterized

import torch

from xautodl import spaces
from xautodl.xlayers import super_core


class TestSuperSelfAttention(unittest.TestCase):
    """Test the super attention layer."""

    def _internal_func(self, inputs, model):
        outputs = model(inputs)
        abstract_space = model.abstract_search_space
        print(
            "The abstract search space for SuperSelfAttention is:\n{:}".format(
                abstract_space
            )
        )
        abstract_space.clean_last()
        abstract_child = abstract_space.random(reuse_last=True)
        print("The abstract child program is:\n{:}".format(abstract_child))
        model.set_super_run_type(super_core.SuperRunMode.Candidate)
        model.enable_candidate()
        model.apply_candidate(abstract_child)
        outputs = model(inputs)
        return abstract_child, outputs

    def test_super_attention(self):
        proj_dim = spaces.Categorical(12, 24, 36)
        num_heads = spaces.Categorical(2, 4, 6)
        model = super_core.SuperSelfAttention(10, proj_dim, num_heads)
        print(model)
        model.apply_verbose(True)

        inputs = torch.rand(4, 20, 10)  # batch size, sequence length, channel
        abstract_child, outputs = self._internal_func(inputs, model)
        output_shape = (4, 20, abstract_child["proj"]["_out_features"].value)
        self.assertEqual(tuple(outputs.shape), output_shape)

    @parameterized.expand([[6], [12], [24], [48]])
    def test_transformer_encoder(self, input_dim):
        output_dim = spaces.Categorical(12, 24, 36)
        model = super_core.SuperSequential(
            super_core.SuperLinear(input_dim, output_dim),
            super_core.SuperTransformerEncoderLayer(
                output_dim,
                num_heads=spaces.Categorical(2, 4, 6),
                mlp_hidden_multiplier=spaces.Categorical(1, 2, 4),
            ),
        )
        print(model)
        model.apply_verbose(True)

        inputs = torch.rand(4, 20, input_dim)
        abstract_child, outputs = self._internal_func(inputs, model)
        output_shape = (
            4,
            20,
            output_dim.abstract(reuse_last=True).random(reuse_last=True).value,
        )
        self.assertEqual(tuple(outputs.shape), output_shape)
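The candidate-sampling workflow in _internal_func recurs throughout the super-layer tests in this commit. A condensed, illustrative version (only calls exercised by these tests, shown on a single SuperLinear for brevity):

# Condensed sketch of the candidate-sampling workflow used by these tests.
import torch
from xautodl import spaces
from xautodl.xlayers import super_core

model = super_core.SuperLinear(10, spaces.Categorical(12, 24, 36))
outputs = model(torch.rand(4, 10))            # forward through the full super-layer

space = model.abstract_search_space           # derive its abstract search space
space.clean_last()
child = space.random(reuse_last=True)         # sample one candidate configuration

model.set_super_run_type(super_core.SuperRunMode.Candidate)
model.enable_candidate()
model.apply_candidate(child)                  # restrict the layer to that candidate
outputs = model(torch.rand(4, 10))            # forward through the sampled candidate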

View File

@@ -0,0 +1,85 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest ./tests/test_super_container.py -s #
#####################################################
import random
import unittest
import pytest

import torch

from xautodl import spaces
from xautodl.xlayers import super_core

"""Test the super container layers."""


def _internal_func(inputs, model):
    outputs = model(inputs)
    abstract_space = model.abstract_search_space
    print(
        "The abstract search space for SuperAttention is:\n{:}".format(abstract_space)
    )
    abstract_space.clean_last()
    abstract_child = abstract_space.random(reuse_last=True)
    print("The abstract child program is:\n{:}".format(abstract_child))
    model.enable_candidate()
    model.set_super_run_type(super_core.SuperRunMode.Candidate)
    model.apply_candidate(abstract_child)
    outputs = model(inputs)
    return abstract_child, outputs


def _create_stel(input_dim, output_dim, order):
    return super_core.SuperSequential(
        super_core.SuperLinear(input_dim, output_dim),
        super_core.SuperTransformerEncoderLayer(
            output_dim,
            num_heads=spaces.Categorical(2, 4, 6),
            mlp_hidden_multiplier=spaces.Categorical(1, 2, 4),
            order=order,
        ),
    )


@pytest.mark.parametrize("batch", (1, 2, 4))
@pytest.mark.parametrize("seq_dim", (1, 10, 30))
@pytest.mark.parametrize("input_dim", (6, 12, 24, 27))
@pytest.mark.parametrize(
    "order", (super_core.LayerOrder.PreNorm, super_core.LayerOrder.PostNorm)
)
def test_super_sequential(batch, seq_dim, input_dim, order):
    out1_dim = spaces.Categorical(12, 24, 36)
    out2_dim = spaces.Categorical(24, 36, 48)
    out3_dim = spaces.Categorical(36, 72, 100)
    layer1 = _create_stel(input_dim, out1_dim, order)
    layer2 = _create_stel(out1_dim, out2_dim, order)
    layer3 = _create_stel(out2_dim, out3_dim, order)
    model = super_core.SuperSequential(layer1, layer2, layer3)
    print(model)
    model.apply_verbose(True)

    inputs = torch.rand(batch, seq_dim, input_dim)
    abstract_child, outputs = _internal_func(inputs, model)
    output_shape = (
        batch,
        seq_dim,
        out3_dim.abstract(reuse_last=True).random(reuse_last=True).value,
    )
    assert tuple(outputs.shape) == output_shape


def test_super_sequential_v1():
    model = super_core.SuperSequential(
        super_core.SuperSimpleNorm(1, 1),
        torch.nn.ReLU(),
        super_core.SuperLeakyReLU(),
        super_core.SuperLinear(10, 10),
        super_core.SuperReLU(),
    )
    inputs = torch.rand(10, 10)
    print(model)
    outputs = model(inputs)

    abstract_search_space = model.abstract_search_space
    print(abstract_search_space)

View File

@@ -0,0 +1,130 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest ./tests/test_super_model.py -s #
#####################################################
import torch
import unittest

from xautodl.xlayers import super_core
from xautodl import spaces


class TestSuperLinear(unittest.TestCase):
    """Test the super linear."""

    def test_super_linear(self):
        out_features = spaces.Categorical(12, 24, 36)
        bias = spaces.Categorical(True, False)
        model = super_core.SuperLinear(10, out_features, bias=bias)
        print("The simple super linear module is:\n{:}".format(model))
        model.apply_verbose(True)

        print(model.super_run_type)
        self.assertTrue(model.bias)

        inputs = torch.rand(20, 10)
        print("Input shape: {:}".format(inputs.shape))
        print("Weight shape: {:}".format(model._super_weight.shape))
        print("Bias shape: {:}".format(model._super_bias.shape))
        outputs = model(inputs)
        self.assertEqual(tuple(outputs.shape), (20, 36))

        abstract_space = model.abstract_search_space
        abstract_space.clean_last()
        abstract_child = abstract_space.random()
        print("The abstract search space:\n{:}".format(abstract_space))
        print("The abstract child program:\n{:}".format(abstract_child))

        model.set_super_run_type(super_core.SuperRunMode.Candidate)
        model.enable_candidate()
        model.apply_candidate(abstract_child)

        output_shape = (20, abstract_child["_out_features"].value)
        outputs = model(inputs)
        self.assertEqual(tuple(outputs.shape), output_shape)

    def test_super_mlp_v1(self):
        hidden_features = spaces.Categorical(12, 24, 36)
        out_features = spaces.Categorical(24, 36, 48)
        mlp = super_core.SuperMLPv1(10, hidden_features, out_features)
        print(mlp)
        mlp.apply_verbose(False)
        # the hidden dimension is shared between fc1's output and fc2's input
        self.assertEqual(mlp.fc1._out_features, mlp.fc2._in_features)

        inputs = torch.rand(4, 10)
        outputs = mlp(inputs)
        self.assertEqual(tuple(outputs.shape), (4, 48))

        abstract_space = mlp.abstract_search_space
        print(
            "The abstract search space for SuperMLPv1 is:\n{:}".format(abstract_space)
        )
        self.assertEqual(
            abstract_space["fc1"]["_out_features"],
            abstract_space["fc2"]["_in_features"],
        )
        self.assertTrue(
            abstract_space["fc1"]["_out_features"]
            is abstract_space["fc2"]["_in_features"]
        )

        abstract_space.clean_last()
        abstract_child = abstract_space.random(reuse_last=True)
        print("The abstract child program is:\n{:}".format(abstract_child))
        self.assertEqual(
            abstract_child["fc1"]["_out_features"].value,
            abstract_child["fc2"]["_in_features"].value,
        )

        mlp.set_super_run_type(super_core.SuperRunMode.Candidate)
        mlp.enable_candidate()
        mlp.apply_candidate(abstract_child)
        outputs = mlp(inputs)
        output_shape = (4, abstract_child["fc2"]["_out_features"].value)
        self.assertEqual(tuple(outputs.shape), output_shape)

    def test_super_mlp_v2(self):
        hidden_multiplier = spaces.Categorical(1.0, 2.0, 3.0)
        out_features = spaces.Categorical(24, 36, 48)
        mlp = super_core.SuperMLPv2(10, hidden_multiplier, out_features)
        print(mlp)
        mlp.apply_verbose(False)

        inputs = torch.rand(4, 10)
        outputs = mlp(inputs)
        self.assertEqual(tuple(outputs.shape), (4, 48))

        abstract_space = mlp.abstract_search_space
        print(
            "The abstract search space for SuperMLPv2 is:\n{:}".format(abstract_space)
        )
        abstract_space.clean_last()
        abstract_child = abstract_space.random(reuse_last=True)
        print("The abstract child program is:\n{:}".format(abstract_child))

        mlp.set_super_run_type(super_core.SuperRunMode.Candidate)
        mlp.enable_candidate()
        mlp.apply_candidate(abstract_child)
        outputs = mlp(inputs)
        output_shape = (4, abstract_child["_out_features"].value)
        self.assertEqual(tuple(outputs.shape), output_shape)

    def test_super_stem(self):
        out_features = spaces.Categorical(24, 36, 48)
        model = super_core.SuperAlphaEBDv1(6, out_features)
        inputs = torch.rand(4, 360)

        abstract_space = model.abstract_search_space
        abstract_space.clean_last()
        abstract_child = abstract_space.random(reuse_last=True)
        print("The abstract search space:\n{:}".format(abstract_space))
        print("The abstract child program:\n{:}".format(abstract_child))

        model.set_super_run_type(super_core.SuperRunMode.Candidate)
        model.enable_candidate()
        model.apply_candidate(abstract_child)
        outputs = model(inputs)
        output_shape = (4, 60, abstract_child["_embed_dim"].value)
        self.assertEqual(tuple(outputs.shape), output_shape)

View File

@@ -0,0 +1,79 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest ./tests/test_super_norm.py -s #
#####################################################
import unittest

import torch

from xautodl.xlayers import super_core
from xautodl import spaces


class TestSuperSimpleNorm(unittest.TestCase):
    """Test the super simple norm."""

    def test_super_simple_norm(self):
        out_features = spaces.Categorical(12, 24, 36)
        bias = spaces.Categorical(True, False)
        model = super_core.SuperSequential(
            super_core.SuperSimpleNorm(5, 0.5),
            super_core.SuperLinear(10, out_features, bias=bias),
        )
        print("The simple super module is:\n{:}".format(model))
        model.apply_verbose(True)

        print(model.super_run_type)
        self.assertTrue(model[1].bias)

        inputs = torch.rand(20, 10)
        print("Input shape: {:}".format(inputs.shape))
        outputs = model(inputs)
        self.assertEqual(tuple(outputs.shape), (20, 36))

        abstract_space = model.abstract_search_space
        abstract_space.clean_last()
        abstract_child = abstract_space.random()
        print("The abstract search space:\n{:}".format(abstract_space))
        print("The abstract child program:\n{:}".format(abstract_child))

        model.set_super_run_type(super_core.SuperRunMode.Candidate)
        model.enable_candidate()
        model.apply_candidate(abstract_child)

        output_shape = (20, abstract_child["1"]["_out_features"].value)
        outputs = model(inputs)
        self.assertEqual(tuple(outputs.shape), output_shape)

    def test_super_simple_learn_norm(self):
        out_features = spaces.Categorical(12, 24, 36)
        bias = spaces.Categorical(True, False)
        model = super_core.SuperSequential(
            super_core.SuperSimpleLearnableNorm(),
            super_core.SuperIdentity(),
            super_core.SuperLinear(10, out_features, bias=bias),
        )
        print("The simple super module is:\n{:}".format(model))
        model.apply_verbose(True)

        print(model.super_run_type)
        self.assertTrue(model[2].bias)

        inputs = torch.rand(20, 10)
        print("Input shape: {:}".format(inputs.shape))
        outputs = model(inputs)
        self.assertEqual(tuple(outputs.shape), (20, 36))

        abstract_space = model.abstract_search_space
        abstract_space.clean_last()
        abstract_child = abstract_space.random()
        print("The abstract search space:\n{:}".format(abstract_space))
        print("The abstract child program:\n{:}".format(abstract_child))

        model.set_super_run_type(super_core.SuperRunMode.Candidate)
        model.enable_candidate()
        model.apply_candidate(abstract_child)

        output_shape = (20, abstract_child["2"]["_out_features"].value)
        outputs = model(inputs)
        self.assertEqual(tuple(outputs.shape), output_shape)

View File

@@ -0,0 +1,24 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest ./tests/test_super_rearrange.py -s #
#####################################################
import unittest

import torch

from xautodl import xlayers


class TestSuperReArrange(unittest.TestCase):
    """Test the super re-arrange layer."""

    def test_super_re_arrange(self):
        layer = xlayers.SuperReArrange(
            "b c (h p1) (w p2) -> b (h w) (c p1 p2)", p1=4, p2=4
        )
        tensor = torch.rand((8, 4, 32, 32))
        print("The tensor shape: {:}".format(tensor.shape))
        print(layer)
        outs = layer(tensor)
        print("The output tensor shape: {:}".format(outs.shape))
        assert tuple(outs.shape) == (8, 32 * 32 // 16, 4 * 4 * 4)
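The asserted shape follows from the pattern arithmetic: with p1 = p2 = 4, an (8, 4, 32, 32) input yields (8, 8 * 8, 4 * 4 * 4) = (8, 64, 64). Assuming einops is installed (the pattern string uses einops-style syntax), the same mapping can be sanity-checked independently:

# Independent shape check with einops (assumed available; not used by the test itself).
import torch
from einops import rearrange

tensor = torch.rand((8, 4, 32, 32))
# split each 32x32 map into 4x4 patches and fold the patch pixels into the channel axis
reference = rearrange(tensor, "b c (h p1) (w p2) -> b (h w) (c p1 p2)", p1=4, p2=4)
assert tuple(reference.shape) == (8, 64, 64)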

View File

@@ -0,0 +1,43 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest ./tests/test_super_vit.py -s #
#####################################################
import unittest
from parameterized import parameterized

import torch

from xautodl.xmodels import transformers
from xautodl.utils.flop_benchmark import count_parameters


class TestSuperViT(unittest.TestCase):
    """Test the super ViT models."""

    def test_super_vit(self):
        model = transformers.get_transformer("vit-base-16")
        tensor = torch.rand((2, 3, 224, 224))
        print("The tensor shape: {:}".format(tensor.shape))
        # print(model)
        outs = model(tensor)
        print("The output tensor shape: {:}".format(outs.shape))

    @parameterized.expand(
        [
            ["vit-cifar10-p4-d4-h4-c32", 32],
            ["vit-base-16", 224],
            ["vit-large-16", 224],
            ["vit-huge-14", 224],
        ]
    )
    def test_imagenet(self, name, resolution):
        tensor = torch.rand((2, 3, resolution, resolution))
        config = transformers.name2config[name]
        model = transformers.get_transformer(config)
        outs = model(tensor)
        size = count_parameters(model, "mb", True)
        print(
            "{:10s} : size={:.2f}MB, out-shape: {:}".format(
                name, size, tuple(outs.shape)
            )
        )

View File

@@ -0,0 +1,20 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.04 #
#####################################################
# pytest tests/test_synthetic_env.py -s #
#####################################################
import unittest

from xautodl.datasets.synthetic_core import get_synthetic_env


class TestSyntheticEnv(unittest.TestCase):
    """Test the synthetic environment."""

    def test_simple(self):
        versions = ["v1", "v2", "v3", "v4"]
        for version in versions:
            env = get_synthetic_env(version=version)
            print(env)
            for timestamp, (x, y) in env:
                self.assertEqual(x.shape, (1000, env._data_generator.ndim))

View File

@@ -0,0 +1,23 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest tests/test_synthetic_utils.py -s #
#####################################################
import unittest

from xautodl.datasets.synthetic_core import TimeStamp


class TestTimeStamp(unittest.TestCase):
    """Test the timestamp generator."""

    def test_simple(self):
        for mode in (None, "train", "valid", "test"):
            generator = TimeStamp(0, 1)
            print(generator)
            for idx, (i, xtime) in enumerate(generator):
                self.assertTrue(i == idx)
                if idx == 0:
                    self.assertTrue(xtime == 0)
                if idx + 1 == len(generator):
                    self.assertTrue(abs(xtime - 1) < 1e-8)

View File

@@ -0,0 +1,24 @@
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
import torch
import torch.nn as nn
import unittest

from xautodl.models.shape_searchs.SoftSelect import ChannelWiseInter


class TestTASFunc(unittest.TestCase):
    """Test the TAS function."""

    def test_channel_interpolation(self):
        tensors = torch.rand((16, 128, 7, 7))

        for oc in range(200, 210):
            out_v1 = ChannelWiseInter(tensors, oc, "v1")
            out_v2 = ChannelWiseInter(tensors, oc, "v2")
            assert (out_v1 == out_v2).any().item() == 1
        for oc in range(48, 160):
            out_v1 = ChannelWiseInter(tensors, oc, "v1")
            out_v2 = ChannelWiseInter(tensors, oc, "v2")
            assert (out_v1 == out_v2).any().item() == 1

View File

@@ -0,0 +1,4 @@
# bash ./tests/test_torch.sh
# First create and pickle the model in a GPU-enabled process,
# then reload it in a CPU-only process (CUDA_VISIBLE_DEVICES="").
pytest ./tests/test_torch_gpu_bugs.py::test_create -s
CUDA_VISIBLE_DEVICES="" pytest ./tests/test_torch_gpu_bugs.py::test_load -s

View File

@@ -0,0 +1,40 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest ./tests/test_torch_gpu_bugs.py::test_create
#
# CUDA_VISIBLE_DEVICES="" pytest ./tests/test_torch_gpu_bugs.py::test_load
#####################################################
import os, sys, time, torch
import pickle
import tempfile
from pathlib import Path

root_dir = (Path(__file__).parent / ".." / "..").resolve()

from xautodl.trade_models.quant_transformer import QuantTransformer


def test_create():
    """Test the basic quant-model."""
    if not torch.cuda.is_available():
        return
    quant_model = QuantTransformer(GPU=0)
    temp_dir = root_dir / "tests" / ".pytest_cache"
    temp_dir.mkdir(parents=True, exist_ok=True)
    temp_file = temp_dir / "quant-model.pkl"
    with temp_file.open("wb") as f:
        # quant_model.to(None)
        quant_model.to("cpu")
        # del quant_model.model
        # del quant_model.train_optimizer
        pickle.dump(quant_model, f)
    print("save into {:}".format(temp_file))


def test_load():
    temp_file = root_dir / "tests" / ".pytest_cache" / "quant-model.pkl"
    with temp_file.open("rb") as f:
        model = pickle.load(f)
        print(model.model)
        print(model.train_optimizer)