Add int search space

D-X-Y
2021-03-18 16:02:55 +08:00
parent ece6ac5f41
commit 63c8bb9bc8
67 changed files with 5150 additions and 1474 deletions


@@ -15,7 +15,13 @@ if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
from config_utils import load_config, dict2config, configure2str
from datasets import get_datasets, get_nas_search_loaders
from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
from procedures import (
prepare_seed,
prepare_logger,
save_checkpoint,
copy_checkpoint,
get_optim_scheduler,
)
from utils import get_model_infos, obtain_accuracy
from log_utils import AverageMeter, time_string, convert_secs2time
from models import get_cell_based_tiny_net, get_search_spaces
@@ -26,7 +32,9 @@ def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
def _hessian_vector_product(vector, network, criterion, base_inputs, base_targets, r=1e-2):
def _hessian_vector_product(
vector, network, criterion, base_inputs, base_targets, r=1e-2
):
R = r / _concat(vector).norm()
for p, v in zip(network.module.get_weights(), vector):
p.data.add_(R, v)
@@ -45,7 +53,15 @@ def _hessian_vector_product(vector, network, criterion, base_inputs, base_target
return [(x - y).div_(2 * R) for x, y in zip(grads_p, grads_n)]
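
Note: the hunk above is the finite-difference Hessian-vector product from second-order DARTS. The weights are perturbed to w ± R·v with R = r / ||v||, the architecture gradient is evaluated at both points, and the difference is divided by 2R, i.e. H·v ≈ (∇_α L(w + R·v) − ∇_α L(w − R·v)) / (2R). A minimal self-contained sketch of the same idea (loss_fn/params/alphas are illustrative names, and it uses p.data.add_(R * v) instead of the two-argument add_(R, v) form, which newer PyTorch versions deprecate):

import torch

def hvp_finite_difference(loss_fn, params, alphas, vector, r=1e-2):
    # Perturbation scale R = r / ||v||, as in the code above.
    R = r / torch.cat([v.view(-1) for v in vector]).norm()
    # Gradient of the loss w.r.t. alpha at w + R*v.
    for p, v in zip(params, vector):
        p.data.add_(R * v)
    grads_p = torch.autograd.grad(loss_fn(), alphas)
    # Move by -2R to evaluate at w - R*v.
    for p, v in zip(params, vector):
        p.data.sub_(2 * R * v)
    grads_n = torch.autograd.grad(loss_fn(), alphas)
    # Restore the original weights.
    for p, v in zip(params, vector):
        p.data.add_(R * v)
    return [(gp - gn).div_(2 * R) for gp, gn in zip(grads_p, grads_n)]
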
def backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets):
def backward_step_unrolled(
network,
criterion,
base_inputs,
base_targets,
w_optimizer,
arch_inputs,
arch_targets,
):
# _compute_unrolled_model
_, logits = network(base_inputs)
loss = criterion(logits, base_targets)
@@ -57,11 +73,17 @@ def backward_step_unrolled(network, criterion, base_inputs, base_targets, w_opti
with torch.no_grad():
theta = _concat(network.module.get_weights())
try:
moment = _concat(w_optimizer.state[v]["momentum_buffer"] for v in network.module.get_weights())
moment = _concat(
w_optimizer.state[v]["momentum_buffer"]
for v in network.module.get_weights()
)
moment = moment.mul_(momentum)
except:
moment = torch.zeros_like(theta)
dtheta = _concat(torch.autograd.grad(loss, network.module.get_weights())) + WD * theta
dtheta = (
_concat(torch.autograd.grad(loss, network.module.get_weights()))
+ WD * theta
)
params = theta.sub(LR, moment + dtheta)
unrolled_model = deepcopy(network)
model_dict = unrolled_model.state_dict()
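
Note: this block builds the one-step "unrolled" model. All weights are flattened into theta and a single virtual SGD-with-momentum step is taken, w' = theta − LR · (momentum · buffer + ∇_θ L_train + WD · theta), without touching w_optimizer's real state. The same update on a flat vector, as a sketch (names are illustrative):

def unrolled_sgd_step(theta, grad, buffer, LR, momentum, WD):
    # Virtual SGD step with momentum and weight decay, matching the code above.
    dtheta = grad + WD * theta
    return theta - LR * (momentum * buffer + dtheta)
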
@@ -82,7 +104,9 @@ def backward_step_unrolled(network, criterion, base_inputs, base_targets, w_opti
dalpha = unrolled_model.module.arch_parameters.grad
vector = [v.grad.data for v in unrolled_model.module.get_weights()]
[implicit_grads] = _hessian_vector_product(vector, network, criterion, base_inputs, base_targets)
[implicit_grads] = _hessian_vector_product(
vector, network, criterion, base_inputs, base_targets
)
dalpha.data.sub_(LR, implicit_grads.data)
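
Note: together with the Hessian-vector product above, this realizes the second-order DARTS architecture gradient, ∇_α L_val(w', α) − LR · ∇²_{α,w} L_train(w, α) · ∇_{w'} L_val(w', α), where w' is the unrolled weight vector; the sub_ on dalpha applies the implicit second term.
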
@@ -93,13 +117,25 @@ def backward_step_unrolled(network, criterion, base_inputs, base_targets, w_opti
return unrolled_loss.detach(), unrolled_logits.detach()
def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger):
def search_func(
xloader,
network,
criterion,
scheduler,
w_optimizer,
a_optimizer,
epoch_str,
print_freq,
logger,
):
data_time, batch_time = AverageMeter(), AverageMeter()
base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
network.train()
end = time.time()
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
xloader
):
scheduler.update(None, 1.0 * step / len(xloader))
base_targets = base_targets.cuda(non_blocking=True)
arch_targets = arch_targets.cuda(non_blocking=True)
@@ -109,11 +145,19 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
# update the architecture-weight
a_optimizer.zero_grad()
arch_loss, arch_logits = backward_step_unrolled(
network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets
network,
criterion,
base_inputs,
base_targets,
w_optimizer,
arch_inputs,
arch_targets,
)
a_optimizer.step()
# record
arch_prec1, arch_prec5 = obtain_accuracy(arch_logits.data, arch_targets.data, topk=(1, 5))
arch_prec1, arch_prec5 = obtain_accuracy(
arch_logits.data, arch_targets.data, topk=(1, 5)
)
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
@@ -126,7 +170,9 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
torch.nn.utils.clip_grad_norm_(network.parameters(), 5)
w_optimizer.step()
# record
base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
base_prec1, base_prec5 = obtain_accuracy(
logits.data, base_targets.data, topk=(1, 5)
)
base_losses.update(base_loss.item(), base_inputs.size(0))
base_top1.update(base_prec1.item(), base_inputs.size(0))
base_top5.update(base_prec5.item(), base_inputs.size(0))
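
Note: each iteration of search_func draws one paired batch from the search loader (base_* from the weight-training split, arch_* from the validation split) and alternates the two bilevel updates. Condensed as a sketch (not verbatim; the weight step is partially elided in this diff):

# Per-batch alternation in search_func:
a_optimizer.zero_grad()
arch_loss, arch_logits = backward_step_unrolled(
    network, criterion, base_inputs, base_targets,
    w_optimizer, arch_inputs, arch_targets,
)
a_optimizer.step()  # update architecture parameters alpha

w_optimizer.zero_grad()
_, logits = network(base_inputs)
base_loss = criterion(logits, base_targets)
base_loss.backward()
torch.nn.utils.clip_grad_norm_(network.parameters(), 5)
w_optimizer.step()  # update network weights w
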
@@ -136,7 +182,11 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
end = time.time()
if step % print_freq == 0 or step + 1 == len(xloader):
Sstr = "*SEARCH* " + time_string() + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
Sstr = (
"*SEARCH* "
+ time_string()
+ " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
)
Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
batch_time=batch_time, data_time=data_time
)
@@ -164,7 +214,9 @@ def valid_func(xloader, network, criterion):
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
# record
arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
arch_prec1, arch_prec5 = obtain_accuracy(
logits.data, arch_targets.data, topk=(1, 5)
)
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
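
Note: valid_func scores the current architecture on held-out data with the same obtain_accuracy helper, which returns top-1/top-5 precision as percentages, accumulated by AverageMeter weighted by batch size. A minimal sketch of that metric, assuming the conventional top-k semantics:

import torch

def topk_accuracy(logits, targets, topk=(1, 5)):
    # Percentage of samples whose true label is among the k largest logits.
    maxk = max(topk)
    _, pred = logits.topk(maxk, dim=1)        # [N, maxk] predicted class indices
    correct = pred.eq(targets.view(-1, 1))    # [N, maxk] boolean matches
    return [correct[:, :k].any(dim=1).float().mean() * 100 for k in topk]
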
@@ -183,10 +235,19 @@ def main(xargs):
prepare_seed(xargs.rand_seed)
logger = prepare_logger(args)
train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
config = load_config(xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger)
train_data, valid_data, xshape, class_num = get_datasets(
xargs.dataset, xargs.data_path, -1
)
config = load_config(
xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
)
search_loader, _, valid_loader = get_nas_search_loaders(
train_data, valid_data, xargs.dataset, "configs/nas-benchmark/", config.batch_size, xargs.workers
train_data,
valid_data,
xargs.dataset,
"configs/nas-benchmark/",
config.batch_size,
xargs.workers,
)
logger.log(
"||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
@@ -212,9 +273,14 @@ def main(xargs):
search_model = get_cell_based_tiny_net(model_config)
logger.log("search-model :\n{:}".format(search_model))
w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)
w_optimizer, w_scheduler, criterion = get_optim_scheduler(
search_model.get_weights(), config
)
a_optimizer = torch.optim.Adam(
search_model.get_alphas(), lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay
search_model.get_alphas(),
lr=xargs.arch_learning_rate,
betas=(0.5, 0.999),
weight_decay=xargs.arch_weight_decay,
)
logger.log("w-optimizer : {:}".format(w_optimizer))
logger.log("a-optimizer : {:}".format(a_optimizer))
@@ -229,11 +295,17 @@ def main(xargs):
api = API(xargs.arch_nas_dataset)
logger.log("{:} create API = {:} done".format(time_string(), api))
last_info, model_base_path, model_best_path = logger.path("info"), logger.path("model"), logger.path("best")
last_info, model_base_path, model_best_path = (
logger.path("info"),
logger.path("model"),
logger.path("best"),
)
network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()
if last_info.exists(): # automatically resume from previous checkpoint
logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
logger.log(
"=> loading checkpoint of the last-info '{:}' start".format(last_info)
)
last_info = torch.load(last_info)
start_epoch = last_info["epoch"]
checkpoint = torch.load(last_info["last_checkpoint"])
@@ -244,11 +316,17 @@ def main(xargs):
w_optimizer.load_state_dict(checkpoint["w_optimizer"])
a_optimizer.load_state_dict(checkpoint["a_optimizer"])
logger.log(
"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch)
"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
last_info, start_epoch
)
)
else:
logger.log("=> do not find the last-info file : {:}".format(last_info))
start_epoch, valid_accuracies, genotypes = 0, {"best": -1}, {-1: search_model.genotype()}
start_epoch, valid_accuracies, genotypes = (
0,
{"best": -1},
{-1: search_model.genotype()},
)
# start training
start_time, search_time, epoch_time, total_epoch = (
@@ -259,10 +337,16 @@ def main(xargs):
)
for epoch in range(start_epoch, total_epoch):
w_scheduler.update(epoch, 0.0)
need_time = "Time Left: {:}".format(convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
need_time = "Time Left: {:}".format(
convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
)
epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
min_LR = min(w_scheduler.get_lr())
logger.log("\n[Search the {:}-th epoch] {:}, LR={:}".format(epoch_str, need_time, min_LR))
logger.log(
"\n[Search the {:}-th epoch] {:}, LR={:}".format(
epoch_str, need_time, min_LR
)
)
search_w_loss, search_w_top1, search_w_top5 = search_func(
search_loader,
@@ -281,7 +365,9 @@ def main(xargs):
epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
)
)
valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
valid_loader, network, criterion
)
logger.log(
"[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
@@ -297,7 +383,9 @@ def main(xargs):
find_best = False
genotypes[epoch] = search_model.genotype()
logger.log("<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch]))
logger.log(
"<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
)
# save checkpoint
save_path = save_checkpoint(
{
@@ -331,7 +419,9 @@ def main(xargs):
copy_checkpoint(model_base_path, model_best_path, logger)
with torch.no_grad():
logger.log(
"arch-parameters :\n{:}".format(nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu())
"arch-parameters :\n{:}".format(
nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu()
)
)
if api is not None:
logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
@@ -365,7 +455,9 @@ if __name__ == "__main__":
parser.add_argument("--search_space_name", type=str, help="The search space name.")
parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
parser.add_argument("--channel", type=int, help="The number of channels.")
parser.add_argument("--num_cells", type=int, help="The number of cells in one stage.")
parser.add_argument(
"--num_cells", type=int, help="The number of cells in one stage."
)
parser.add_argument(
"--track_running_stats",
type=int,
@@ -373,13 +465,32 @@ if __name__ == "__main__":
help="Whether use track_running_stats or not in the BN layer.",
)
# architecture learning rate
parser.add_argument("--arch_learning_rate", type=float, default=3e-4, help="learning rate for arch encoding")
parser.add_argument("--arch_weight_decay", type=float, default=1e-3, help="weight decay for arch encoding")
# log
parser.add_argument("--workers", type=int, default=2, help="number of data loading workers (default: 2)")
parser.add_argument("--save_dir", type=str, help="Folder to save checkpoints and log.")
parser.add_argument(
"--arch_nas_dataset", type=str, help="The path to load the architecture dataset (tiny-nas-benchmark)."
"--arch_learning_rate",
type=float,
default=3e-4,
help="learning rate for arch encoding",
)
parser.add_argument(
"--arch_weight_decay",
type=float,
default=1e-3,
help="weight decay for arch encoding",
)
# log
parser.add_argument(
"--workers",
type=int,
default=2,
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--save_dir", type=str, help="Folder to save checkpoints and log."
)
parser.add_argument(
"--arch_nas_dataset",
type=str,
help="The path to load the architecture dataset (tiny-nas-benchmark).",
)
parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
parser.add_argument("--rand_seed", type=int, help="manual seed")