Add int search space
@@ -35,7 +35,9 @@ def get_configuration_space(max_nodes, search_space):
     for i in range(1, max_nodes):
         for j in range(i):
             node_str = "{:}<-{:}".format(i, j)
-            cs.add_hyperparameter(ConfigSpace.CategoricalHyperparameter(node_str, search_space))
+            cs.add_hyperparameter(
+                ConfigSpace.CategoricalHyperparameter(node_str, search_space)
+            )
     return cs


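As a side note on the helper reformatted above: it builds one categorical hyperparameter per cell edge. A minimal runnable sketch of that pattern, assuming the ConfigSpace package and an illustrative operation list (the operation names are not taken from this commit):

    # Hedged sketch of the get_configuration_space(...) pattern above.
    # The example operation list is an assumption for illustration only.
    import ConfigSpace

    def get_configuration_space(max_nodes, search_space):
        cs = ConfigSpace.ConfigurationSpace()
        for i in range(1, max_nodes):
            for j in range(i):
                node_str = "{:}<-{:}".format(i, j)
                cs.add_hyperparameter(
                    ConfigSpace.CategoricalHyperparameter(node_str, search_space)
                )
        return cs

    # Example: a 4-node cell with a hypothetical operation set.
    space = get_configuration_space(4, ["none", "skip_connect", "nor_conv_3x3"])
    print(space.sample_configuration())
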
@@ -55,7 +57,15 @@ def config2structure_func(max_nodes):


 class MyWorker(Worker):
-    def __init__(self, *args, convert_func=None, dataname=None, nas_bench=None, time_budget=None, **kwargs):
+    def __init__(
+        self,
+        *args,
+        convert_func=None,
+        dataname=None,
+        nas_bench=None,
+        time_budget=None,
+        **kwargs
+    ):
         super().__init__(*args, **kwargs)
         self.convert_func = convert_func
         self._dataname = dataname
@@ -70,7 +80,9 @@ class MyWorker(Worker):
         assert len(self.seen_archs) > 0
         best_index, best_acc = -1, None
         for arch_index in self.seen_archs:
-            info = self._nas_bench.get_more_info(arch_index, self._dataname, None, hp="200", is_random=True)
+            info = self._nas_bench.get_more_info(
+                arch_index, self._dataname, None, hp="200", is_random=True
+            )
             vacc = info["valid-accuracy"]
             if best_acc is None or best_acc < vacc:
                 best_acc = vacc
@@ -82,7 +94,9 @@ class MyWorker(Worker):
         start_time = time.time()
         structure = self.convert_func(config)
         arch_index = self._nas_bench.query_index_by_arch(structure)
-        info = self._nas_bench.get_more_info(arch_index, self._dataname, None, hp="200", is_random=True)
+        info = self._nas_bench.get_more_info(
+            arch_index, self._dataname, None, hp="200", is_random=True
+        )
         cur_time = info["train-all-time"] + info["valid-per-time"]
         cur_vacc = info["valid-accuracy"]
         self.real_cost_time += time.time() - start_time
@@ -101,7 +115,11 @@ class MyWorker(Worker):
             self.is_end = True
             return {
                 "loss": 100,
-                "info": {"seen-arch": len(self.seen_archs), "sim-test-time": self.sim_cost_time, "current-arch": None},
+                "info": {
+                    "seen-arch": len(self.seen_archs),
+                    "sim-test-time": self.sim_cost_time,
+                    "current-arch": None,
+                },
             }


@@ -119,13 +137,17 @@ def main(xargs, nas_bench):
     else:
         dataname = xargs.dataset
     if xargs.data_path is not None:
-        train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
+        train_data, valid_data, xshape, class_num = get_datasets(
+            xargs.dataset, xargs.data_path, -1
+        )
         split_Fpath = "configs/nas-benchmark/cifar-split.txt"
         cifar_split = load_config(split_Fpath, None, None)
         train_split, valid_split = cifar_split.train, cifar_split.valid
         logger.log("Load split file from {:}".format(split_Fpath))
         config_path = "configs/nas-benchmark/algos/R-EA.config"
-        config = load_config(config_path, {"class_num": class_num, "xshape": xshape}, logger)
+        config = load_config(
+            config_path, {"class_num": class_num, "xshape": xshape}, logger
+        )
         # To split data
         train_data_v2 = deepcopy(train_data)
         train_data_v2.transform = valid_data.transform
@@ -152,7 +174,11 @@ def main(xargs, nas_bench):
             )
         )
         logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))
-        extra_info = {"config": config, "train_loader": train_loader, "valid_loader": valid_loader}
+        extra_info = {
+            "config": config,
+            "train_loader": train_loader,
+            "valid_loader": valid_loader,
+        }
     else:
         config_path = "configs/nas-benchmark/algos/R-EA.config"
         config = load_config(config_path, None, logger)
@@ -213,7 +239,11 @@ def main(xargs, nas_bench):

     id2config = results.get_id2config_mapping()
     incumbent = results.get_incumbent_id()
-    logger.log("Best found configuration: {:} within {:.3f} s".format(id2config[incumbent]["config"], real_cost_time))
+    logger.log(
+        "Best found configuration: {:} within {:.3f} s".format(
+            id2config[incumbent]["config"], real_cost_time
+        )
+    )
     best_arch = config2structure(id2config[incumbent]["config"])

     info = nas_bench.query_by_arch(best_arch, "200")
@@ -223,13 +253,19 @@ def main(xargs, nas_bench):
     logger.log("{:}".format(info))
     logger.log("-" * 100)

-    logger.log("workers : {:.1f}s with {:} archs".format(workers[0].time_budget, len(workers[0].seen_archs)))
+    logger.log(
+        "workers : {:.1f}s with {:} archs".format(
+            workers[0].time_budget, len(workers[0].seen_archs)
+        )
+    )
     logger.close()
     return logger.log_dir, nas_bench.query_index_by_arch(best_arch), real_cost_time


 if __name__ == "__main__":
-    parser = argparse.ArgumentParser("BOHB: Robust and Efficient Hyperparameter Optimization at Scale")
+    parser = argparse.ArgumentParser(
+        "BOHB: Robust and Efficient Hyperparameter Optimization at Scale"
+    )
     parser.add_argument("--data_path", type=str, help="Path to dataset")
     parser.add_argument(
         "--dataset",
@@ -241,28 +277,71 @@ if __name__ == "__main__":
     parser.add_argument("--search_space_name", type=str, help="The search space name.")
     parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
     parser.add_argument("--channel", type=int, help="The number of channels.")
-    parser.add_argument("--num_cells", type=int, help="The number of cells in one stage.")
-    parser.add_argument("--time_budget", type=int, help="The total time cost budge for searching (in seconds).")
+    parser.add_argument(
+        "--num_cells", type=int, help="The number of cells in one stage."
+    )
+    parser.add_argument(
+        "--time_budget",
+        type=int,
+        help="The total time cost budge for searching (in seconds).",
+    )
     # BOHB
     parser.add_argument(
-        "--strategy", default="sampling", type=str, nargs="?", help="optimization strategy for the acquisition function"
-    )
-    parser.add_argument("--min_bandwidth", default=0.3, type=float, nargs="?", help="minimum bandwidth for KDE")
-    parser.add_argument(
-        "--num_samples", default=64, type=int, nargs="?", help="number of samples for the acquisition function"
+        "--strategy",
+        default="sampling",
+        type=str,
+        nargs="?",
+        help="optimization strategy for the acquisition function",
     )
     parser.add_argument(
-        "--random_fraction", default=0.33, type=float, nargs="?", help="fraction of random configurations"
+        "--min_bandwidth",
+        default=0.3,
+        type=float,
+        nargs="?",
+        help="minimum bandwidth for KDE",
     )
-    parser.add_argument("--bandwidth_factor", default=3, type=int, nargs="?", help="factor multiplied to the bandwidth")
     parser.add_argument(
-        "--n_iters", default=100, type=int, nargs="?", help="number of iterations for optimization method"
+        "--num_samples",
+        default=64,
+        type=int,
+        nargs="?",
+        help="number of samples for the acquisition function",
     )
+    parser.add_argument(
+        "--random_fraction",
+        default=0.33,
+        type=float,
+        nargs="?",
+        help="fraction of random configurations",
+    )
+    parser.add_argument(
+        "--bandwidth_factor",
+        default=3,
+        type=int,
+        nargs="?",
+        help="factor multiplied to the bandwidth",
+    )
+    parser.add_argument(
+        "--n_iters",
+        default=100,
+        type=int,
+        nargs="?",
+        help="number of iterations for optimization method",
+    )
     # log
-    parser.add_argument("--workers", type=int, default=2, help="number of data loading workers (default: 2)")
-    parser.add_argument("--save_dir", type=str, help="Folder to save checkpoints and log.")
     parser.add_argument(
-        "--arch_nas_dataset", type=str, help="The path to load the architecture dataset (tiny-nas-benchmark)."
+        "--workers",
+        type=int,
+        default=2,
+        help="number of data loading workers (default: 2)",
+    )
+    parser.add_argument(
+        "--save_dir", type=str, help="Folder to save checkpoints and log."
+    )
+    parser.add_argument(
+        "--arch_nas_dataset",
+        type=str,
+        help="The path to load the architecture dataset (tiny-nas-benchmark).",
     )
     parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
     parser.add_argument("--rand_seed", type=int, help="manual seed")
@@ -271,7 +350,11 @@ if __name__ == "__main__":
     if args.arch_nas_dataset is None or not os.path.isfile(args.arch_nas_dataset):
         nas_bench = None
     else:
-        print("{:} build NAS-Benchmark-API from {:}".format(time_string(), args.arch_nas_dataset))
+        print(
+            "{:} build NAS-Benchmark-API from {:}".format(
+                time_string(), args.arch_nas_dataset
+            )
+        )
         nas_bench = API(args.arch_nas_dataset)
     if args.rand_seed < 0:
         save_dir, all_indexes, num, all_times = None, [], 500, []
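The MyWorker edits above follow hpbandster's Worker contract: compute() returns a dict whose "loss" BOHB minimizes, plus a free-form "info" payload. A minimal sketch of that contract, assuming hpbandster is installed (the loss formula below is a placeholder, not this script's benchmark query):

    # Hedged sketch of the hpbandster Worker protocol used by MyWorker above.
    from hpbandster.core.worker import Worker

    class ToyWorker(Worker):
        def compute(self, config, budget, **kwargs):
            # BOHB minimizes "loss"; the real worker derives it from simulated
            # validation accuracy, this one just fakes a deterministic value.
            loss = 100.0 - sum(len(str(v)) for v in config.values()) / (budget + 1.0)
            return {"loss": loss, "info": {"budget": budget}}
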
@@ -13,7 +13,13 @@ if str(lib_dir) not in sys.path:
     sys.path.insert(0, str(lib_dir))
 from config_utils import load_config, dict2config, configure2str
 from datasets import get_datasets, get_nas_search_loaders
-from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
+from procedures import (
+    prepare_seed,
+    prepare_logger,
+    save_checkpoint,
+    copy_checkpoint,
+    get_optim_scheduler,
+)
 from utils import get_model_infos, obtain_accuracy
 from log_utils import AverageMeter, time_string, convert_secs2time
 from models import get_cell_based_tiny_net, get_search_spaces
@@ -21,14 +27,25 @@ from nas_201_api import NASBench201API as API


 def search_func(
-    xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger, gradient_clip
+    xloader,
+    network,
+    criterion,
+    scheduler,
+    w_optimizer,
+    a_optimizer,
+    epoch_str,
+    print_freq,
+    logger,
+    gradient_clip,
 ):
     data_time, batch_time = AverageMeter(), AverageMeter()
     base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
     arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
     network.train()
     end = time.time()
-    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
+    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
+        xloader
+    ):
         scheduler.update(None, 1.0 * step / len(xloader))
         base_targets = base_targets.cuda(non_blocking=True)
         arch_targets = arch_targets.cuda(non_blocking=True)
@@ -44,7 +61,9 @@ def search_func(
         torch.nn.utils.clip_grad_norm_(network.parameters(), gradient_clip)
         w_optimizer.step()
         # record
-        base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
+        base_prec1, base_prec5 = obtain_accuracy(
+            logits.data, base_targets.data, topk=(1, 5)
+        )
         base_losses.update(base_loss.item(), base_inputs.size(0))
         base_top1.update(base_prec1.item(), base_inputs.size(0))
         base_top5.update(base_prec5.item(), base_inputs.size(0))
@@ -56,7 +75,9 @@ def search_func(
         arch_loss.backward()
         a_optimizer.step()
         # record
-        arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
+        arch_prec1, arch_prec5 = obtain_accuracy(
+            logits.data, arch_targets.data, topk=(1, 5)
+        )
         arch_losses.update(arch_loss.item(), arch_inputs.size(0))
         arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
         arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
@@ -66,7 +87,11 @@ def search_func(
         end = time.time()

         if step % print_freq == 0 or step + 1 == len(xloader):
-            Sstr = "*SEARCH* " + time_string() + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
+            Sstr = (
+                "*SEARCH* "
+                + time_string()
+                + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
+            )
             Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                 batch_time=batch_time, data_time=data_time
             )
@@ -94,7 +119,9 @@ def valid_func(xloader, network, criterion):
             _, logits = network(arch_inputs)
             arch_loss = criterion(logits, arch_targets)
             # record
-            arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
+            arch_prec1, arch_prec5 = obtain_accuracy(
+                logits.data, arch_targets.data, topk=(1, 5)
+            )
             arch_losses.update(arch_loss.item(), arch_inputs.size(0))
             arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
             arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
@@ -113,11 +140,20 @@ def main(xargs):
     prepare_seed(xargs.rand_seed)
     logger = prepare_logger(args)

-    train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
+    train_data, valid_data, xshape, class_num = get_datasets(
+        xargs.dataset, xargs.data_path, -1
+    )
     # config_path = 'configs/nas-benchmark/algos/DARTS.config'
-    config = load_config(xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger)
+    config = load_config(
+        xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
+    )
     search_loader, _, valid_loader = get_nas_search_loaders(
-        train_data, valid_data, xargs.dataset, "configs/nas-benchmark/", config.batch_size, xargs.workers
+        train_data,
+        valid_data,
+        xargs.dataset,
+        "configs/nas-benchmark/",
+        config.batch_size,
+        xargs.workers,
     )
     logger.log(
         "||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
@@ -155,9 +191,14 @@ def main(xargs):
     search_model = get_cell_based_tiny_net(model_config)
     logger.log("search-model :\n{:}".format(search_model))

-    w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)
+    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
+        search_model.get_weights(), config
+    )
     a_optimizer = torch.optim.Adam(
-        search_model.get_alphas(), lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay
+        search_model.get_alphas(),
+        lr=xargs.arch_learning_rate,
+        betas=(0.5, 0.999),
+        weight_decay=xargs.arch_weight_decay,
     )
     logger.log("w-optimizer : {:}".format(w_optimizer))
     logger.log("a-optimizer : {:}".format(a_optimizer))
@@ -172,11 +213,17 @@ def main(xargs):
         api = API(xargs.arch_nas_dataset)
     logger.log("{:} create API = {:} done".format(time_string(), api))

-    last_info, model_base_path, model_best_path = logger.path("info"), logger.path("model"), logger.path("best")
+    last_info, model_base_path, model_best_path = (
+        logger.path("info"),
+        logger.path("model"),
+        logger.path("best"),
+    )
     network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

     if last_info.exists():  # automatically resume from previous checkpoint
-        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
+        logger.log(
+            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
+        )
         last_info = torch.load(last_info)
         start_epoch = last_info["epoch"]
         checkpoint = torch.load(last_info["last_checkpoint"])
@@ -187,11 +234,17 @@ def main(xargs):
         w_optimizer.load_state_dict(checkpoint["w_optimizer"])
         a_optimizer.load_state_dict(checkpoint["a_optimizer"])
         logger.log(
-            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch)
+            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
+                last_info, start_epoch
+            )
         )
     else:
         logger.log("=> do not find the last-info file : {:}".format(last_info))
-        start_epoch, valid_accuracies, genotypes = 0, {"best": -1}, {-1: search_model.genotype()}
+        start_epoch, valid_accuracies, genotypes = (
+            0,
+            {"best": -1},
+            {-1: search_model.genotype()},
+        )

     # start training
     start_time, search_time, epoch_time, total_epoch = (
@@ -202,9 +255,15 @@ def main(xargs):
     )
     for epoch in range(start_epoch, total_epoch):
         w_scheduler.update(epoch, 0.0)
-        need_time = "Time Left: {:}".format(convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
+        need_time = "Time Left: {:}".format(
+            convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
+        )
         epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
-        logger.log("\n[Search the {:}-th epoch] {:}, LR={:}".format(epoch_str, need_time, min(w_scheduler.get_lr())))
+        logger.log(
+            "\n[Search the {:}-th epoch] {:}, LR={:}".format(
+                epoch_str, need_time, min(w_scheduler.get_lr())
+            )
+        )

         search_w_loss, search_w_top1, search_w_top5 = search_func(
             search_loader,
@@ -224,7 +283,9 @@ def main(xargs):
                 epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
             )
         )
-        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
+        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
+            valid_loader, network, criterion
+        )
         logger.log(
             "[{:}] evaluate  : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
                 epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
@@ -240,7 +301,9 @@ def main(xargs):
             find_best = False

         genotypes[epoch] = search_model.genotype()
-        logger.log("<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch]))
+        logger.log(
+            "<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
+        )
         # save checkpoint
         save_path = save_checkpoint(
             {
@@ -305,7 +368,9 @@ if __name__ == "__main__":
     parser.add_argument("--search_space_name", type=str, help="The search space name.")
     parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
     parser.add_argument("--channel", type=int, help="The number of channels.")
-    parser.add_argument("--num_cells", type=int, help="The number of cells in one stage.")
+    parser.add_argument(
+        "--num_cells", type=int, help="The number of cells in one stage."
+    )
     parser.add_argument(
         "--track_running_stats",
         type=int,
@@ -320,13 +385,32 @@ if __name__ == "__main__":
     )
     parser.add_argument("--gradient_clip", type=float, default=5, help="")
     # architecture leraning rate
-    parser.add_argument("--arch_learning_rate", type=float, default=3e-4, help="learning rate for arch encoding")
-    parser.add_argument("--arch_weight_decay", type=float, default=1e-3, help="weight decay for arch encoding")
-    # log
-    parser.add_argument("--workers", type=int, default=2, help="number of data loading workers (default: 2)")
-    parser.add_argument("--save_dir", type=str, help="Folder to save checkpoints and log.")
     parser.add_argument(
-        "--arch_nas_dataset", type=str, help="The path to load the architecture dataset (nas-benchmark)."
+        "--arch_learning_rate",
+        type=float,
+        default=3e-4,
+        help="learning rate for arch encoding",
+    )
+    parser.add_argument(
+        "--arch_weight_decay",
+        type=float,
+        default=1e-3,
+        help="weight decay for arch encoding",
+    )
+    # log
+    parser.add_argument(
+        "--workers",
+        type=int,
+        default=2,
+        help="number of data loading workers (default: 2)",
+    )
+    parser.add_argument(
+        "--save_dir", type=str, help="Folder to save checkpoints and log."
+    )
+    parser.add_argument(
+        "--arch_nas_dataset",
+        type=str,
+        help="The path to load the architecture dataset (nas-benchmark).",
     )
     parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
     parser.add_argument("--rand_seed", type=int, help="manual seed")
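For context on the search_func hunks above: each mini-batch performs one weight step on the "base" split (with gradient clipping) and one architecture step on the "arch" split. A condensed sketch of that alternating update, assuming a model that returns (features, logits) like the search models here:

    # Hedged sketch of the bi-level alternation inside search_func above.
    import torch

    def alternating_step(network, criterion, w_optimizer, a_optimizer,
                         base_inputs, base_targets, arch_inputs, arch_targets,
                         gradient_clip=5.0):
        # 1) weight step on the "base" split
        w_optimizer.zero_grad()
        _, logits = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        torch.nn.utils.clip_grad_norm_(network.parameters(), gradient_clip)
        w_optimizer.step()
        # 2) architecture step on the "arch" (validation) split
        a_optimizer.zero_grad()
        _, logits = network(arch_inputs)
        arch_loss = criterion(logits, arch_targets)
        arch_loss.backward()
        a_optimizer.step()
        return base_loss.item(), arch_loss.item()
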
@@ -15,7 +15,13 @@ if str(lib_dir) not in sys.path:
     sys.path.insert(0, str(lib_dir))
 from config_utils import load_config, dict2config, configure2str
 from datasets import get_datasets, get_nas_search_loaders
-from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
+from procedures import (
+    prepare_seed,
+    prepare_logger,
+    save_checkpoint,
+    copy_checkpoint,
+    get_optim_scheduler,
+)
 from utils import get_model_infos, obtain_accuracy
 from log_utils import AverageMeter, time_string, convert_secs2time
 from models import get_cell_based_tiny_net, get_search_spaces
@@ -26,7 +32,9 @@ def _concat(xs):
     return torch.cat([x.view(-1) for x in xs])


-def _hessian_vector_product(vector, network, criterion, base_inputs, base_targets, r=1e-2):
+def _hessian_vector_product(
+    vector, network, criterion, base_inputs, base_targets, r=1e-2
+):
     R = r / _concat(vector).norm()
     for p, v in zip(network.module.get_weights(), vector):
         p.data.add_(R, v)
@@ -45,7 +53,15 @@ def _hessian_vector_product(vector, network, criterion, base_inputs, base_target
     return [(x - y).div_(2 * R) for x, y in zip(grads_p, grads_n)]


-def backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets):
+def backward_step_unrolled(
+    network,
+    criterion,
+    base_inputs,
+    base_targets,
+    w_optimizer,
+    arch_inputs,
+    arch_targets,
+):
     # _compute_unrolled_model
     _, logits = network(base_inputs)
     loss = criterion(logits, base_targets)
@@ -57,11 +73,17 @@ def backward_step_unrolled(network, criterion, base_inputs, base_targets, w_opti
     with torch.no_grad():
         theta = _concat(network.module.get_weights())
         try:
-            moment = _concat(w_optimizer.state[v]["momentum_buffer"] for v in network.module.get_weights())
+            moment = _concat(
+                w_optimizer.state[v]["momentum_buffer"]
+                for v in network.module.get_weights()
+            )
             moment = moment.mul_(momentum)
         except:
             moment = torch.zeros_like(theta)
-        dtheta = _concat(torch.autograd.grad(loss, network.module.get_weights())) + WD * theta
+        dtheta = (
+            _concat(torch.autograd.grad(loss, network.module.get_weights()))
+            + WD * theta
+        )
         params = theta.sub(LR, moment + dtheta)
         unrolled_model = deepcopy(network)
         model_dict = unrolled_model.state_dict()
@@ -82,7 +104,9 @@ def backward_step_unrolled(network, criterion, base_inputs, base_targets, w_opti

     dalpha = unrolled_model.module.arch_parameters.grad
     vector = [v.grad.data for v in unrolled_model.module.get_weights()]
-    [implicit_grads] = _hessian_vector_product(vector, network, criterion, base_inputs, base_targets)
+    [implicit_grads] = _hessian_vector_product(
+        vector, network, criterion, base_inputs, base_targets
+    )

     dalpha.data.sub_(LR, implicit_grads.data)

@@ -93,13 +117,25 @@ def backward_step_unrolled(network, criterion, base_inputs, base_targets, w_opti
     return unrolled_loss.detach(), unrolled_logits.detach()


-def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger):
+def search_func(
+    xloader,
+    network,
+    criterion,
+    scheduler,
+    w_optimizer,
+    a_optimizer,
+    epoch_str,
+    print_freq,
+    logger,
+):
     data_time, batch_time = AverageMeter(), AverageMeter()
     base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
     arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
     network.train()
     end = time.time()
-    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
+    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
+        xloader
+    ):
         scheduler.update(None, 1.0 * step / len(xloader))
         base_targets = base_targets.cuda(non_blocking=True)
         arch_targets = arch_targets.cuda(non_blocking=True)
@@ -109,11 +145,19 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
         # update the architecture-weight
         a_optimizer.zero_grad()
         arch_loss, arch_logits = backward_step_unrolled(
-            network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets
+            network,
+            criterion,
+            base_inputs,
+            base_targets,
+            w_optimizer,
+            arch_inputs,
+            arch_targets,
         )
         a_optimizer.step()
         # record
-        arch_prec1, arch_prec5 = obtain_accuracy(arch_logits.data, arch_targets.data, topk=(1, 5))
+        arch_prec1, arch_prec5 = obtain_accuracy(
+            arch_logits.data, arch_targets.data, topk=(1, 5)
+        )
         arch_losses.update(arch_loss.item(), arch_inputs.size(0))
         arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
         arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
@@ -126,7 +170,9 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
         torch.nn.utils.clip_grad_norm_(network.parameters(), 5)
         w_optimizer.step()
         # record
-        base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
+        base_prec1, base_prec5 = obtain_accuracy(
+            logits.data, base_targets.data, topk=(1, 5)
+        )
         base_losses.update(base_loss.item(), base_inputs.size(0))
         base_top1.update(base_prec1.item(), base_inputs.size(0))
         base_top5.update(base_prec5.item(), base_inputs.size(0))
@@ -136,7 +182,11 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
         end = time.time()

         if step % print_freq == 0 or step + 1 == len(xloader):
-            Sstr = "*SEARCH* " + time_string() + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
+            Sstr = (
+                "*SEARCH* "
+                + time_string()
+                + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
+            )
             Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                 batch_time=batch_time, data_time=data_time
             )
@@ -164,7 +214,9 @@ def valid_func(xloader, network, criterion):
             _, logits = network(arch_inputs)
             arch_loss = criterion(logits, arch_targets)
             # record
-            arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
+            arch_prec1, arch_prec5 = obtain_accuracy(
+                logits.data, arch_targets.data, topk=(1, 5)
+            )
             arch_losses.update(arch_loss.item(), arch_inputs.size(0))
             arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
             arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
@@ -183,10 +235,19 @@ def main(xargs):
     prepare_seed(xargs.rand_seed)
     logger = prepare_logger(args)

-    train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
-    config = load_config(xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger)
+    train_data, valid_data, xshape, class_num = get_datasets(
+        xargs.dataset, xargs.data_path, -1
+    )
+    config = load_config(
+        xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
+    )
     search_loader, _, valid_loader = get_nas_search_loaders(
-        train_data, valid_data, xargs.dataset, "configs/nas-benchmark/", config.batch_size, xargs.workers
+        train_data,
+        valid_data,
+        xargs.dataset,
+        "configs/nas-benchmark/",
+        config.batch_size,
+        xargs.workers,
     )
     logger.log(
         "||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
@@ -212,9 +273,14 @@ def main(xargs):
     search_model = get_cell_based_tiny_net(model_config)
     logger.log("search-model :\n{:}".format(search_model))

-    w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)
+    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
+        search_model.get_weights(), config
+    )
     a_optimizer = torch.optim.Adam(
-        search_model.get_alphas(), lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay
+        search_model.get_alphas(),
+        lr=xargs.arch_learning_rate,
+        betas=(0.5, 0.999),
+        weight_decay=xargs.arch_weight_decay,
     )
     logger.log("w-optimizer : {:}".format(w_optimizer))
     logger.log("a-optimizer : {:}".format(a_optimizer))
@@ -229,11 +295,17 @@ def main(xargs):
         api = API(xargs.arch_nas_dataset)
     logger.log("{:} create API = {:} done".format(time_string(), api))

-    last_info, model_base_path, model_best_path = logger.path("info"), logger.path("model"), logger.path("best")
+    last_info, model_base_path, model_best_path = (
+        logger.path("info"),
+        logger.path("model"),
+        logger.path("best"),
+    )
     network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

     if last_info.exists():  # automatically resume from previous checkpoint
-        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
+        logger.log(
+            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
+        )
         last_info = torch.load(last_info)
         start_epoch = last_info["epoch"]
         checkpoint = torch.load(last_info["last_checkpoint"])
@@ -244,11 +316,17 @@ def main(xargs):
         w_optimizer.load_state_dict(checkpoint["w_optimizer"])
         a_optimizer.load_state_dict(checkpoint["a_optimizer"])
         logger.log(
-            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch)
+            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
+                last_info, start_epoch
+            )
         )
     else:
         logger.log("=> do not find the last-info file : {:}".format(last_info))
-        start_epoch, valid_accuracies, genotypes = 0, {"best": -1}, {-1: search_model.genotype()}
+        start_epoch, valid_accuracies, genotypes = (
+            0,
+            {"best": -1},
+            {-1: search_model.genotype()},
+        )

     # start training
     start_time, search_time, epoch_time, total_epoch = (
@@ -259,10 +337,16 @@ def main(xargs):
     )
     for epoch in range(start_epoch, total_epoch):
         w_scheduler.update(epoch, 0.0)
-        need_time = "Time Left: {:}".format(convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
+        need_time = "Time Left: {:}".format(
+            convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
+        )
         epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
         min_LR = min(w_scheduler.get_lr())
-        logger.log("\n[Search the {:}-th epoch] {:}, LR={:}".format(epoch_str, need_time, min_LR))
+        logger.log(
+            "\n[Search the {:}-th epoch] {:}, LR={:}".format(
+                epoch_str, need_time, min_LR
+            )
+        )

         search_w_loss, search_w_top1, search_w_top5 = search_func(
             search_loader,
@@ -281,7 +365,9 @@ def main(xargs):
                 epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
             )
         )
-        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
+        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
+            valid_loader, network, criterion
+        )
         logger.log(
             "[{:}] evaluate  : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
                 epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
@@ -297,7 +383,9 @@ def main(xargs):
             find_best = False

         genotypes[epoch] = search_model.genotype()
-        logger.log("<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch]))
+        logger.log(
+            "<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
+        )
         # save checkpoint
         save_path = save_checkpoint(
             {
@@ -331,7 +419,9 @@ def main(xargs):
             copy_checkpoint(model_base_path, model_best_path, logger)
         with torch.no_grad():
             logger.log(
-                "arch-parameters :\n{:}".format(nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu())
+                "arch-parameters :\n{:}".format(
+                    nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu()
+                )
             )
         if api is not None:
             logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
@@ -365,7 +455,9 @@ if __name__ == "__main__":
     parser.add_argument("--search_space_name", type=str, help="The search space name.")
     parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
     parser.add_argument("--channel", type=int, help="The number of channels.")
-    parser.add_argument("--num_cells", type=int, help="The number of cells in one stage.")
+    parser.add_argument(
+        "--num_cells", type=int, help="The number of cells in one stage."
+    )
     parser.add_argument(
         "--track_running_stats",
         type=int,
@@ -373,13 +465,32 @@ if __name__ == "__main__":
         help="Whether use track_running_stats or not in the BN layer.",
     )
     # architecture leraning rate
-    parser.add_argument("--arch_learning_rate", type=float, default=3e-4, help="learning rate for arch encoding")
-    parser.add_argument("--arch_weight_decay", type=float, default=1e-3, help="weight decay for arch encoding")
-    # log
-    parser.add_argument("--workers", type=int, default=2, help="number of data loading workers (default: 2)")
-    parser.add_argument("--save_dir", type=str, help="Folder to save checkpoints and log.")
     parser.add_argument(
-        "--arch_nas_dataset", type=str, help="The path to load the architecture dataset (tiny-nas-benchmark)."
+        "--arch_learning_rate",
+        type=float,
+        default=3e-4,
+        help="learning rate for arch encoding",
+    )
+    parser.add_argument(
+        "--arch_weight_decay",
+        type=float,
+        default=1e-3,
+        help="weight decay for arch encoding",
+    )
+    # log
+    parser.add_argument(
+        "--workers",
+        type=int,
+        default=2,
+        help="number of data loading workers (default: 2)",
+    )
+    parser.add_argument(
+        "--save_dir", type=str, help="Folder to save checkpoints and log."
+    )
+    parser.add_argument(
+        "--arch_nas_dataset",
+        type=str,
+        help="The path to load the architecture dataset (tiny-nas-benchmark).",
    )
     parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
     parser.add_argument("--rand_seed", type=int, help="manual seed")
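_hessian_vector_product above approximates the second-order DARTS term by central finite differences: with R = r / ||v||, the weights are perturbed to w + R·v and w - R·v, and the architecture gradients are differenced as (grad_a L(w+Rv) - grad_a L(w-Rv)) / (2R). A self-contained toy sketch of the same trick, not the script's exact implementation:

    # Hedged sketch of the finite-difference Hessian-vector product used in
    # _hessian_vector_product above, on a toy loss f(w, a) = sum((w * a) ** 2).
    import torch

    def hvp_finite_difference(loss_fn, w, a, vector, r=1e-2):
        R = r / vector.norm()
        grads = []
        for sign in (+1.0, -1.0):
            # perturb the weights by +/- R * vector, then differentiate w.r.t. a
            w_shift = (w.detach() + sign * R * vector).requires_grad_(True)
            grads.append(torch.autograd.grad(loss_fn(w_shift, a), a)[0])
        return (grads[0] - grads[1]) / (2 * R)

    w = torch.randn(3, requires_grad=True)
    a = torch.randn(3, requires_grad=True)
    loss_fn = lambda wx, ax: ((wx * ax) ** 2).sum()
    print(hvp_finite_difference(loss_fn, w, a, torch.randn(3)))
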
@@ -15,16 +15,37 @@ if str(lib_dir) not in sys.path:
     sys.path.insert(0, str(lib_dir))
 from config_utils import load_config, dict2config, configure2str
 from datasets import get_datasets, get_nas_search_loaders
-from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
+from procedures import (
+    prepare_seed,
+    prepare_logger,
+    save_checkpoint,
+    copy_checkpoint,
+    get_optim_scheduler,
+)
 from utils import get_model_infos, obtain_accuracy
 from log_utils import AverageMeter, time_string, convert_secs2time
 from models import get_cell_based_tiny_net, get_search_spaces
 from nas_201_api import NASBench201API as API


-def train_shared_cnn(xloader, shared_cnn, controller, criterion, scheduler, optimizer, epoch_str, print_freq, logger):
+def train_shared_cnn(
+    xloader,
+    shared_cnn,
+    controller,
+    criterion,
+    scheduler,
+    optimizer,
+    epoch_str,
+    print_freq,
+    logger,
+):
     data_time, batch_time = AverageMeter(), AverageMeter()
-    losses, top1s, top5s, xend = AverageMeter(), AverageMeter(), AverageMeter(), time.time()
+    losses, top1s, top5s, xend = (
+        AverageMeter(),
+        AverageMeter(),
+        AverageMeter(),
+        time.time(),
+    )

     shared_cnn.train()
     controller.eval()
@@ -56,7 +77,11 @@ def train_shared_cnn(xloader, shared_cnn, controller, criterion, scheduler, opti
         xend = time.time()

         if step % print_freq == 0 or step + 1 == len(xloader):
-            Sstr = "*Train-Shared-CNN* " + time_string() + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
+            Sstr = (
+                "*Train-Shared-CNN* "
+                + time_string()
+                + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
+            )
             Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                 batch_time=batch_time, data_time=data_time
             )
@@ -67,11 +92,29 @@ def train_shared_cnn(xloader, shared_cnn, controller, criterion, scheduler, opti
     return losses.avg, top1s.avg, top5s.avg


-def train_controller(xloader, shared_cnn, controller, criterion, optimizer, config, epoch_str, print_freq, logger):
+def train_controller(
+    xloader,
+    shared_cnn,
+    controller,
+    criterion,
+    optimizer,
+    config,
+    epoch_str,
+    print_freq,
+    logger,
+):
     # config. (containing some necessary arg)
     # baseline: The baseline score (i.e. average val_acc) from the previous epoch
     data_time, batch_time = AverageMeter(), AverageMeter()
-    GradnormMeter, LossMeter, ValAccMeter, EntropyMeter, BaselineMeter, RewardMeter, xend = (
+    (
+        GradnormMeter,
+        LossMeter,
+        ValAccMeter,
+        EntropyMeter,
+        BaselineMeter,
+        RewardMeter,
+        xend,
+    ) = (
         AverageMeter(),
         AverageMeter(),
         AverageMeter(),
@@ -106,7 +149,9 @@ def train_controller(xloader, shared_cnn, controller, criterion, optimizer, conf
         if config.baseline is None:
             baseline = val_top1
         else:
-            baseline = config.baseline - (1 - config.ctl_bl_dec) * (config.baseline - reward)
+            baseline = config.baseline - (1 - config.ctl_bl_dec) * (
+                config.baseline - reward
+            )

         loss = -1 * log_prob * (reward - baseline)

@@ -134,18 +179,29 @@ def train_controller(xloader, shared_cnn, controller, criterion, optimizer, conf
             Sstr = (
                 "*Train-Controller* "
                 + time_string()
-                + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, config.ctl_train_steps * config.ctl_num_aggre)
+                + " [{:}][{:03d}/{:03d}]".format(
+                    epoch_str, step, config.ctl_train_steps * config.ctl_num_aggre
+                )
             )
             Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                 batch_time=batch_time, data_time=data_time
             )
             Wstr = "[Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Reward {reward.val:.2f} ({reward.avg:.2f})] Baseline {basel.val:.2f} ({basel.avg:.2f})".format(
-                loss=LossMeter, top1=ValAccMeter, reward=RewardMeter, basel=BaselineMeter
+                loss=LossMeter,
+                top1=ValAccMeter,
+                reward=RewardMeter,
+                basel=BaselineMeter,
             )
             Estr = "Entropy={:.4f} ({:.4f})".format(EntropyMeter.val, EntropyMeter.avg)
             logger.log(Sstr + " " + Tstr + " " + Wstr + " " + Estr)

-    return LossMeter.avg, ValAccMeter.avg, BaselineMeter.avg, RewardMeter.avg, baseline.item()
+    return (
+        LossMeter.avg,
+        ValAccMeter.avg,
+        BaselineMeter.avg,
+        RewardMeter.avg,
+        baseline.item(),
+    )


 def get_best_arch(controller, shared_cnn, xloader, n_samples=10):
@@ -164,7 +220,9 @@ def get_best_arch(controller, shared_cnn, xloader, n_samples=10):
             _, _, sampled_arch = controller()
             arch = shared_cnn.module.update_arch(sampled_arch)
             _, logits = shared_cnn(inputs)
-            val_top1, val_top5 = obtain_accuracy(logits.cpu().data, targets.data, topk=(1, 5))
+            val_top1, val_top5 = obtain_accuracy(
+                logits.cpu().data, targets.data, topk=(1, 5)
+            )

             archs.append(arch)
             valid_accs.append(val_top1.item())
@@ -188,7 +246,9 @@ def valid_func(xloader, network, criterion):
             _, logits = network(arch_inputs)
             arch_loss = criterion(logits, arch_targets)
             # record
-            arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
+            arch_prec1, arch_prec5 = obtain_accuracy(
+                logits.data, arch_targets.data, topk=(1, 5)
+            )
             arch_losses.update(arch_loss.item(), arch_inputs.size(0))
             arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
             arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
@@ -207,11 +267,20 @@ def main(xargs):
     prepare_seed(xargs.rand_seed)
     logger = prepare_logger(args)

-    train_data, test_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
+    train_data, test_data, xshape, class_num = get_datasets(
+        xargs.dataset, xargs.data_path, -1
+    )
     logger.log("use config from : {:}".format(xargs.config_path))
-    config = load_config(xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger)
+    config = load_config(
+        xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
+    )
     _, train_loader, valid_loader = get_nas_search_loaders(
-        train_data, test_data, xargs.dataset, "configs/nas-benchmark/", config.batch_size, xargs.workers
+        train_data,
+        test_data,
+        xargs.dataset,
+        "configs/nas-benchmark/",
+        config.batch_size,
+        xargs.workers,
     )
     # since ENAS will train the controller on valid-loader, we need to use train transformation for valid-loader
     valid_loader.dataset.transform = deepcopy(train_loader.dataset.transform)
@@ -242,9 +311,14 @@ def main(xargs):
     shared_cnn = get_cell_based_tiny_net(model_config)
     controller = shared_cnn.create_controller()

-    w_optimizer, w_scheduler, criterion = get_optim_scheduler(shared_cnn.parameters(), config)
+    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
+        shared_cnn.parameters(), config
+    )
     a_optimizer = torch.optim.Adam(
-        controller.parameters(), lr=config.controller_lr, betas=config.controller_betas, eps=config.controller_eps
+        controller.parameters(),
+        lr=config.controller_lr,
+        betas=config.controller_betas,
+        eps=config.controller_eps,
     )
     logger.log("w-optimizer : {:}".format(w_optimizer))
     logger.log("a-optimizer : {:}".format(a_optimizer))
@@ -259,12 +333,22 @@ def main(xargs):
     else:
         api = API(xargs.arch_nas_dataset)
     logger.log("{:} create API = {:} done".format(time_string(), api))
-    shared_cnn, controller, criterion = torch.nn.DataParallel(shared_cnn).cuda(), controller.cuda(), criterion.cuda()
+    shared_cnn, controller, criterion = (
+        torch.nn.DataParallel(shared_cnn).cuda(),
+        controller.cuda(),
+        criterion.cuda(),
+    )

-    last_info, model_base_path, model_best_path = logger.path("info"), logger.path("model"), logger.path("best")
+    last_info, model_base_path, model_best_path = (
+        logger.path("info"),
+        logger.path("model"),
+        logger.path("best"),
+    )

     if last_info.exists():  # automatically resume from previous checkpoint
-        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
+        logger.log(
+            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
+        )
         last_info = torch.load(last_info)
         start_epoch = last_info["epoch"]
         checkpoint = torch.load(last_info["last_checkpoint"])
@@ -277,7 +361,9 @@ def main(xargs):
         w_optimizer.load_state_dict(checkpoint["w_optimizer"])
         a_optimizer.load_state_dict(checkpoint["a_optimizer"])
         logger.log(
-            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch)
+            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
+                last_info, start_epoch
+            )
         )
     else:
         logger.log("=> do not find the last-info file : {:}".format(last_info))
@@ -292,7 +378,9 @@ def main(xargs):
     )
     for epoch in range(start_epoch, total_epoch):
         w_scheduler.update(epoch, 0.0)
-        need_time = "Time Left: {:}".format(convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
+        need_time = "Time Left: {:}".format(
+            convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
+        )
         epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
         logger.log(
             "\n[Search the {:}-th epoch] {:}, LR={:}, baseline={:}".format(
@@ -339,7 +427,13 @@ def main(xargs):
         search_time.update(time.time() - start_time)
         logger.log(
             "[{:}] controller : loss={:.2f}, accuracy={:.2f}%, baseline={:.2f}, reward={:.2f}, current-baseline={:.4f}, time-cost={:.1f} s".format(
-                epoch_str, ctl_loss, ctl_acc, ctl_baseline, ctl_reward, baseline, search_time.sum
+                epoch_str,
+                ctl_loss,
+                ctl_acc,
+                ctl_baseline,
+                ctl_reward,
+                baseline,
+                search_time.sum,
             )
         )
         best_arch, _ = get_best_arch(controller, shared_cnn, valid_loader)
@@ -356,7 +450,9 @@ def main(xargs):
         else:
             find_best = False

-        logger.log("<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch]))
+        logger.log(
+            "<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
+        )
         # save checkpoint
         save_path = save_checkpoint(
             {
@@ -397,18 +493,32 @@ def main(xargs):
     start_time = time.time()

     logger.log("\n" + "-" * 100)
-    logger.log("During searching, the best architecture is {:}".format(genotypes["best"]))
+    logger.log(
+        "During searching, the best architecture is {:}".format(genotypes["best"])
+    )
     logger.log("Its accuracy is {:.2f}%".format(valid_accuracies["best"]))
-    logger.log("Randomly select {:} architectures and select the best.".format(xargs.controller_num_samples))
+    logger.log(
+        "Randomly select {:} architectures and select the best.".format(
+            xargs.controller_num_samples
+        )
+    )
     start_time = time.time()
-    final_arch, _ = get_best_arch(controller, shared_cnn, valid_loader, xargs.controller_num_samples)
+    final_arch, _ = get_best_arch(
+        controller, shared_cnn, valid_loader, xargs.controller_num_samples
+    )
     search_time.update(time.time() - start_time)
     shared_cnn.module.update_arch(final_arch)
     final_loss, final_top1, final_top5 = valid_func(valid_loader, shared_cnn, criterion)
     logger.log("The Selected Final Architecture : {:}".format(final_arch))
-    logger.log("Loss={:.3f}, Accuracy@1={:.2f}%, Accuracy@5={:.2f}%".format(final_loss, final_top1, final_top5))
     logger.log(
-        "ENAS : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(total_epoch, search_time.sum, final_arch)
+        "Loss={:.3f}, Accuracy@1={:.2f}%, Accuracy@5={:.2f}%".format(
+            final_loss, final_top1, final_top5
+        )
+    )
+    logger.log(
+        "ENAS : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(
+            total_epoch, search_time.sum, final_arch
+        )
     )
     if api is not None:
         logger.log("{:}".format(api.query_by_arch(final_arch)))
@@ -434,18 +544,35 @@ if __name__ == "__main__":
     parser.add_argument("--search_space_name", type=str, help="The search space name.")
     parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
     parser.add_argument("--channel", type=int, help="The number of channels.")
-    parser.add_argument("--num_cells", type=int, help="The number of cells in one stage.")
-    parser.add_argument("--config_path", type=str, help="The config file to train ENAS.")
+    parser.add_argument(
+        "--num_cells", type=int, help="The number of cells in one stage."
+    )
+    parser.add_argument(
+        "--config_path", type=str, help="The config file to train ENAS."
+    )
     parser.add_argument("--controller_train_steps", type=int, help=".")
     parser.add_argument("--controller_num_aggregate", type=int, help=".")
-    parser.add_argument("--controller_entropy_weight", type=float, help="The weight for the entropy of the controller.")
+    parser.add_argument(
+        "--controller_entropy_weight",
+        type=float,
+        help="The weight for the entropy of the controller.",
+    )
     parser.add_argument("--controller_bl_dec", type=float, help=".")
     parser.add_argument("--controller_num_samples", type=int, help=".")
     # log
-    parser.add_argument("--workers", type=int, default=2, help="number of data loading workers (default: 2)")
-    parser.add_argument("--save_dir", type=str, help="Folder to save checkpoints and log.")
     parser.add_argument(
-        "--arch_nas_dataset", type=str, help="The path to load the architecture dataset (nas-benchmark)."
+        "--workers",
+        type=int,
+        default=2,
+        help="number of data loading workers (default: 2)",
+    )
+    parser.add_argument(
+        "--save_dir", type=str, help="Folder to save checkpoints and log."
+    )
+    parser.add_argument(
+        "--arch_nas_dataset",
+        type=str,
+        help="The path to load the architecture dataset (nas-benchmark).",
     )
     parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
     parser.add_argument("--rand_seed", type=int, help="manual seed")
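train_controller above uses the classic REINFORCE recipe with an exponential-moving-average baseline: baseline becomes baseline - (1 - bl_dec) * (baseline - reward), and the policy loss is -log_prob * (reward - baseline). A tiny numeric sketch of that recurrence (the reward values are made up for illustration):

    # Hedged sketch of the moving-average baseline update in train_controller.
    def update_baseline(baseline, reward, bl_dec=0.99):
        if baseline is None:
            return reward  # first reward seeds the baseline
        return baseline - (1 - bl_dec) * (baseline - reward)

    baseline = None
    for reward in [0.60, 0.62, 0.61, 0.65]:
        baseline = update_baseline(baseline, reward)
        advantage = reward - baseline  # scales -log_prob in the policy loss
        print("baseline={:.4f} advantage={:.4f}".format(baseline, advantage))
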
@@ -13,20 +13,38 @@ if str(lib_dir) not in sys.path:
     sys.path.insert(0, str(lib_dir))
 from config_utils import load_config, dict2config
 from datasets import get_datasets, get_nas_search_loaders
-from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
+from procedures import (
+    prepare_seed,
+    prepare_logger,
+    save_checkpoint,
+    copy_checkpoint,
+    get_optim_scheduler,
+)
 from utils import get_model_infos, obtain_accuracy
 from log_utils import AverageMeter, time_string, convert_secs2time
 from models import get_cell_based_tiny_net, get_search_spaces
 from nas_201_api import NASBench201API as API


-def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger):
+def search_func(
+    xloader,
+    network,
+    criterion,
+    scheduler,
+    w_optimizer,
+    a_optimizer,
+    epoch_str,
+    print_freq,
+    logger,
+):
     data_time, batch_time = AverageMeter(), AverageMeter()
     base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
     arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
     network.train()
     end = time.time()
-    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
+    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
+        xloader
+    ):
         scheduler.update(None, 1.0 * step / len(xloader))
         base_targets = base_targets.cuda(non_blocking=True)
         arch_targets = arch_targets.cuda(non_blocking=True)
@@ -41,7 +59,9 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
         torch.nn.utils.clip_grad_norm_(network.parameters(), 5)
         w_optimizer.step()
         # record
-        base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
+        base_prec1, base_prec5 = obtain_accuracy(
+            logits.data, base_targets.data, topk=(1, 5)
+        )
         base_losses.update(base_loss.item(), base_inputs.size(0))
         base_top1.update(base_prec1.item(), base_inputs.size(0))
         base_top5.update(base_prec5.item(), base_inputs.size(0))
@@ -53,7 +73,9 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
         arch_loss.backward()
         a_optimizer.step()
         # record
-        arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
+        arch_prec1, arch_prec5 = obtain_accuracy(
+            logits.data, arch_targets.data, topk=(1, 5)
+        )
         arch_losses.update(arch_loss.item(), arch_inputs.size(0))
         arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
         arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
@@ -63,7 +85,11 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
         end = time.time()

         if step % print_freq == 0 or step + 1 == len(xloader):
-            Sstr = "*SEARCH* " + time_string() + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
+            Sstr = (
+                "*SEARCH* "
+                + time_string()
+                + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
+            )
             Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                 batch_time=batch_time, data_time=data_time
             )
@@ -74,7 +100,14 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
                 loss=arch_losses, top1=arch_top1, top5=arch_top5
             )
             logger.log(Sstr + " " + Tstr + " " + Wstr + " " + Astr)
-    return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg
+    return (
+        base_losses.avg,
+        base_top1.avg,
+        base_top5.avg,
+        arch_losses.avg,
+        arch_top1.avg,
+        arch_top5.avg,
+    )


 def main(xargs):
@@ -86,11 +119,20 @@ def main(xargs):
     prepare_seed(xargs.rand_seed)
     logger = prepare_logger(args)

-    train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
+    train_data, valid_data, xshape, class_num = get_datasets(
+        xargs.dataset, xargs.data_path, -1
+    )
     # config_path = 'configs/nas-benchmark/algos/GDAS.config'
-    config = load_config(xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger)
+    config = load_config(
+        xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
+    )
     search_loader, _, valid_loader = get_nas_search_loaders(
-        train_data, valid_data, xargs.dataset, "configs/nas-benchmark/", config.batch_size, xargs.workers
+        train_data,
+        valid_data,
+        xargs.dataset,
+        "configs/nas-benchmark/",
+        config.batch_size,
+        xargs.workers,
     )
     logger.log(
         "||||||| {:10s} ||||||| Search-Loader-Num={:}, batch size={:}".format(
@@ -129,9 +171,14 @@ def main(xargs):
     logger.log("search-model :\n{:}".format(search_model))
     logger.log("model-config : {:}".format(model_config))

-    w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)
+    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
+        search_model.get_weights(), config
+    )
     a_optimizer = torch.optim.Adam(
-        search_model.get_alphas(), lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay
+        search_model.get_alphas(),
+        lr=xargs.arch_learning_rate,
+        betas=(0.5, 0.999),
+        weight_decay=xargs.arch_weight_decay,
     )
     logger.log("w-optimizer : {:}".format(w_optimizer))
     logger.log("a-optimizer : {:}".format(a_optimizer))
@@ -146,11 +193,17 @@ def main(xargs):
|
||||
api = API(xargs.arch_nas_dataset)
|
||||
logger.log("{:} create API = {:} done".format(time_string(), api))
|
||||
|
||||
last_info, model_base_path, model_best_path = logger.path("info"), logger.path("model"), logger.path("best")
|
||||
last_info, model_base_path, model_best_path = (
|
||||
logger.path("info"),
|
||||
logger.path("model"),
|
||||
logger.path("best"),
|
||||
)
|
||||
network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()
|
||||
|
||||
if last_info.exists(): # automatically resume from previous checkpoint
|
||||
logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
|
||||
logger.log(
|
||||
"=> loading checkpoint of the last-info '{:}' start".format(last_info)
|
||||
)
|
||||
last_info = torch.load(last_info)
|
||||
start_epoch = last_info["epoch"]
|
||||
checkpoint = torch.load(last_info["last_checkpoint"])
|
||||
@@ -161,11 +214,17 @@ def main(xargs):
|
||||
w_optimizer.load_state_dict(checkpoint["w_optimizer"])
|
||||
a_optimizer.load_state_dict(checkpoint["a_optimizer"])
|
||||
logger.log(
|
||||
"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch)
|
||||
"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
|
||||
last_info, start_epoch
|
||||
)
|
||||
)
|
||||
else:
|
||||
logger.log("=> do not find the last-info file : {:}".format(last_info))
|
||||
start_epoch, valid_accuracies, genotypes = 0, {"best": -1}, {-1: search_model.genotype()}
|
||||
start_epoch, valid_accuracies, genotypes = (
|
||||
0,
|
||||
{"best": -1},
|
||||
{-1: search_model.genotype()},
|
||||
)
|
||||
|
||||
# start training
|
||||
start_time, search_time, epoch_time, total_epoch = (
|
||||
@@ -176,16 +235,27 @@ def main(xargs):
|
||||
)
|
||||
for epoch in range(start_epoch, total_epoch):
|
||||
w_scheduler.update(epoch, 0.0)
|
||||
need_time = "Time Left: {:}".format(convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
|
||||
need_time = "Time Left: {:}".format(
|
||||
convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
|
||||
)
|
||||
epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
|
||||
search_model.set_tau(xargs.tau_max - (xargs.tau_max - xargs.tau_min) * epoch / (total_epoch - 1))
|
||||
search_model.set_tau(
|
||||
xargs.tau_max - (xargs.tau_max - xargs.tau_min) * epoch / (total_epoch - 1)
|
||||
)
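# The call above anneals the Gumbel-softmax temperature linearly from
# tau_max down to tau_min over the run, so sampled architectures are soft
# early and nearly discrete late. Worked check (tau_max=10, tau_min=0.1
# and 250 epochs are illustrative values, not taken from this diff):
#   epoch 0   -> tau = 10.0
#   epoch 125 -> tau = 10 - 9.9 * 125 / 249 ~ 5.03
#   epoch 249 -> tau = 0.1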
logger.log(
"\n[Search the {:}-th epoch] {:}, tau={:}, LR={:}".format(
epoch_str, need_time, search_model.get_tau(), min(w_scheduler.get_lr())
)
)

search_w_loss, search_w_top1, search_w_top5, valid_a_loss, valid_a_top1, valid_a_top5 = search_func(
(
search_w_loss,
search_w_top1,
search_w_top5,
valid_a_loss,
valid_a_top1,
valid_a_top5,
) = search_func(
search_loader,
network,
criterion,
@@ -217,7 +287,9 @@ def main(xargs):
find_best = False

genotypes[epoch] = search_model.genotype()
logger.log("<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch]))
logger.log(
"<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
)
# save checkpoint
save_path = save_checkpoint(
{
@@ -282,29 +354,52 @@ if __name__ == "__main__":
parser.add_argument("--search_space_name", type=str, help="The search space name.")
parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
parser.add_argument("--channel", type=int, help="The number of channels.")
parser.add_argument("--num_cells", type=int, help="The number of cells in one stage.")
parser.add_argument(
"--num_cells", type=int, help="The number of cells in one stage."
)
parser.add_argument(
"--track_running_stats",
type=int,
choices=[0, 1],
help="Whether to use track_running_stats or not in the BN layer.",
)
parser.add_argument("--config_path", type=str, help="The path of the configuration.")
parser.add_argument(
"--config_path", type=str, help="The path of the configuration."
)
parser.add_argument(
"--model_config",
type=str,
help="The path of the model configuration. When this arg is set, it will cover max_nodes / channels / num_cells.",
)
# architecture learning rate
parser.add_argument("--arch_learning_rate", type=float, default=3e-4, help="learning rate for arch encoding")
parser.add_argument("--arch_weight_decay", type=float, default=1e-3, help="weight decay for arch encoding")
parser.add_argument(
"--arch_learning_rate",
type=float,
default=3e-4,
help="learning rate for arch encoding",
)
parser.add_argument(
"--arch_weight_decay",
type=float,
default=1e-3,
help="weight decay for arch encoding",
)
parser.add_argument("--tau_min", type=float, help="The minimum tau for Gumbel")
parser.add_argument("--tau_max", type=float, help="The maximum tau for Gumbel")
# log
parser.add_argument("--workers", type=int, default=2, help="number of data loading workers (default: 2)")
parser.add_argument("--save_dir", type=str, help="Folder to save checkpoints and log.")
parser.add_argument(
"--arch_nas_dataset", type=str, help="The path to load the architecture dataset (tiny-nas-benchmark)."
"--workers",
type=int,
default=2,
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--save_dir", type=str, help="Folder to save checkpoints and log."
)
parser.add_argument(
"--arch_nas_dataset",
type=str,
help="The path to load the architecture dataset (tiny-nas-benchmark).",
)
parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
parser.add_argument("--rand_seed", type=int, help="manual seed")

@@ -15,19 +15,29 @@ if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
from config_utils import load_config, dict2config, configure2str
from datasets import get_datasets, get_nas_search_loaders
from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
from procedures import (
prepare_seed,
prepare_logger,
save_checkpoint,
copy_checkpoint,
get_optim_scheduler,
)
from utils import get_model_infos, obtain_accuracy
from log_utils import AverageMeter, time_string, convert_secs2time
from models import get_cell_based_tiny_net, get_search_spaces
from nas_201_api import NASBench201API as API


def search_func(xloader, network, criterion, scheduler, w_optimizer, epoch_str, print_freq, logger):
def search_func(
xloader, network, criterion, scheduler, w_optimizer, epoch_str, print_freq, logger
):
data_time, batch_time = AverageMeter(), AverageMeter()
base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
network.train()
end = time.time()
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
xloader
):
scheduler.update(None, 1.0 * step / len(xloader))
base_targets = base_targets.cuda(non_blocking=True)
arch_targets = arch_targets.cuda(non_blocking=True)
@@ -43,7 +53,9 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, epoch_str,
nn.utils.clip_grad_norm_(network.parameters(), 5)
w_optimizer.step()
# record
base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
base_prec1, base_prec5 = obtain_accuracy(
logits.data, base_targets.data, topk=(1, 5)
)
base_losses.update(base_loss.item(), base_inputs.size(0))
base_top1.update(base_prec1.item(), base_inputs.size(0))
base_top5.update(base_prec5.item(), base_inputs.size(0))
@@ -53,7 +65,11 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, epoch_str,
end = time.time()

if step % print_freq == 0 or step + 1 == len(xloader):
Sstr = "*SEARCH* " + time_string() + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
Sstr = (
"*SEARCH* "
+ time_string()
+ " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
)
Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
batch_time=batch_time, data_time=data_time
)
@@ -80,7 +96,9 @@ def valid_func(xloader, network, criterion):
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
# record
arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
arch_prec1, arch_prec5 = obtain_accuracy(
logits.data, arch_targets.data, topk=(1, 5)
)
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
@@ -105,7 +123,9 @@ def search_find_best(xloader, network, n_samples):
inputs, targets = next(loader_iter)

_, logits = network(inputs)
val_top1, val_top5 = obtain_accuracy(logits.cpu().data, targets.data, topk=(1, 5))
val_top1, val_top5 = obtain_accuracy(
logits.cpu().data, targets.data, topk=(1, 5)
)

archs.append(arch)
valid_accs.append(val_top1.item())
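# search_find_best above implements random search with weight sharing:
# draw n_samples architectures, score each with a single batch through the
# shared-weight network, and keep the argmax. A standalone sketch of that
# selection step (random_arch and evaluate are hypothetical stand-ins for
# the script's sampling and scoring):
import random

def pick_best(random_arch, evaluate, n_samples):
    archs = [random_arch() for _ in range(n_samples)]
    accs = [evaluate(arch) for arch in archs]
    best = max(range(n_samples), key=lambda i: accs[i])
    return archs[best], accs[best]

# toy usage: "architectures" are ints and the score prefers values near 7
arch, acc = pick_best(lambda: random.randint(0, 9), lambda a: -abs(a - 7), 5)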
@@ -124,8 +144,12 @@ def main(xargs):
prepare_seed(xargs.rand_seed)
logger = prepare_logger(args)

train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
config = load_config(xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger)
train_data, valid_data, xshape, class_num = get_datasets(
xargs.dataset, xargs.data_path, -1
)
config = load_config(
xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
)
search_loader, _, valid_loader = get_nas_search_loaders(
train_data,
valid_data,
@@ -157,7 +181,9 @@ def main(xargs):
)
search_model = get_cell_based_tiny_net(model_config)

w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.parameters(), config)
w_optimizer, w_scheduler, criterion = get_optim_scheduler(
search_model.parameters(), config
)
logger.log("w-optimizer : {:}".format(w_optimizer))
logger.log("w-scheduler : {:}".format(w_scheduler))
logger.log("criterion : {:}".format(criterion))
@@ -167,11 +193,17 @@ def main(xargs):
api = API(xargs.arch_nas_dataset)
logger.log("{:} create API = {:} done".format(time_string(), api))

last_info, model_base_path, model_best_path = logger.path("info"), logger.path("model"), logger.path("best")
last_info, model_base_path, model_best_path = (
logger.path("info"),
logger.path("model"),
logger.path("best"),
)
network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

if last_info.exists(): # automatically resume from previous checkpoint
logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
logger.log(
"=> loading checkpoint of the last-info '{:}' start".format(last_info)
)
last_info = torch.load(last_info)
start_epoch = last_info["epoch"]
checkpoint = torch.load(last_info["last_checkpoint"])
@@ -181,7 +213,9 @@ def main(xargs):
w_scheduler.load_state_dict(checkpoint["w_scheduler"])
w_optimizer.load_state_dict(checkpoint["w_optimizer"])
logger.log(
"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch)
"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
last_info, start_epoch
)
)
else:
logger.log("=> do not find the last-info file : {:}".format(last_info))
@@ -196,13 +230,26 @@ def main(xargs):
)
for epoch in range(start_epoch, total_epoch):
w_scheduler.update(epoch, 0.0)
need_time = "Time Left: {:}".format(convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
need_time = "Time Left: {:}".format(
convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
)
epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
logger.log("\n[Search the {:}-th epoch] {:}, LR={:}".format(epoch_str, need_time, min(w_scheduler.get_lr())))
logger.log(
"\n[Search the {:}-th epoch] {:}, LR={:}".format(
epoch_str, need_time, min(w_scheduler.get_lr())
)
)

# selected_arch = search_find_best(valid_loader, network, criterion, xargs.select_num)
search_w_loss, search_w_top1, search_w_top5 = search_func(
search_loader, network, criterion, w_scheduler, w_optimizer, epoch_str, xargs.print_freq, logger
search_loader,
network,
criterion,
w_scheduler,
w_optimizer,
epoch_str,
xargs.print_freq,
logger,
)
search_time.update(time.time() - start_time)
logger.log(
@@ -210,14 +257,22 @@ def main(xargs):
epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
)
)
valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
valid_loader, network, criterion
)
logger.log(
"[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
)
)
cur_arch, cur_valid_acc = search_find_best(valid_loader, network, xargs.select_num)
logger.log("[{:}] find-the-best : {:}, accuracy@1={:.2f}%".format(epoch_str, cur_arch, cur_valid_acc))
cur_arch, cur_valid_acc = search_find_best(
valid_loader, network, xargs.select_num
)
logger.log(
"[{:}] find-the-best : {:}, accuracy@1={:.2f}%".format(
epoch_str, cur_arch, cur_valid_acc
)
)
genotypes[epoch] = cur_arch
# check the best accuracy
valid_accuracies[epoch] = valid_a_top1
@@ -289,11 +344,19 @@ if __name__ == "__main__":
)
# channels and number-of-cells
parser.add_argument("--search_space_name", type=str, help="The search space name.")
parser.add_argument("--config_path", type=str, help="The path to the configuration.")
parser.add_argument(
"--config_path", type=str, help="The path to the configuration."
)
parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
parser.add_argument("--channel", type=int, help="The number of channels.")
parser.add_argument("--num_cells", type=int, help="The number of cells in one stage.")
parser.add_argument("--select_num", type=int, help="The number of selected architectures to evaluate.")
parser.add_argument(
"--num_cells", type=int, help="The number of cells in one stage."
)
parser.add_argument(
"--select_num",
type=int,
help="The number of selected architectures to evaluate.",
)
parser.add_argument(
"--track_running_stats",
type=int,
@@ -301,10 +364,19 @@ if __name__ == "__main__":
help="Whether to use track_running_stats or not in the BN layer.",
)
# log
parser.add_argument("--workers", type=int, default=2, help="number of data loading workers (default: 2)")
parser.add_argument("--save_dir", type=str, help="Folder to save checkpoints and log.")
parser.add_argument(
"--arch_nas_dataset", type=str, help="The path to load the architecture dataset (tiny-nas-benchmark)."
"--workers",
type=int,
default=2,
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--save_dir", type=str, help="Folder to save checkpoints and log."
)
parser.add_argument(
"--arch_nas_dataset",
type=str,
help="The path to load the architecture dataset (tiny-nas-benchmark).",
)
parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
parser.add_argument("--rand_seed", type=int, help="manual seed")

@@ -13,7 +13,13 @@ if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
from config_utils import load_config, dict2config, configure2str
from datasets import get_datasets, SearchDataset
from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
from procedures import (
prepare_seed,
prepare_logger,
save_checkpoint,
copy_checkpoint,
get_optim_scheduler,
)
from utils import get_model_infos, obtain_accuracy
from log_utils import AverageMeter, time_string, convert_secs2time
from models import get_search_spaces
@@ -35,13 +41,17 @@ def main(xargs, nas_bench):
else:
dataname = xargs.dataset
if xargs.data_path is not None:
train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
train_data, valid_data, xshape, class_num = get_datasets(
xargs.dataset, xargs.data_path, -1
)
split_Fpath = "configs/nas-benchmark/cifar-split.txt"
cifar_split = load_config(split_Fpath, None, None)
train_split, valid_split = cifar_split.train, cifar_split.valid
logger.log("Load split file from {:}".format(split_Fpath))
config_path = "configs/nas-benchmark/algos/R-EA.config"
config = load_config(config_path, {"class_num": class_num, "xshape": xshape}, logger)
config = load_config(
config_path, {"class_num": class_num, "xshape": xshape}, logger
)
# To split data
train_data_v2 = deepcopy(train_data)
train_data_v2.transform = valid_data.transform
@@ -68,7 +78,11 @@ def main(xargs, nas_bench):
)
)
logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))
extra_info = {"config": config, "train_loader": train_loader, "valid_loader": valid_loader}
extra_info = {
"config": config,
"train_loader": train_loader,
"valid_loader": valid_loader,
}
else:
config_path = "configs/nas-benchmark/algos/R-EA.config"
config = load_config(config_path, None, logger)
@@ -91,10 +105,17 @@ def main(xargs, nas_bench):
history.append(arch)
if best_arch is None or best_acc < accuracy:
best_acc, best_arch = accuracy, arch
logger.log("[{:03d}] : {:} : accuracy = {:.2f}%".format(len(history), arch, accuracy))
logger.log(
"[{:03d}] : {:} : accuracy = {:.2f}%".format(len(history), arch, accuracy)
)
logger.log(
"{:} best arch is {:}, accuracy = {:.2f}%, visit {:} archs with {:.1f} s (real-cost = {:.3f} s).".format(
time_string(), best_arch, best_acc, len(history), total_time_cost, time.time() - x_start_time
time_string(),
best_arch,
best_acc,
len(history),
total_time_cost,
time.time() - x_start_time,
)
)
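# The loop logged above is plain random search under a wall-clock budget:
# keep sampling architectures, track the incumbent best by (simulated)
# validation accuracy, and stop once the accumulated simulated cost passes
# time_budget. A compact, runnable sketch with stand-in functions:
import random

def random_search(sample, score_and_cost, time_budget):
    history, total_cost, best = [], 0.0, None
    while total_cost < time_budget:
        arch = sample()
        acc, cost = score_and_cost(arch)
        history.append(arch)
        total_cost += cost
        if best is None or best[1] < acc:
            best = (arch, acc)
    return best, history, total_cost

best, history, cost = random_search(random.random, lambda a: (a, 1.0), 10)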

@@ -121,14 +142,29 @@ if __name__ == "__main__":
parser.add_argument("--search_space_name", type=str, help="The search space name.")
parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
parser.add_argument("--channel", type=int, help="The number of channels.")
parser.add_argument("--num_cells", type=int, help="The number of cells in one stage.")
# parser.add_argument('--random_num', type=int, help='The number of randomly selected architectures.')
parser.add_argument("--time_budget", type=int, help="The total time cost budget for searching (in seconds).")
# log
parser.add_argument("--workers", type=int, default=2, help="number of data loading workers (default: 2)")
parser.add_argument("--save_dir", type=str, help="Folder to save checkpoints and log.")
parser.add_argument(
"--arch_nas_dataset", type=str, help="The path to load the architecture dataset (tiny-nas-benchmark)."
"--num_cells", type=int, help="The number of cells in one stage."
)
# parser.add_argument('--random_num', type=int, help='The number of randomly selected architectures.')
parser.add_argument(
"--time_budget",
type=int,
help="The total time cost budget for searching (in seconds).",
)
# log
parser.add_argument(
"--workers",
type=int,
default=2,
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--save_dir", type=str, help="Folder to save checkpoints and log."
)
parser.add_argument(
"--arch_nas_dataset",
type=str,
help="The path to load the architecture dataset (tiny-nas-benchmark).",
)
parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
parser.add_argument("--rand_seed", type=int, help="manual seed")
@@ -137,7 +173,11 @@ if __name__ == "__main__":
if args.arch_nas_dataset is None or not os.path.isfile(args.arch_nas_dataset):
nas_bench = None
else:
print("{:} build NAS-Benchmark-API from {:}".format(time_string(), args.arch_nas_dataset))
print(
"{:} build NAS-Benchmark-API from {:}".format(
time_string(), args.arch_nas_dataset
)
)
nas_bench = API(args.arch_nas_dataset)
if args.rand_seed < 0:
save_dir, all_indexes, num = None, [], 500

@@ -15,7 +15,13 @@ if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
from config_utils import load_config, dict2config, configure2str
from datasets import get_datasets, SearchDataset
from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
from procedures import (
prepare_seed,
prepare_logger,
save_checkpoint,
copy_checkpoint,
get_optim_scheduler,
)
from utils import get_model_infos, obtain_accuracy
from log_utils import AverageMeter, time_string, convert_secs2time
from nas_201_api import NASBench201API as API
@@ -38,13 +44,20 @@ class Model(object):
# In this case, the LR scheduler is converged.
# For use_012_epoch_training = False, the architecture is planned to be trained for 200 epochs, but we early stop its procedure.
#
def train_and_eval(arch, nas_bench, extra_info, dataname="cifar10-valid", use_012_epoch_training=True):
def train_and_eval(
arch, nas_bench, extra_info, dataname="cifar10-valid", use_012_epoch_training=True
):

if use_012_epoch_training and nas_bench is not None:
arch_index = nas_bench.query_index_by_arch(arch)
assert arch_index >= 0, "cannot find this arch : {:}".format(arch)
info = nas_bench.get_more_info(arch_index, dataname, iepoch=None, hp="12", is_random=True)
valid_acc, time_cost = info["valid-accuracy"], info["train-all-time"] + info["valid-per-time"]
info = nas_bench.get_more_info(
arch_index, dataname, iepoch=None, hp="12", is_random=True
)
valid_acc, time_cost = (
info["valid-accuracy"],
info["train-all-time"] + info["valid-per-time"],
)
# _, valid_acc = info.get_metrics('cifar10-valid', 'x-valid' , 25, True) # use the validation accuracy after 25 training epochs
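# With use_012_epoch_training the benchmark replaces real training by a
# lookup: read the validation accuracy of a recorded 12-epoch run and
# charge the search its recorded train+valid time. Sketch of that proxy
# (assuming a bench object with the get_more_info interface used above):
#   idx = bench.query_index_by_arch(arch)
#   info = bench.get_more_info(idx, dataname, iepoch=None, hp="12", is_random=True)
#   acc, cost = info["valid-accuracy"], info["train-all-time"] + info["valid-per-time"]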
elif not use_012_epoch_training and nas_bench is not None:
# Please contact me if you want to use the following logic, because it has some potential issues.
@@ -52,7 +65,9 @@ def train_and_eval(arch, nas_bench, extra_info, dataname="cifar10-valid", use_01
# It did return values for cifar100 and ImageNet16-120, but it has some potential issues. (Please email me for more details)
arch_index, nepoch = nas_bench.query_index_by_arch(arch), 25
assert arch_index >= 0, "cannot find this arch : {:}".format(arch)
xoinfo = nas_bench.get_more_info(arch_index, "cifar10-valid", iepoch=None, hp="12")
xoinfo = nas_bench.get_more_info(
arch_index, "cifar10-valid", iepoch=None, hp="12"
)
xocost = nas_bench.get_cost_info(arch_index, "cifar10-valid", hp="200")
info = nas_bench.get_more_info(
arch_index, dataname, nepoch, hp="200", is_random=True
@@ -85,9 +100,15 @@ def train_and_eval(arch, nas_bench, extra_info, dataname="cifar10-valid", use_01
* cost["latency"]
)
try:
valid_acc, time_cost = info["valid-accuracy"], estimated_train_cost + estimated_valid_cost
valid_acc, time_cost = (
info["valid-accuracy"],
estimated_train_cost + estimated_valid_cost,
)
except:
valid_acc, time_cost = info["valtest-accuracy"], estimated_train_cost + estimated_valid_cost
valid_acc, time_cost = (
info["valtest-accuracy"],
estimated_train_cost + estimated_valid_cost,
)
else:
# train a model from scratch.
raise ValueError("NOT IMPLEMENTED YET")
@@ -131,7 +152,15 @@ def mutate_arch_func(op_names):


def regularized_evolution(
cycles, population_size, sample_size, time_budget, random_arch, mutate_arch, nas_bench, extra_info, dataname
cycles,
population_size,
sample_size,
time_budget,
random_arch,
mutate_arch,
nas_bench,
extra_info,
dataname,
):
"""Algorithm for regularized evolution (i.e. aging evolution).

@@ -149,13 +178,18 @@ def regularized_evolution(
during the evolution experiment.
"""
population = collections.deque()
history, total_time_cost = [], 0 # Not used by the algorithm, only used to report results.
history, total_time_cost = (
[],
0,
) # Not used by the algorithm, only used to report results.

# Initialize the population with random models.
while len(population) < population_size:
model = Model()
model.arch = random_arch()
model.accuracy, time_cost = train_and_eval(model.arch, nas_bench, extra_info, dataname)
model.accuracy, time_cost = train_and_eval(
model.arch, nas_bench, extra_info, dataname
)
population.append(model)
history.append(model)
total_time_cost += time_cost
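# From here the aging-evolution cycle is: sample `sample_size` members as
# a tournament, mutate the most accurate one, append the child, and pop
# the oldest member so the population "ages". A self-contained toy of that
# cycle (models are just (accuracy,) tuples here, not the script's Model):
import collections
import random

population = collections.deque((random.random(),) for _ in range(8))
for _ in range(5):
    candidates = random.sample(list(population), 3)  # tournament of 3
    parent = max(candidates, key=lambda m: m[0])
    child = (min(1.0, parent[0] + random.uniform(-0.05, 0.1)),)  # "mutation"
    population.append(child)
    population.popleft()  # remove the oldest member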
@@ -180,7 +214,9 @@ def regularized_evolution(
child = Model()
child.arch = mutate_arch(parent.arch)
total_time_cost += time.time() - start_time
child.accuracy, time_cost = train_and_eval(child.arch, nas_bench, extra_info, dataname)
child.accuracy, time_cost = train_and_eval(
child.arch, nas_bench, extra_info, dataname
)
if total_time_cost + time_cost > time_budget: # return
return history, total_time_cost
else:
@@ -207,13 +243,17 @@ def main(xargs, nas_bench):
else:
dataname = xargs.dataset
if xargs.data_path is not None:
train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
train_data, valid_data, xshape, class_num = get_datasets(
xargs.dataset, xargs.data_path, -1
)
split_Fpath = "configs/nas-benchmark/cifar-split.txt"
cifar_split = load_config(split_Fpath, None, None)
train_split, valid_split = cifar_split.train, cifar_split.valid
logger.log("Load split file from {:}".format(split_Fpath))
config_path = "configs/nas-benchmark/algos/R-EA.config"
config = load_config(config_path, {"class_num": class_num, "xshape": xshape}, logger)
config = load_config(
config_path, {"class_num": class_num, "xshape": xshape}, logger
)
# To split data
train_data_v2 = deepcopy(train_data)
train_data_v2.transform = valid_data.transform
@@ -240,7 +280,11 @@ def main(xargs, nas_bench):
)
)
logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))
extra_info = {"config": config, "train_loader": train_loader, "valid_loader": valid_loader}
extra_info = {
"config": config,
"train_loader": train_loader,
"valid_loader": valid_loader,
}
else:
config_path = "configs/nas-benchmark/algos/R-EA.config"
config = load_config(config_path, None, logger)
@@ -253,7 +297,10 @@ def main(xargs, nas_bench):
# x =random_arch() ; y = mutate_arch(x)
x_start_time = time.time()
logger.log("{:} use nas_bench : {:}".format(time_string(), nas_bench))
logger.log("-" * 30 + " start searching with the time budget of {:} s".format(xargs.time_budget))
logger.log(
"-" * 30
+ " start searching with the time budget of {:} s".format(xargs.time_budget)
)
history, total_cost = regularized_evolution(
xargs.ea_cycles,
xargs.ea_population,
@@ -297,17 +344,36 @@ if __name__ == "__main__":
parser.add_argument("--search_space_name", type=str, help="The search space name.")
parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
parser.add_argument("--channel", type=int, help="The number of channels.")
parser.add_argument("--num_cells", type=int, help="The number of cells in one stage.")
parser.add_argument(
"--num_cells", type=int, help="The number of cells in one stage."
)
parser.add_argument("--ea_cycles", type=int, help="The number of cycles in EA.")
parser.add_argument("--ea_population", type=int, help="The population size in EA.")
parser.add_argument("--ea_sample_size", type=int, help="The sample size in EA.")
parser.add_argument("--ea_fast_by_api", type=int, help="Use our API to speed up the experiments or not.")
parser.add_argument("--time_budget", type=int, help="The total time cost budget for searching (in seconds).")
# log
parser.add_argument("--workers", type=int, default=2, help="number of data loading workers (default: 2)")
parser.add_argument("--save_dir", type=str, help="Folder to save checkpoints and log.")
parser.add_argument(
"--arch_nas_dataset", type=str, help="The path to load the architecture dataset (tiny-nas-benchmark)."
"--ea_fast_by_api",
type=int,
help="Use our API to speed up the experiments or not.",
)
parser.add_argument(
"--time_budget",
type=int,
help="The total time cost budget for searching (in seconds).",
)
# log
parser.add_argument(
"--workers",
type=int,
default=2,
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--save_dir", type=str, help="Folder to save checkpoints and log."
)
parser.add_argument(
"--arch_nas_dataset",
type=str,
help="The path to load the architecture dataset (tiny-nas-benchmark).",
)
parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
parser.add_argument("--rand_seed", type=int, default=-1, help="manual seed")
@@ -318,7 +384,11 @@ if __name__ == "__main__":
if args.arch_nas_dataset is None or not os.path.isfile(args.arch_nas_dataset):
nas_bench = None
else:
print("{:} build NAS-Benchmark-API from {:}".format(time_string(), args.arch_nas_dataset))
print(
"{:} build NAS-Benchmark-API from {:}".format(
time_string(), args.arch_nas_dataset
)
)
nas_bench = API(args.arch_nas_dataset)
if args.rand_seed < 0:
save_dir, all_indexes, num = None, [], 500

@@ -15,20 +15,38 @@ if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
from config_utils import load_config, dict2config, configure2str
from datasets import get_datasets, get_nas_search_loaders
from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
from procedures import (
prepare_seed,
prepare_logger,
save_checkpoint,
copy_checkpoint,
get_optim_scheduler,
)
from utils import get_model_infos, obtain_accuracy
from log_utils import AverageMeter, time_string, convert_secs2time
from models import get_cell_based_tiny_net, get_search_spaces
from nas_201_api import NASBench201API as API


def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger):
def search_func(
xloader,
network,
criterion,
scheduler,
w_optimizer,
a_optimizer,
epoch_str,
print_freq,
logger,
):
data_time, batch_time = AverageMeter(), AverageMeter()
base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
end = time.time()
network.train()
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
xloader
):
scheduler.update(None, 1.0 * step / len(xloader))
base_targets = base_targets.cuda(non_blocking=True)
arch_targets = arch_targets.cuda(non_blocking=True)
@@ -45,7 +63,9 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
base_loss.backward()
w_optimizer.step()
# record
base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
base_prec1, base_prec5 = obtain_accuracy(
logits.data, base_targets.data, topk=(1, 5)
)
base_losses.update(base_loss.item(), base_inputs.size(0))
base_top1.update(base_prec1.item(), base_inputs.size(0))
base_top5.update(base_prec5.item(), base_inputs.size(0))
@@ -58,7 +78,9 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
arch_loss.backward()
a_optimizer.step()
# record
arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
arch_prec1, arch_prec5 = obtain_accuracy(
logits.data, arch_targets.data, topk=(1, 5)
)
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
@@ -68,7 +90,11 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
end = time.time()

if step % print_freq == 0 or step + 1 == len(xloader):
Sstr = "*SEARCH* " + time_string() + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
Sstr = (
"*SEARCH* "
+ time_string()
+ " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
)
Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
batch_time=batch_time, data_time=data_time
)
@@ -81,7 +107,14 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
logger.log(Sstr + " " + Tstr + " " + Wstr + " " + Astr)
# print (nn.functional.softmax(network.module.arch_parameters, dim=-1))
# print (network.module.arch_parameters)
return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg
return (
base_losses.avg,
base_top1.avg,
base_top5.avg,
arch_losses.avg,
arch_top1.avg,
arch_top5.avg,
)


def get_best_arch(xloader, network, n_samples):
@@ -99,7 +132,9 @@ def get_best_arch(xloader, network, n_samples):
inputs, targets = next(loader_iter)

_, logits = network(inputs)
val_top1, val_top5 = obtain_accuracy(logits.cpu().data, targets.data, topk=(1, 5))
val_top1, val_top5 = obtain_accuracy(
logits.cpu().data, targets.data, topk=(1, 5)
)

valid_accs.append(val_top1.item())
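# get_best_arch is SETN's selection stage: sample n_samples candidate
# genotypes from the supernet and keep the one whose single-batch accuracy
# is highest; the noisy one-batch estimate is then re-checked by
# valid_func. A stand-in sketch (sample_genotype and score are
# hypothetical, not the repository's API):
def best_of_n(sample_genotype, score, n_samples):
    candidates = [sample_genotype() for _ in range(n_samples)]
    scores = [score(genotype) for genotype in candidates]
    return candidates[max(range(n_samples), key=lambda i: scores[i])]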

@@ -122,7 +157,9 @@ def valid_func(xloader, network, criterion):
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
# record
arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
arch_prec1, arch_prec5 = obtain_accuracy(
logits.data, arch_targets.data, topk=(1, 5)
)
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
@@ -141,8 +178,12 @@ def main(xargs):
prepare_seed(xargs.rand_seed)
logger = prepare_logger(args)

train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
config = load_config(xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger)
train_data, valid_data, xshape, class_num = get_datasets(
xargs.dataset, xargs.data_path, -1
)
config = load_config(
xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
)
search_loader, _, valid_loader = get_nas_search_loaders(
train_data,
valid_data,
@@ -187,9 +228,14 @@ def main(xargs):
logger.log("search space : {:}".format(search_space))
search_model = get_cell_based_tiny_net(model_config)

w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)
w_optimizer, w_scheduler, criterion = get_optim_scheduler(
search_model.get_weights(), config
)
a_optimizer = torch.optim.Adam(
search_model.get_alphas(), lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay
search_model.get_alphas(),
lr=xargs.arch_learning_rate,
betas=(0.5, 0.999),
weight_decay=xargs.arch_weight_decay,
)
logger.log("w-optimizer : {:}".format(w_optimizer))
logger.log("a-optimizer : {:}".format(a_optimizer))
@@ -204,11 +250,17 @@ def main(xargs):
api = API(xargs.arch_nas_dataset)
logger.log("{:} create API = {:} done".format(time_string(), api))

last_info, model_base_path, model_best_path = logger.path("info"), logger.path("model"), logger.path("best")
last_info, model_base_path, model_best_path = (
logger.path("info"),
logger.path("model"),
logger.path("best"),
)
network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

if last_info.exists(): # automatically resume from previous checkpoint
logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
logger.log(
"=> loading checkpoint of the last-info '{:}' start".format(last_info)
)
last_info = torch.load(last_info)
start_epoch = last_info["epoch"]
checkpoint = torch.load(last_info["last_checkpoint"])
@@ -219,7 +271,9 @@ def main(xargs):
w_optimizer.load_state_dict(checkpoint["w_optimizer"])
a_optimizer.load_state_dict(checkpoint["a_optimizer"])
logger.log(
"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch)
"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
last_info, start_epoch
)
)
else:
logger.log("=> do not find the last-info file : {:}".format(last_info))
@@ -235,11 +289,24 @@ def main(xargs):
)
for epoch in range(start_epoch, total_epoch):
w_scheduler.update(epoch, 0.0)
need_time = "Time Left: {:}".format(convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
need_time = "Time Left: {:}".format(
convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
)
epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
logger.log("\n[Search the {:}-th epoch] {:}, LR={:}".format(epoch_str, need_time, min(w_scheduler.get_lr())))
logger.log(
"\n[Search the {:}-th epoch] {:}, LR={:}".format(
epoch_str, need_time, min(w_scheduler.get_lr())
)
)

search_w_loss, search_w_top1, search_w_top5, search_a_loss, search_a_top1, search_a_top5 = search_func(
(
search_w_loss,
search_w_top1,
search_w_top5,
search_a_loss,
search_a_top1,
search_a_top5,
) = search_func(
search_loader,
network,
criterion,
@@ -264,7 +331,9 @@ def main(xargs):

genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.select_num)
network.module.set_cal_mode("dynamic", genotype)
valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
valid_loader, network, criterion
)
logger.log(
"[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}% | {:}".format(
epoch_str, valid_a_loss, valid_a_top1, valid_a_top5, genotype
@@ -283,7 +352,9 @@ def main(xargs):
valid_accuracies[epoch] = valid_a_top1

genotypes[epoch] = genotype
logger.log("<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch]))
logger.log(
"<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
)
# save checkpoint
save_path = save_checkpoint(
{
@@ -321,12 +392,22 @@ def main(xargs):
genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.select_num)
search_time.update(time.time() - start_time)
network.module.set_cal_mode("dynamic", genotype)
valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
logger.log("Last : the genotype is : {:}, with the validation accuracy of {:.3f}%.".format(genotype, valid_a_top1))
valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
valid_loader, network, criterion
)
logger.log(
"Last : the genotype is : {:}, with the validation accuracy of {:.3f}%.".format(
genotype, valid_a_top1
)
)

logger.log("\n" + "-" * 100)
# check the performance from the architecture dataset
logger.log("SETN : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(total_epoch, search_time.sum, genotype))
logger.log(
"SETN : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(
total_epoch, search_time.sum, genotype
)
)
if api is not None:
logger.log("{:}".format(api.query_by_arch(genotype, "200")))
logger.close()
@@ -345,23 +426,50 @@ if __name__ == "__main__":
parser.add_argument("--search_space_name", type=str, help="The search space name.")
parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
parser.add_argument("--channel", type=int, help="The number of channels.")
parser.add_argument("--num_cells", type=int, help="The number of cells in one stage.")
parser.add_argument("--select_num", type=int, help="The number of selected architectures to evaluate.")
parser.add_argument(
"--num_cells", type=int, help="The number of cells in one stage."
)
parser.add_argument(
"--select_num",
type=int,
help="The number of selected architectures to evaluate.",
)
parser.add_argument(
"--track_running_stats",
type=int,
choices=[0, 1],
help="Whether to use track_running_stats or not in the BN layer.",
)
parser.add_argument("--config_path", type=str, help="The path of the configuration.")
# architecture learning rate
parser.add_argument("--arch_learning_rate", type=float, default=3e-4, help="learning rate for arch encoding")
parser.add_argument("--arch_weight_decay", type=float, default=1e-3, help="weight decay for arch encoding")
# log
parser.add_argument("--workers", type=int, default=2, help="number of data loading workers (default: 2)")
parser.add_argument("--save_dir", type=str, help="Folder to save checkpoints and log.")
parser.add_argument(
"--arch_nas_dataset", type=str, help="The path to load the architecture dataset (tiny-nas-benchmark)."
"--config_path", type=str, help="The path of the configuration."
)
# architecture learning rate
parser.add_argument(
"--arch_learning_rate",
type=float,
default=3e-4,
help="learning rate for arch encoding",
)
parser.add_argument(
"--arch_weight_decay",
type=float,
default=1e-3,
help="weight decay for arch encoding",
)
# log
parser.add_argument(
"--workers",
type=int,
default=2,
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--save_dir", type=str, help="Folder to save checkpoints and log."
)
parser.add_argument(
"--arch_nas_dataset",
type=str,
help="The path to load the architecture dataset (tiny-nas-benchmark).",
)
parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
parser.add_argument("--rand_seed", type=int, help="manual seed")

@@ -16,7 +16,13 @@ if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
from config_utils import load_config, dict2config, configure2str
from datasets import get_datasets, SearchDataset
from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
from procedures import (
prepare_seed,
prepare_logger,
save_checkpoint,
copy_checkpoint,
get_optim_scheduler,
)
from utils import get_model_infos, obtain_accuracy
from log_utils import AverageMeter, time_string, convert_secs2time
from nas_201_api import NASBench201API as API
@@ -34,7 +40,9 @@ class Policy(nn.Module):
for j in range(i):
node_str = "{:}<-{:}".format(i, j)
self.edge2index[node_str] = len(self.edge2index)
self.arch_parameters = nn.Parameter(1e-3 * torch.randn(len(self.edge2index), len(search_space)))
self.arch_parameters = nn.Parameter(
1e-3 * torch.randn(len(self.edge2index), len(search_space))
)
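# The near-zero init above makes the starting policy almost uniform over
# the candidate operations on every edge; REINFORCE then samples one op
# per edge from a softmax over these logits, e.g. (6 edges x 5 ops would
# match a 4-node NAS-Bench-201 cell, but the sizes are an assumption here):
#   dist = torch.distributions.Categorical(logits=self.arch_parameters)
#   actions = dist.sample()                  # one op index per edge
#   log_prob = dist.log_prob(actions).sum()  # weighted by the reward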

def generate_arch(self, actions):
genotypes = []
@@ -74,7 +82,9 @@ class ExponentialMovingAverage(object):
self._momentum = momentum

def update(self, value):
self._numerator = self._momentum * self._numerator + (1 - self._momentum) * value
self._numerator = (
self._momentum * self._numerator + (1 - self._momentum) * value
)
self._denominator = self._momentum * self._denominator + (1 - self._momentum)
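# Assuming value() returns self._numerator / self._denominator, the pair
# of updates above forms a bias-corrected EMA baseline for REINFORCE.
# Worked check with momentum 0.9 and a first observed value of 1.0:
#   numerator   = 0.9 * 0 + 0.1 * 1.0 = 0.1
#   denominator = 0.9 * 0 + 0.1       = 0.1
#   value()     = 0.1 / 0.1 = 1.0  (unbiased even right after the start)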

def value(self):
@@ -104,13 +114,17 @@ def main(xargs, nas_bench):
else:
dataname = xargs.dataset
if xargs.data_path is not None:
train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
train_data, valid_data, xshape, class_num = get_datasets(
xargs.dataset, xargs.data_path, -1
)
split_Fpath = "configs/nas-benchmark/cifar-split.txt"
cifar_split = load_config(split_Fpath, None, None)
train_split, valid_split = cifar_split.train, cifar_split.valid
logger.log("Load split file from {:}".format(split_Fpath))
config_path = "configs/nas-benchmark/algos/R-EA.config"
config = load_config(config_path, {"class_num": class_num, "xshape": xshape}, logger)
config = load_config(
config_path, {"class_num": class_num, "xshape": xshape}, logger
)
# To split data
train_data_v2 = deepcopy(train_data)
train_data_v2.transform = valid_data.transform
@@ -137,7 +151,11 @@ def main(xargs, nas_bench):
)
)
logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))
extra_info = {"config": config, "train_loader": train_loader, "valid_loader": valid_loader}
extra_info = {
"config": config,
"train_loader": train_loader,
"valid_loader": valid_loader,
}
else:
config_path = "configs/nas-benchmark/algos/R-EA.config"
config = load_config(config_path, None, logger)
@@ -160,7 +178,9 @@ def main(xargs, nas_bench):
# REINFORCE
# attempts = 0
x_start_time = time.time()
logger.log("Will start searching with time budget of {:} s.".format(xargs.time_budget))
logger.log(
"Will start searching with time budget of {:} s.".format(xargs.time_budget)
)
total_steps, total_costs, trace = 0, 0, []
# for istep in range(xargs.RL_steps):
while total_costs < xargs.time_budget:
@@ -222,16 +242,35 @@ if __name__ == "__main__":
parser.add_argument("--search_space_name", type=str, help="The search space name.")
parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
parser.add_argument("--channel", type=int, help="The number of channels.")
parser.add_argument("--num_cells", type=int, help="The number of cells in one stage.")
parser.add_argument("--learning_rate", type=float, help="The learning rate for REINFORCE.")
# parser.add_argument('--RL_steps', type=int, help='The steps for REINFORCE.')
parser.add_argument("--EMA_momentum", type=float, help="The momentum value for EMA.")
parser.add_argument("--time_budget", type=int, help="The total time cost budget for searching (in seconds).")
# log
parser.add_argument("--workers", type=int, default=2, help="number of data loading workers (default: 2)")
parser.add_argument("--save_dir", type=str, help="Folder to save checkpoints and log.")
parser.add_argument(
"--arch_nas_dataset", type=str, help="The path to load the architecture dataset (tiny-nas-benchmark)."
"--num_cells", type=int, help="The number of cells in one stage."
)
parser.add_argument(
"--learning_rate", type=float, help="The learning rate for REINFORCE."
)
# parser.add_argument('--RL_steps', type=int, help='The steps for REINFORCE.')
parser.add_argument(
"--EMA_momentum", type=float, help="The momentum value for EMA."
)
parser.add_argument(
"--time_budget",
type=int,
help="The total time cost budget for searching (in seconds).",
)
# log
parser.add_argument(
"--workers",
type=int,
default=2,
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--save_dir", type=str, help="Folder to save checkpoints and log."
)
parser.add_argument(
"--arch_nas_dataset",
type=str,
help="The path to load the architecture dataset (tiny-nas-benchmark).",
)
parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
parser.add_argument("--rand_seed", type=int, default=-1, help="manual seed")
@@ -240,7 +279,11 @@ if __name__ == "__main__":
if args.arch_nas_dataset is None or not os.path.isfile(args.arch_nas_dataset):
nas_bench = None
else:
print("{:} build NAS-Benchmark-API from {:}".format(time_string(), args.arch_nas_dataset))
print(
"{:} build NAS-Benchmark-API from {:}".format(
time_string(), args.arch_nas_dataset
)
)
nas_bench = API(args.arch_nas_dataset)
if args.rand_seed < 0:
save_dir, all_indexes, num = None, [], 500