Upgrade NAS-Bench-201 to APIv1.3/FILEv1.1

commit fb76814369 (parent c53a9ce407)
Author: D-X-Y
Date: 2020-03-15 22:50:17 +11:00
20 changed files with 259 additions and 75 deletions

View File

@@ -1,36 +1,84 @@
 #####################################################
 # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.08 #
-########################################################
-# python exps/NAS-Bench-201/test-weights.py --api_path $HOME/.torch/NAS-Bench-201-v1_0-e61699.pth
-########################################################
+###############################################################################################
+# Before running these commands, the files must be properly placed.
+# python exps/NAS-Bench-201/test-weights.py --base_path $HOME/.torch/NAS-Bench-201-v1_0-e61699
+# python exps/NAS-Bench-201/test-weights.py --base_path $HOME/.torch/NAS-Bench-201-v1_1-096897
+###############################################################################################
 import os, sys, time, glob, random, argparse
 import numpy as np
 import torch
 import torch.nn as nn
 from pathlib import Path
+from tqdm import tqdm
 lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
 if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
 from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
 from nas_201_api import NASBench201API as API
+from log_utils import time_string
+from models import get_cell_based_tiny_net
+from utils import weight_watcher
-def main(meta_file, weight_dir, save_dir):
-  import pdb; pdb.set_trace()
+def get_cor(A, B):
+  return float(np.corrcoef(A, B)[0,1])
+
+def evaluate(api, weight_dir, data: str, use_12epochs_result: bool, valid_or_test: bool):
+  norms, accs = [], []
+  for idx in tqdm(range(len(api))):
+    info = api.get_more_info(idx, data, use_12epochs_result=use_12epochs_result, is_random=False)
+    if valid_or_test:
+      accs.append(info['valid-accuracy'])
+    else:
+      accs.append(info['test-accuracy'])
+    config = api.get_net_config(idx, data)
+    net = get_cell_based_tiny_net(config)
+    api.reload(weight_dir, idx)
+    params = api.get_net_param(idx, data, None)
+    cur_norms = []
+    for seed, param in params.items():
+      net.load_state_dict(param)
+      _, summary = weight_watcher.analyze(net, alphas=False)
+      cur_norms.append( summary['lognorm'] )
+    norms.append( float(np.mean(cur_norms)) )
+    api.clear_params(idx, use_12epochs_result)
+  correlation = get_cor(norms, accs)
+  print('For {:} with {:} epochs on {:} : the correlation is {:}'.format(data, 12 if use_12epochs_result else 200, 'valid' if valid_or_test else 'test', correlation))
+
+def main(meta_file: str, weight_dir, save_dir):
+  api = API(meta_file)
+  datasets = ['cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120']
+  print(time_string() + ' ' + '='*50)
+  for data in datasets:
+    nums = api.statistics(data, True)
+    total = sum([k*v for k, v in nums.items()])
+    print('Using 012 epochs, trained on {:20s} : {:} trials in total ({:}).'.format(data, total, nums))
+  print(time_string() + ' ' + '='*50)
+  for data in datasets:
+    nums = api.statistics(data, False)
+    total = sum([k*v for k, v in nums.items()])
+    print('Using 200 epochs, trained on {:20s} : {:} trials in total ({:}).'.format(data, total, nums))
+  print(time_string() + ' ' + '='*50)
+  evaluate(api, weight_dir, 'cifar10-valid', False, True)
+  print('{:} finished this test.'.format(time_string()))
 if __name__ == '__main__':
   parser = argparse.ArgumentParser("Analysis of NAS-Bench-201")
   parser.add_argument('--save_dir', type=str, default='./output/search-cell-nas-bench-201/visuals', help='The base-name of folder to save checkpoints and log.')
-  parser.add_argument('--api_path', type=str, default=None, help='The path to the NAS-Bench-201 benchmark file.')
-  parser.add_argument('--weight_dir', type=str, default=None, help='The directory path to the weights of every NAS-Bench-201 architecture.')
+  parser.add_argument('--base_path', type=str, default=None, help='The path to the NAS-Bench-201 benchmark file and weight dir.')
   args = parser.parse_args()
   save_dir = Path(args.save_dir)
   save_dir.mkdir(parents=True, exist_ok=True)
-  meta_file = Path(args.api_path)
-  weight_dir = Path(args.weight_dir)
+  meta_file = Path(args.base_path + '.pth')
+  weight_dir = Path(args.base_path + '-archive')
   assert meta_file.exists(), 'invalid path for api : {:}'.format(meta_file)
   assert weight_dir.exists() and weight_dir.is_dir(), 'invalid path for weight dir : {:}'.format(weight_dir)
-  main(meta_file, weight_dir, save_dir)
+  main(str(meta_file), weight_dir, save_dir)
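
The new `evaluate` reports the Pearson correlation between each architecture's mean log weight norm (from `weight_watcher`) and its benchmark accuracy. Below is a minimal, self-contained sketch of just that metric; the `fake_norms`/`fake_accs` arrays are hypothetical stand-ins, not real benchmark outputs:

import numpy as np

def get_cor(A, B):
  # Pearson correlation coefficient between two equal-length 1-D sequences.
  return float(np.corrcoef(A, B)[0, 1])

rng = np.random.RandomState(0)
fake_norms = rng.rand(100)                          # stand-in for per-architecture mean lognorm
fake_accs = 50 + 40 * fake_norms + rng.randn(100)   # toy accuracies, loosely tied to the norms
print('correlation = {:.4f}'.format(get_cor(fake_norms, fake_accs)))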

View File

@@ -50,10 +50,11 @@ def config2structure_func(max_nodes):
 class MyWorker(Worker):
-  def __init__(self, *args, convert_func=None, nas_bench=None, time_budget=None, **kwargs):
+  def __init__(self, *args, convert_func=None, dataname=None, nas_bench=None, time_budget=None, **kwargs):
     super().__init__(*args, **kwargs)
     self.convert_func = convert_func
-    self.nas_bench = nas_bench
+    self._dataname = dataname
+    self._nas_bench = nas_bench
     self.time_budget = time_budget
     self.seen_archs = []
     self.sim_cost_time = 0
@@ -64,7 +65,7 @@ class MyWorker(Worker):
     assert len(self.seen_archs) > 0
     best_index, best_acc = -1, None
     for arch_index in self.seen_archs:
-      info = self.nas_bench.get_more_info(arch_index, 'cifar10-valid', None, True)
+      info = self._nas_bench.get_more_info(arch_index, self._dataname, None, True, True)
       vacc = info['valid-accuracy']
       if best_acc is None or best_acc < vacc:
         best_acc = vacc
@@ -75,8 +76,8 @@ class MyWorker(Worker):
   def compute(self, config, budget, **kwargs):
     start_time = time.time()
     structure = self.convert_func( config )
-    arch_index = self.nas_bench.query_index_by_arch( structure )
-    info = self.nas_bench.get_more_info(arch_index, 'cifar10-valid', None, True)
+    arch_index = self._nas_bench.query_index_by_arch( structure )
+    info = self._nas_bench.get_more_info(arch_index, self._dataname, None, True, True)
     cur_time = info['train-all-time'] + info['valid-per-time']
     cur_vacc = info['valid-accuracy']
     self.real_cost_time += (time.time() - start_time)
@@ -106,7 +107,10 @@ def main(xargs, nas_bench):
   prepare_seed(xargs.rand_seed)
   logger = prepare_logger(args)
-  assert xargs.dataset == 'cifar10', 'currently only support CIFAR-10'
+  if xargs.dataset == 'cifar10':
+    dataname = 'cifar10-valid'
+  else:
+    dataname = xargs.dataset
   if xargs.data_path is not None:
     train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
     split_Fpath = 'configs/nas-benchmark/cifar-split.txt'
@@ -148,7 +152,7 @@ def main(xargs, nas_bench):
   #logger.log('{:} Create NAS-BENCH-API DONE'.format(time_string()))
   workers = []
   for i in range(num_workers):
-    w = MyWorker(nameserver=ns_host, nameserver_port=ns_port, convert_func=config2structure, nas_bench=nas_bench, time_budget=xargs.time_budget, run_id=hb_run_id, id=i)
+    w = MyWorker(nameserver=ns_host, nameserver_port=ns_port, convert_func=config2structure, dataname=dataname, nas_bench=nas_bench, time_budget=xargs.time_budget, run_id=hb_run_id, id=i)
     w.run(background=True)
     workers.append(w)
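
The substance of this change is that the BOHB worker now queries the benchmark per dataset instead of hard-coding 'cifar10-valid', with the trailing positional arguments of the v1.3 `get_more_info` call matching the usage above (index, dataset, iepoch, use_12epochs_result, is_random). A hedged sketch of the lookup; `query_simulated_result` is an illustrative helper rather than repository code, and `api` is assumed to be a loaded NASBench201API:

def query_simulated_result(api, arch_index, dataset):
  # CIFAR-10 is queried through 'cifar10-valid' so a held-out validation
  # accuracy is well defined; other datasets are used as-is.
  dataname = 'cifar10-valid' if dataset == 'cifar10' else dataset
  # Mirrors the call above: (index, dataset, iepoch, use_12epochs_result, is_random).
  info = api.get_more_info(arch_index, dataname, None, True, True)
  simulated_cost = info['train-all-time'] + info['valid-per-time']
  return info['valid-accuracy'], simulated_cost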

View File

@@ -28,7 +28,10 @@ def main(xargs, nas_bench):
   prepare_seed(xargs.rand_seed)
   logger = prepare_logger(args)
-  assert xargs.dataset == 'cifar10', 'currently only support CIFAR-10'
+  if xargs.dataset == 'cifar10':
+    dataname = 'cifar10-valid'
+  else:
+    dataname = xargs.dataset
   if xargs.data_path is not None:
     train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
     split_Fpath = 'configs/nas-benchmark/cifar-split.txt'
@@ -62,7 +65,7 @@ def main(xargs, nas_bench):
   #for idx in range(xargs.random_num):
   while total_time_cost < xargs.time_budget:
     arch = random_arch()
-    accuracy, cost_time = train_and_eval(arch, nas_bench, extra_info)
+    accuracy, cost_time = train_and_eval(arch, nas_bench, extra_info, dataname)
     if total_time_cost + cost_time > xargs.time_budget: break
     else: total_time_cost += cost_time
     history.append(arch)
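
This loop is the simulated-time budget shared by all of these searchers: the simulated cost of each evaluation is accumulated, and a step that would overshoot the budget is discarded. A toy, self-contained sketch of that accounting, where `sample_and_eval` is a hypothetical stand-in for architecture sampling plus `train_and_eval`:

def run_with_budget(sample_and_eval, time_budget):
  # Accumulate simulated cost until the budget is exhausted; a step that
  # would overshoot is dropped rather than partially counted.
  history, total_time_cost = [], 0.0
  while total_time_cost < time_budget:
    arch, accuracy, cost_time = sample_and_eval()
    if total_time_cost + cost_time > time_budget:
      break
    total_time_cost += cost_time
    history.append((accuracy, arch))
  return history, total_time_cost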

View File

@@ -33,19 +33,21 @@ class Model(object):
 # This function is to mimic the training and evaluating procedure for a single architecture `arch`.
 # The time_cost is calculated as the total training time for a few (e.g., 12) epochs plus the evaluation time for one epoch.
-# For use_converged_LR = True, the architecture is trained for 12 epochs, with LR being decayed from 0.1 to 0.
+# For use_012_epoch_training = True, the architecture is trained for 12 epochs, with LR being decayed from 0.1 to 0.
 #   In this case, the LR scheduler is converged.
-# For use_converged_LR = False, the architecture is planned to be trained for 200 epochs, but we early stop its procedure.
+# For use_012_epoch_training = False, the architecture is planned to be trained for 200 epochs, but we early stop its procedure.
 #
-def train_and_eval(arch, nas_bench, extra_info, dataname='cifar10-valid', use_converged_LR=True):
-  if use_converged_LR and nas_bench is not None:
+def train_and_eval(arch, nas_bench, extra_info, dataname='cifar10-valid', use_012_epoch_training=True):
+  if use_012_epoch_training and nas_bench is not None:
     arch_index = nas_bench.query_index_by_arch( arch )
     assert arch_index >= 0, 'can not find this arch : {:}'.format(arch)
     info = nas_bench.get_more_info(arch_index, dataname, None, True)
     valid_acc, time_cost = info['valid-accuracy'], info['train-all-time'] + info['valid-per-time']
     #_, valid_acc = info.get_metrics('cifar10-valid', 'x-valid' , 25, True) # use the validation accuracy after 25 training epochs
-  elif not use_converged_LR and nas_bench is not None:
-    # Please use `use_converged_LR=False` for cifar10 only.
+  elif not use_012_epoch_training and nas_bench is not None:
+    # Please contact me if you want to use the following logic, because it has some potential issues.
+    # Please use `use_012_epoch_training=False` for cifar10 only.
+    # It did return values for cifar100 and ImageNet16-120, but it has some potential issues. (Please email me for more details.)
    arch_index, nepoch = nas_bench.query_index_by_arch( arch ), 25
    assert arch_index >= 0, 'can not find this arch : {:}'.format(arch)
@@ -64,7 +66,7 @@ def train_and_eval(arch, nas_bench, extra_info, dataname='cifar10-valid', use_co
     try:
       valid_acc, time_cost = info['valid-accuracy'], estimated_train_cost + estimated_valid_cost
     except:
-      valid_acc, time_cost = info['est-valid-accuracy'], estimated_train_cost + estimated_valid_cost
+      valid_acc, time_cost = info['valtest-accuracy'], estimated_train_cost + estimated_valid_cost
   else:
     # train a model from scratch.
     raise ValueError('NOT IMPLEMENTED YET')
@@ -127,7 +129,7 @@ def regularized_evolution(cycles, population_size, sample_size, time_budget, ran
   while len(population) < population_size:
     model = Model()
     model.arch = random_arch()
-    model.accuracy, time_cost = train_and_eval(model.arch, nas_bench, extra_info)
+    model.accuracy, time_cost = train_and_eval(model.arch, nas_bench, extra_info, dataname)
     population.append(model)
     history.append(model)
     total_time_cost += time_cost
@@ -152,7 +154,7 @@ def regularized_evolution(cycles, population_size, sample_size, time_budget, ran
     child = Model()
     child.arch = mutate_arch(parent.arch)
     total_time_cost += time.time() - start_time
-    child.accuracy, time_cost = train_and_eval(child.arch, nas_bench, extra_info)
+    child.accuracy, time_cost = train_and_eval(child.arch, nas_bench, extra_info, dataname)
     if total_time_cost + time_cost > time_budget: # return
       return history, total_time_cost
     else:
@@ -174,7 +176,6 @@ def main(xargs, nas_bench):
   prepare_seed(xargs.rand_seed)
   logger = prepare_logger(args)
-  assert xargs.dataset == 'cifar10', 'currently only support CIFAR-10'
+  if xargs.dataset == 'cifar10':
+    dataname = 'cifar10-valid'
+  else:
+    dataname = xargs.dataset
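
In short, the renamed flag chooses between reading the 12-epoch (converged-LR) result directly and reading the 200-epoch run truncated at epoch 25 as an early-stopping proxy. A minimal sketch of the 12-epoch branch only, assuming `api` is a loaded NASBench201API; the early-stop branch additionally rescales the recorded costs and is not reproduced here:

def simulated_train_and_eval(api, arch, dataname='cifar10-valid'):
  # Sketch of the use_012_epoch_training=True branch of train_and_eval.
  arch_index = api.query_index_by_arch(arch)
  assert arch_index >= 0, 'can not find this arch : {:}'.format(arch)
  info = api.get_more_info(arch_index, dataname, None, True)  # 12-epoch, converged LR
  return info['valid-accuracy'], info['train-all-time'] + info['valid-per-time']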

View File

@@ -98,7 +98,10 @@ def main(xargs, nas_bench):
   prepare_seed(xargs.rand_seed)
   logger = prepare_logger(args)
-  assert xargs.dataset == 'cifar10', 'currently only support CIFAR-10'
+  if xargs.dataset == 'cifar10':
+    dataname = 'cifar10-valid'
+  else:
+    dataname = xargs.dataset
   if xargs.data_path is not None:
     train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
     split_Fpath = 'configs/nas-benchmark/cifar-split.txt'
@@ -148,7 +151,7 @@ def main(xargs, nas_bench):
     start_time = time.time()
     log_prob, action = select_action( policy )
     arch = policy.generate_arch( action )
-    reward, cost_time = train_and_eval(arch, nas_bench, extra_info)
+    reward, cost_time = train_and_eval(arch, nas_bench, extra_info, dataname)
     trace.append( (reward, arch) )
     # accumulate time
     if total_costs + cost_time < xargs.time_budget:
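
For context, the surrounding loop is a standard REINFORCE update in which the simulated validation accuracy returned by `train_and_eval` serves as the reward. A toy, self-contained single update step; the 5-way categorical "policy" and the fixed reward are stand-ins, not the script's own Policy class:

import torch

logits = torch.zeros(5, requires_grad=True)   # toy policy over 5 fake operations
optimizer = torch.optim.Adam([logits], lr=0.01)

dist = torch.distributions.Categorical(logits=logits)
action = dist.sample()                        # analogous to select_action(policy)
log_prob = dist.log_prob(action)
reward = 90.0                                 # stand-in for the simulated validation accuracy

loss = -log_prob * reward                     # REINFORCE: ascend log-prob * reward
optimizer.zero_grad()
loss.backward()
optimizer.step()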