Add int search space

Author: D-X-Y
Date: 2021-03-18 16:02:55 +08:00
parent ece6ac5f41
commit 63c8bb9bc8
67 changed files with 5150 additions and 1474 deletions


@@ -13,20 +13,38 @@ if str(lib_dir) not in sys.path:
     sys.path.insert(0, str(lib_dir))
 from config_utils import load_config, dict2config
 from datasets import get_datasets, get_nas_search_loaders
-from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
+from procedures import (
+    prepare_seed,
+    prepare_logger,
+    save_checkpoint,
+    copy_checkpoint,
+    get_optim_scheduler,
+)
 from utils import get_model_infos, obtain_accuracy
 from log_utils import AverageMeter, time_string, convert_secs2time
 from models import get_cell_based_tiny_net, get_search_spaces
 from nas_201_api import NASBench201API as API
 
 
-def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger):
+def search_func(
+    xloader,
+    network,
+    criterion,
+    scheduler,
+    w_optimizer,
+    a_optimizer,
+    epoch_str,
+    print_freq,
+    logger,
+):
     data_time, batch_time = AverageMeter(), AverageMeter()
     base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
     arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
     network.train()
     end = time.time()
-    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
+    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
+        xloader
+    ):
         scheduler.update(None, 1.0 * step / len(xloader))
         base_targets = base_targets.cuda(non_blocking=True)
         arch_targets = arch_targets.cuda(non_blocking=True)
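
This hunk only re-wraps the top of search_func, whose loop implements the usual alternating (bi-level) GDAS/DARTS-style update: the search loader yields a paired batch, the "base" half trains the network weights via w_optimizer, and the held-out "arch" half trains the architecture parameters via a_optimizer. A minimal sketch of one such step, assuming network(x) returns logits directly (the actual model's return value may differ) and that the optimizers are set up as in main() below:

    import torch

    def alternating_step(network, criterion, w_optimizer, a_optimizer,
                         base_inputs, base_targets, arch_inputs, arch_targets):
        # (1) weight step on the "base" split
        w_optimizer.zero_grad()
        base_loss = criterion(network(base_inputs), base_targets)
        base_loss.backward()
        torch.nn.utils.clip_grad_norm_(network.parameters(), 5)  # same clipping as in the diff
        w_optimizer.step()
        # (2) architecture step on the held-out "arch" split
        a_optimizer.zero_grad()
        arch_loss = criterion(network(arch_inputs), arch_targets)
        arch_loss.backward()
        a_optimizer.step()
        return base_loss.item(), arch_loss.item()
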
@@ -41,7 +59,9 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
         torch.nn.utils.clip_grad_norm_(network.parameters(), 5)
         w_optimizer.step()
         # record
-        base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
+        base_prec1, base_prec5 = obtain_accuracy(
+            logits.data, base_targets.data, topk=(1, 5)
+        )
         base_losses.update(base_loss.item(), base_inputs.size(0))
         base_top1.update(base_prec1.item(), base_inputs.size(0))
         base_top5.update(base_prec5.item(), base_inputs.size(0))
@@ -53,7 +73,9 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
         arch_loss.backward()
         a_optimizer.step()
         # record
-        arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
+        arch_prec1, arch_prec5 = obtain_accuracy(
+            logits.data, arch_targets.data, topk=(1, 5)
+        )
         arch_losses.update(arch_loss.item(), arch_inputs.size(0))
         arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
         arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
@@ -63,7 +85,11 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
         end = time.time()
 
         if step % print_freq == 0 or step + 1 == len(xloader):
-            Sstr = "*SEARCH* " + time_string() + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
+            Sstr = (
+                "*SEARCH* "
+                + time_string()
+                + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
+            )
             Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                 batch_time=batch_time, data_time=data_time
             )
@@ -74,7 +100,14 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
                 loss=arch_losses, top1=arch_top1, top5=arch_top5
             )
             logger.log(Sstr + " " + Tstr + " " + Wstr + " " + Astr)
-    return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg
+    return (
+        base_losses.avg,
+        base_top1.avg,
+        base_top5.avg,
+        arch_losses.avg,
+        arch_top1.avg,
+        arch_top5.avg,
+    )
 
 
 def main(xargs):
@@ -86,11 +119,20 @@ def main(xargs):
     prepare_seed(xargs.rand_seed)
     logger = prepare_logger(args)
 
-    train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
+    train_data, valid_data, xshape, class_num = get_datasets(
+        xargs.dataset, xargs.data_path, -1
+    )
     # config_path = 'configs/nas-benchmark/algos/GDAS.config'
-    config = load_config(xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger)
+    config = load_config(
+        xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
+    )
     search_loader, _, valid_loader = get_nas_search_loaders(
-        train_data, valid_data, xargs.dataset, "configs/nas-benchmark/", config.batch_size, xargs.workers
+        train_data,
+        valid_data,
+        xargs.dataset,
+        "configs/nas-benchmark/",
+        config.batch_size,
+        xargs.workers,
     )
     logger.log(
         "||||||| {:10s} ||||||| Search-Loader-Num={:}, batch size={:}".format(
@@ -129,9 +171,14 @@ def main(xargs):
     logger.log("search-model :\n{:}".format(search_model))
     logger.log("model-config : {:}".format(model_config))
 
-    w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)
+    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
+        search_model.get_weights(), config
+    )
     a_optimizer = torch.optim.Adam(
-        search_model.get_alphas(), lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay
+        search_model.get_alphas(),
+        lr=xargs.arch_learning_rate,
+        betas=(0.5, 0.999),
+        weight_decay=xargs.arch_weight_decay,
     )
     logger.log("w-optimizer : {:}".format(w_optimizer))
     logger.log("a-optimizer : {:}".format(a_optimizer))
@@ -146,11 +193,17 @@ def main(xargs):
         api = API(xargs.arch_nas_dataset)
     logger.log("{:} create API = {:} done".format(time_string(), api))
 
-    last_info, model_base_path, model_best_path = logger.path("info"), logger.path("model"), logger.path("best")
+    last_info, model_base_path, model_best_path = (
+        logger.path("info"),
+        logger.path("model"),
+        logger.path("best"),
+    )
     network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()
 
     if last_info.exists():  # automatically resume from previous checkpoint
-        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
+        logger.log(
+            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
+        )
         last_info = torch.load(last_info)
         start_epoch = last_info["epoch"]
         checkpoint = torch.load(last_info["last_checkpoint"])
@@ -161,11 +214,17 @@ def main(xargs):
         w_optimizer.load_state_dict(checkpoint["w_optimizer"])
         a_optimizer.load_state_dict(checkpoint["a_optimizer"])
         logger.log(
-            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch)
+            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
+                last_info, start_epoch
+            )
         )
     else:
         logger.log("=> do not find the last-info file : {:}".format(last_info))
-        start_epoch, valid_accuracies, genotypes = 0, {"best": -1}, {-1: search_model.genotype()}
+        start_epoch, valid_accuracies, genotypes = (
+            0,
+            {"best": -1},
+            {-1: search_model.genotype()},
+        )
 
     # start training
     start_time, search_time, epoch_time, total_epoch = (
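
The resume branch re-wrapped above follows the standard PyTorch checkpoint pattern: a small "last-info" file records the epoch and the path of the latest checkpoint, whose state dicts are then restored. Condensed into one hypothetical helper (the "search_model" key name and the return convention are assumptions for illustration; the diff only shows the optimizer keys):

    import torch

    def resume_if_possible(last_info_path, network, w_optimizer, a_optimizer):
        # Returns the epoch to resume from (0 when no checkpoint exists).
        if not last_info_path.exists():
            return 0
        last_info = torch.load(last_info_path)
        checkpoint = torch.load(last_info["last_checkpoint"])
        network.load_state_dict(checkpoint["search_model"])  # key name assumed
        w_optimizer.load_state_dict(checkpoint["w_optimizer"])
        a_optimizer.load_state_dict(checkpoint["a_optimizer"])
        return last_info["epoch"]
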
@@ -176,16 +235,27 @@ def main(xargs):
     )
     for epoch in range(start_epoch, total_epoch):
         w_scheduler.update(epoch, 0.0)
-        need_time = "Time Left: {:}".format(convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
+        need_time = "Time Left: {:}".format(
+            convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
+        )
         epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
-        search_model.set_tau(xargs.tau_max - (xargs.tau_max - xargs.tau_min) * epoch / (total_epoch - 1))
+        search_model.set_tau(
+            xargs.tau_max - (xargs.tau_max - xargs.tau_min) * epoch / (total_epoch - 1)
+        )
         logger.log(
             "\n[Search the {:}-th epoch] {:}, tau={:}, LR={:}".format(
                 epoch_str, need_time, search_model.get_tau(), min(w_scheduler.get_lr())
            )
         )
 
-        search_w_loss, search_w_top1, search_w_top5, valid_a_loss, valid_a_top1, valid_a_top5 = search_func(
+        (
+            search_w_loss,
+            search_w_top1,
+            search_w_top5,
+            valid_a_loss,
+            valid_a_top1,
+            valid_a_top5,
+        ) = search_func(
             search_loader,
             network,
             criterion,
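
The set_tau call re-wrapped above anneals the Gumbel-Softmax temperature linearly from tau_max at the first epoch to tau_min at the last. As a standalone function (default values are illustrative only; the script reads them from the --tau_max / --tau_min flags defined below):

    def tau_schedule(epoch, total_epoch, tau_max=10.0, tau_min=0.1):
        # Linear decay: tau(0) == tau_max, tau(total_epoch - 1) == tau_min.
        return tau_max - (tau_max - tau_min) * epoch / (total_epoch - 1)

For example, with tau_max=10, tau_min=0.1, and 250 epochs, tau drops by (10 - 0.1) / 249, about 0.04 per epoch.
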
@@ -217,7 +287,9 @@ def main(xargs):
             find_best = False
 
         genotypes[epoch] = search_model.genotype()
-        logger.log("<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch]))
+        logger.log(
+            "<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
+        )
         # save checkpoint
         save_path = save_checkpoint(
             {
@@ -282,29 +354,52 @@ if __name__ == "__main__":
     parser.add_argument("--search_space_name", type=str, help="The search space name.")
     parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
     parser.add_argument("--channel", type=int, help="The number of channels.")
-    parser.add_argument("--num_cells", type=int, help="The number of cells in one stage.")
+    parser.add_argument(
+        "--num_cells", type=int, help="The number of cells in one stage."
+    )
     parser.add_argument(
         "--track_running_stats",
         type=int,
         choices=[0, 1],
         help="Whether use track_running_stats or not in the BN layer.",
     )
-    parser.add_argument("--config_path", type=str, help="The path of the configuration.")
+    parser.add_argument(
+        "--config_path", type=str, help="The path of the configuration."
+    )
     parser.add_argument(
         "--model_config",
         type=str,
         help="The path of the model configuration. When this arg is set, it will cover max_nodes / channels / num_cells.",
     )
     # architecture learning rate
-    parser.add_argument("--arch_learning_rate", type=float, default=3e-4, help="learning rate for arch encoding")
-    parser.add_argument("--arch_weight_decay", type=float, default=1e-3, help="weight decay for arch encoding")
+    parser.add_argument(
+        "--arch_learning_rate",
+        type=float,
+        default=3e-4,
+        help="learning rate for arch encoding",
+    )
+    parser.add_argument(
+        "--arch_weight_decay",
+        type=float,
+        default=1e-3,
+        help="weight decay for arch encoding",
+    )
     parser.add_argument("--tau_min", type=float, help="The minimum tau for Gumbel")
     parser.add_argument("--tau_max", type=float, help="The maximum tau for Gumbel")
     # log
-    parser.add_argument("--workers", type=int, default=2, help="number of data loading workers (default: 2)")
-    parser.add_argument("--save_dir", type=str, help="Folder to save checkpoints and log.")
     parser.add_argument(
-        "--arch_nas_dataset", type=str, help="The path to load the architecture dataset (tiny-nas-benchmark)."
+        "--workers",
+        type=int,
+        default=2,
+        help="number of data loading workers (default: 2)",
     )
+    parser.add_argument(
+        "--save_dir", type=str, help="Folder to save checkpoints and log."
+    )
+    parser.add_argument(
+        "--arch_nas_dataset",
+        type=str,
+        help="The path to load the architecture dataset (tiny-nas-benchmark).",
+    )
     parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
     parser.add_argument("--rand_seed", type=int, help="manual seed")