# --- Ejemplo n.º 1 ---
def random_search():
    """Randomly search cell architectures and persist the results.

    Builds the GEP genotype machinery, evaluates each chromosome by
    training the network it decodes to, runs ``simple_random`` search,
    then saves/draws graphs and pickles the ``repr`` of the hall-of-fame
    and population individuals plus the accuracy log.
    """
    # fitness: single-objective maximisation (validation accuracy)
    creator.create('FitnessMax', base.Fitness, weights=(1, ))
    creator.create('Individual', Chromosome, fitness=creator.FitnessMax)

    # genotype space: gene -> chromosome -> population
    toolbox = Toolbox()
    toolbox.register('gene', Gene, pset, args.head_length)
    toolbox.register('individual', creator.Individual, toolbox.gene,
                     args.num_genes)
    toolbox.register('population', tools.initRepeat, list, toolbox.individual)

    # phenotype space: chromosome -> computation graph
    toolbox.register('comp_graph', cell_graph.generate_comp_graph)

    def evaluate(indv):
        """Fitness of *indv* = accuracy of its decoded, trained network."""
        _, comp_graph = toolbox.comp_graph(indv)
        net = build_model(comp_graph)
        acc = train_model(net)
        return acc[1].item(),

    toolbox.register('evaluate', evaluate)

    # initial population and best-individual archive
    pop = toolbox.population(n=args.pop_size)
    hof = tools.HallOfFame(args.hof)

    # run the random-search "algorithm"
    pop, log = simple_random(pop, toolbox, hall_of_fame=hof, verbose=False)

    def _persist(individual, subdir, idx):
        """Save and draw *individual*'s graph, then pickle its repr."""
        agraph, _ = cell_graph.generate_comp_graph(individual)
        target = args.dir + '/4-3-seed-4/' + subdir + '/indv_{}'.format(idx)
        cell_graph.save_graph(agraph, target)
        cell_graph.draw_graph(agraph, target)
        with open(target + '/code.pkl', 'wb') as f:
            pickle.dump(repr(individual), f)

    # best individuals, then the final population
    for idx, champ in enumerate(hof):
        _persist(champ, 'best', idx)
    for idx, indv in enumerate(pop):
        _persist(indv, 'pop', idx)

    # accuracy record of the whole run
    with open(args.dir + '/4-3-seed-4/best/record.pkl', 'wb') as f:
        pickle.dump(log, f)
# --- Ejemplo n.º 2 ---
def random_sample():
    """Draw a random population of chromosomes and save their graphs.

    No evaluation is performed — this only samples genotypes, decodes
    each to a computation graph, saves/draws it, and pickles the
    chromosome's ``repr`` alongside.
    """
    # fitness type + individual type (GEP chromosome)
    creator.create('FitnessMax', base.Fitness, weights=(1, ))
    creator.create('Individual', Chromosome, fitness=creator.FitnessMax)

    # genotype machinery: gene -> chromosome -> population
    toolbox = Toolbox()
    toolbox.register('gene', Gene, pset, args.head_length)
    toolbox.register('individual', creator.Individual, toolbox.gene,
                     args.num_genes)
    toolbox.register('population', tools.initRepeat, list, toolbox.individual)

    # genotype -> computation-graph decoder
    toolbox.register('comp_graph', cell_graph.generate_comp_graph)

    # sample the population
    population = toolbox.population(n=args.pop_size)

    # persist every sampled individual
    for idx, indv in enumerate(population):
        agraph, _ = cell_graph.generate_comp_graph(indv)
        target = args.dir + '/indv_{}'.format(idx)
        cell_graph.save_graph(agraph, target)
        cell_graph.draw_graph(agraph, target)
        with open(target + '/code.pkl', 'wb') as f:
            pickle.dump(repr(indv), f)
# --- Ejemplo n.º 3 ---
def main():
    """Evaluate a saved model checkpoint on the CIFAR-10 test set.

    Loads computation graphs from ``comp_graphs/new/*.dot``, builds the
    network, restores weights from ``args.model_path``, and reports test
    accuracy. Requires a CUDA-capable GPU (exits otherwise).
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Reproducibility and device setup: seed NumPy and both the CPU and
    # CUDA torch RNGs; cudnn.benchmark trades determinism for speed.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # Rebuild the architecture from the saved .dot cell graphs.
    graph = [AGraph(g) for g in glob.glob('comp_graphs/new/*.dot')]
    _, comp_graph = cell_graph.generate_comp_graph(graph)
    conf = arch_config(comp_graph=comp_graph,
                       channels=args.init_channels,
                       repeat_list=args.layers,
                       classes=CIFAR_CLASSES)
    model = get_net(conf)
    model = model.cuda()

    # Restore weights from the checkpoint.
    # NOTE(review): torch.load(...)['state_dict'] is normally a plain dict,
    # so this DataParallel isinstance check looks unreachable — confirm how
    # the checkpoints were actually saved before relying on it.
    state_dict = torch.load(args.model_path)['state_dict']
    if isinstance(state_dict, torch.nn.DataParallel):
        state_dict = state_dict.module
    model.load_state_dict(state_dict)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    # Test-set loader: evaluation transform only, no shuffling.
    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data,
                             train=False,
                             download=True,
                             transform=test_transform)

    test_queue = torch.utils.data.DataLoader(test_data,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             num_workers=2)

    # Fixed drop-path probability at inference time.
    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
# --- Ejemplo n.º 4 ---
def main():
    """Train the discovered architecture on CIFAR-10 with a train/valid split.

    Rebuilds the network from saved .dot cell graphs, trains with SGD and
    cosine-annealed LR for ``args.epochs`` epochs, validates each epoch,
    and checkpoints (tracking the best validation accuracy). Requires a
    CUDA-capable GPU (exits otherwise).
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Reproducibility and device setup: seed NumPy and both the CPU and
    # CUDA torch RNGs; cudnn.benchmark trades determinism for speed.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # Rebuild the architecture from the saved .dot cell graphs.
    graph = [AGraph(g) for g in glob.glob('comp_graphs/new/*.dot')]
    _, comp_graph = cell_graph.generate_comp_graph(graph)
    conf = arch_config(comp_graph=comp_graph,
                       channels=args.init_channels,
                       repeat_list=args.layers,
                       classes=CIFAR_CLASSES)
    model = get_net(conf)
    model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    train_data = dset.CIFAR10(root=args.data,
                              train=True,
                              download=True,
                              transform=train_transform)

    # Split the official training set into train/valid by index;
    # the first `train_portion` fraction trains, the rest validates.
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))

    # shuffle must stay False when a sampler is supplied (DataLoader
    # raises otherwise); SubsetRandomSampler does the shuffling.
    train_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=False,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True,
        num_workers=2)

    # NOTE(review): the validation split reuses train_data, so validation
    # images get the *training* transform; valid_transform is unused —
    # confirm this is intentional.
    valid_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=False,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(
            indices[split:num_train]),
        pin_memory=True,
        num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    best_acc = 0
    for epoch in range(args.epochs):
        # NOTE(review): get_lr() is deprecated in newer torch in favour of
        # get_last_lr() — verify against the pinned torch version.
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Linearly ramp drop-path probability over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)

        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)

        # Track the best validation accuracy for checkpoint tagging.
        is_best = False
        if valid_acc > best_acc:
            best_acc = valid_acc
            is_best = True

        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)

        # Step the LR schedule after the epoch (the torch>=1.1 convention).
        scheduler.step()
# --- Ejemplo n.º 5 ---
def search_model():
    """Evolve cell architectures with GEP and save the resulting graphs.

    Sets up the GEP genotype machinery, selection, crossover and mutation
    operators, runs ``gep_simple`` with elitism and a hall of fame, then
    saves/draws the best individuals' graphs, saves the final population's
    graphs, and pickles the statistics log.
    """
    # fitness: single-objective maximisation (accuracy)
    creator.create('FitnessMax', base.Fitness, weights=(1,))
    creator.create('Individual', Chromosome, fitness=creator.FitnessMax)

    # genotype machinery: gene -> chromosome -> population
    toolbox = Toolbox()
    toolbox.register('gene', Gene, pset, args.head_length)
    toolbox.register('individual', creator.Individual, toolbox.gene, args.num_genes)
    toolbox.register('population', tools.initRepeat, list, toolbox.individual)

    # genotype -> computation-graph decoder
    toolbox.register('comp_graph', cell_graph.generate_comp_graph)

    def evaluate(indv):
        """Fitness of *indv* = accuracy of its decoded, trained network."""
        _, comp_graph = toolbox.comp_graph(indv)
        net = build_model(comp_graph)
        acc = train_model(net)
        return acc[1].item(),

    # evaluation and (roulette-wheel) selection
    toolbox.register('evaluate', evaluate)
    toolbox.register('select', tools.selRoulette)

    # crossover operators
    toolbox.register('cx_gene', cross_gene, pb=args.cx_pb[0])
    toolbox.register('cx_2p', cross_two_point, pb=args.cx_pb[1])
    # mutation operators
    toolbox.register('mut_uniform', mutate_uniform, pset=pset, pb=args.mu_pb)
    toolbox.register('mut_invert_program', invert_program, pb=args.invert_pb)
    toolbox.register('mut_transpose_program', transpose_program, pb=args.transpose_pb)
    toolbox.register('mut_transpose_cell', transpose_cell, pb=args.transpose_pb)

    # per-generation fitness statistics
    stats = tools.Statistics(key=lambda ind: ind.fitness.values[0])
    for label, fn in (('avg', np.mean), ('std', np.std),
                      ('min', np.min), ('max', np.max)):
        stats.register(label, fn)

    # initial population and best-individual archive
    pop = toolbox.population(n=args.pop_size)
    hof = tools.HallOfFame(args.hof)

    # run the GEP evolutionary loop
    pop, log = gep_simple(pop,
                          toolbox,
                          n_generations=args.num_gen,
                          n_elites=args.elites,
                          stats=stats,
                          hall_of_fame=hof,
                          verbose=True)
    print('\n', log)

    # best individuals: save and render their graphs
    for idx, champ in enumerate(hof):
        agraph, _ = cell_graph.generate_comp_graph(champ)
        target = args.path + '/best/indv_{}'.format(idx)
        cell_graph.save_graph(agraph, target)
        cell_graph.draw_graph(agraph, target)

    # final population: save graphs only (no rendering)
    for idx, indv in enumerate(pop):
        agraph, _ = cell_graph.generate_comp_graph(indv)
        cell_graph.save_graph(agraph, args.path + '/pop/indv_{}'.format(idx))

    # statistics log of the run
    with open(args.path + '/best/stats.pkl', 'wb') as f:
        pickle.dump(log, f,)