Example #1
# Assumes the surrounding module defines torch, nn, optim, Variable, random,
# evaluate_torch, eval_fitness, trainloader, testloader and torch_batch_size.
def eval_genomes(genomes, config):

    if torch.cuda.is_available():
        gpu = True
        print("Running on GPU!")
    else:
        gpu = False
        print("Running on CPU!")

    # read the learning rate and stopping delta from lr.txt
    with open("lr.txt", "r") as lrfile:
        lr = float(lrfile.readline().rstrip('\n'))
        delta = float(lrfile.readline().rstrip('\n'))

    j = 0
    for genome_id, genome in genomes:
        j += 1

        # build the network from the genome
        net = evaluate_torch.Net(config, genome)

        if gpu:
            net.cuda()

        criterion = nn.CrossEntropyLoss()  # classification cross-entropy loss
        optimizer = optim.SGD(net.parameters(), lr, momentum=0.9)

        # Evaluate the fitness before training
        evaluate_batch_size = 100
        start = int(random() * (len(trainloader) - evaluate_batch_size))

        fit = eval_fitness(net, trainloader, evaluate_batch_size,
                           torch_batch_size, start, gpu)

        comp = open("comp.csv", "a")
        comp.write('{0},{1:3.3f},'.format(j, fit))
        print('Before: {0}: {1:3.3f}'.format(j, fit))

        # train the network
        epoch = 0
        running_loss = 0.0
        num_loss = 0
        last_running_loss = 0.0
        training = True
        train_epoch = 40
        while training and epoch < train_epoch:  # loop over the dataset multiple times
            epoch += 1

            for i, data in enumerate(trainloader, 0):
                # get the inputs
                inputs, labels = data

                # wrap them in Variable
                if gpu:
                    inputs, labels = Variable(inputs).cuda(), Variable(
                        labels).cuda()
                else:
                    inputs, labels = Variable(inputs), Variable(labels)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward + backward + optimize
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                # record the losses
                running_loss += loss.data.item()
                num_loss += 1

                # print statistics
                if i % 50 == 49:  # print every 50 mini-batches
                    print('[%d, %4d] loss: %.3f' %
                          (epoch, i + 1, running_loss / (i + 1)))

            print("Epoch {0:d}, Average loss:{1:.5f}".format(
                epoch, running_loss / num_loss))

            # stop when the average loss change drops below delta, or once the
            # loss starts increasing again after the first epoch
            if ((abs(last_running_loss - running_loss) / num_loss < delta)
                    or ((last_running_loss != 0)
                        and (running_loss > last_running_loss))):
                training = False
                print("Stop training")
                break
            last_running_loss = running_loss
            running_loss = 0.0
            num_loss = 0
        print('Finished Training')

        # write the trained parameters back to the genome, then evaluate fitness
        net.write_back_parameters(genome)

        evaluate_batch_size = 0  # 0 appears to mean "evaluate the whole loader"
        start = 0
        fitness_evaluate = eval_fitness(net, trainloader, evaluate_batch_size,
                                        torch_batch_size, start, gpu)

        test_batch_size = 0
        start = 0
        fitness_test = eval_fitness(net, testloader, test_batch_size,
                                    torch_batch_size, start, gpu)

        genome.fitness = fitness_evaluate
        print('After: {0:3.3f}, {1:3.3f}, {2}'.format(fitness_evaluate,
                                                      fitness_test, genome_id))
        comp.write('{0:3.3f},{1:3.3f},{2},{3:3.6f},{4:3.6f}\n'.format(
            fitness_evaluate, fitness_test, genome_id, lr, delta))
        comp.close()
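
These examples all call an eval_fitness helper that is not shown on this page. The sketch below is only a guess at its behavior, inferred from the call sites: batch_count == 0 is taken to mean "evaluate over the whole loader", and start is treated as a mini-batch offset.

import torch

def eval_fitness(net, loader, batch_count, torch_batch_size, start, gpu):
    # accuracy over `batch_count` mini-batches beginning at `start`
    # (0 batches = the whole loader); a sketch, not the project's code.
    # torch_batch_size is accepted only for signature compatibility; the
    # accuracy is normalized by the actual sample count.
    net.eval()
    hit, total = 0, 0
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(loader):
            if i < start:
                continue
            if batch_count and i >= start + batch_count:
                break
            if gpu:
                inputs, labels = inputs.cuda(), labels.cuda()
            _, predicted = torch.max(net(inputs), 1)
            hit += (predicted == labels).sum().item()
            total += labels.size(0)
    return hit / total if total else 0.0
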
Example #2
# Add a stdout reporter to show progress in the terminal.
p.add_reporter(neat.StdOutReporter(False))

# Run until a solution is found.
winner = p.run(eval_genomes)

# Alternative: run with a threaded evaluator for up to 300 generations.
# Note that neat.ThreadedEvaluator expects a per-genome function with the
# signature (genome, config), so eval_genomes would need adapting first.
# pe = neat.ThreadedEvaluator(4, eval_genomes)
# winner = p.run(pe.evaluate, 300)
# pe.stop()

# Display the winning genome.
#print('\nBest genome:\n{!s}'.format(winner))

net = evaluate_torch.Net(config, winner)
if gpu:  # assumes a module-level gpu flag set earlier
    net.cuda()

final_train_epoch = 40

for epoch in range(final_train_epoch):

    # retrain the winner for a few more epochs; re-read the learning rate
    # each epoch so it can be adjusted while training runs
    with open("lr.txt", "r") as lrfile:
        lr = float(lrfile.readline().rstrip('\n'))

    criterion = nn.CrossEntropyLoss()  # classification cross-entropy loss
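
Example 2 picks up after a neat-python Population has already been created. For context, a minimal setup using the standard neat-python API might look like this (the config filename is a placeholder):

import neat

config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     'neat-config')  # placeholder config path
p = neat.Population(config)
p.add_reporter(neat.StdOutReporter(False))
winner = p.run(eval_genomes, 300)  # run for up to 300 generations
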
Example #3
def eval_genomes(genomes, config):

    j = 0
    global cur_generation
    cur_generation += 1
    for genome_id, genome in genomes:
        j += 1
        # skip genomes that have already been evaluated
        if genome_id in has_evaled:
            print('{0}: {1:3.3f} {2}'.format(j, genome.fitness, genome_id))
            continue
        has_evaled[genome_id] = 1
        evaluate_batch_size = 400 * 2

        hit_count = 0
        start = 0
        i = 0
        state = True

        net = evaluate_torch.Net(config, genome)

        # pruning is effectively disabled: random() never exceeds 1.0
        if random() > 1.0:
            print('prune')
            if random() > 0.5:
                del_node, del_connects, state = net.prune_one_filter()
            else:
                del_node, del_connects, state = net.prune_fc_weight()
        else:
            state = False

        # retrain longer as the generations progress
        state_dict = fine_tune.retrain(net.state_dict(),
                                       2 * int(state) + 20 + int(cur_generation / 5))
        
        # copy the retrained weights back into the net by parameter order,
        # then write them through to the NEAT genome
        state_key = list(state_dict.keys())
        for idx, p in enumerate(net.parameters()):
            p.data = state_dict[state_key[idx]]
        net.write_back_parameters(genome)

        for num, data in enumerate(testloader, start):  # start only offsets num
            i += 1
            # get the inputs
            inputs, labels = data

            # wrap them in Variable
            inputs, labels = Variable(inputs), Variable(labels)
            try:
                # rebuild the net from the genome, which now carries the
                # retrained parameters
                net = evaluate_torch.Net(config, genome)

                outputs = net(inputs)

                _, predicted = torch.max(outputs.data, 1)
                hit_count += (predicted == labels).sum().item()

            except Exception as e:
                print(e)
                genome.fitness = 0
            if i == evaluate_batch_size - 1:
                break

        genome.fitness = hit_count / (evaluate_batch_size * torch_batch_size)
        print('{0}: {1:3.3f} {2}'.format(j, genome.fitness, genome_id))
Example #4
def eval_genome(genome_id, genome, config):
    # skip genomes that have already been evaluated
    if genome_id in has_evaled:
        return genome
    has_evaled[genome_id] = 1

    evaluate_batch_size = 400 * 2
    hit_count = 0
    start = 0
    i = 0
    state = True

    net = evaluate_torch.Net(config, genome)

    # pruning is effectively disabled: random() never exceeds 1.0
    if random() > 1.0:
        print('prune')
        if random() > 0.5:
            del_node, del_connects, state = net.prune_one_filter()
        else:
            del_node, del_connects, state = net.prune_fc_weight()
    else:
        state = False

    # retrain longer as the generations progress
    state_dict = fine_tune.retrain(
        net.state_dict(), 2 * int(state) + 20 + int(cur_generation / 5))

    # copy the retrained weights back into the net by parameter order
    state_key = list(state_dict.keys())

    for idx, p in enumerate(net.parameters()):
        p.data = state_dict[state_key[idx]]

    net.write_back_parameters(genome)

    for num, data in enumerate(testloader, start):  # start only offsets num
        i += 1
        # get the inputs
        inputs, labels = data

        # wrap them in Variable
        inputs, labels = Variable(inputs), Variable(labels)
        try:
            # rebuild the net from the genome, which now carries the
            # retrained parameters
            net = evaluate_torch.Net(config, genome)

            outputs = net(inputs)

            _, predicted = torch.max(outputs.data, 1)
            hit_count += (predicted == labels).sum().item()

        except Exception as e:
            print(e)
            genome.fitness = 0
        if i == evaluate_batch_size - 1:
            break

    genome.fitness = hit_count / (evaluate_batch_size * torch_batch_size)
    return genome
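
Examples 3 and 4 copy the retrained weights back by pairing net.parameters() with the state_dict keys positionally, which silently breaks if the orderings diverge, and skips buffers (e.g. BatchNorm running statistics) entirely. When the key sets actually match, PyTorch's built-in loader does the same job with strict key and shape checking; a minimal sketch under that assumption:

# assumes fine_tune.retrain returned a dict whose keys match net's own
net.load_state_dict(state_dict)
net.write_back_parameters(genome)  # project-specific sync back to the genome
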
Example #5
def eval_genomes(genomes, config):

    global gpu
    global first_time
    global best_on_test_set

    best_every_generation = list()

    if torch.cuda.is_available():
        gpu = True
        print("Running on GPU!")
    else:
        gpu = False
        print("Running on CPU!")

    """
    lrfile= open("lr.txt", "r")
    tmp = lrfile.readline().rstrip('\n')
    lr = float(tmp)
    tmp = lrfile.readline().rstrip('\n')
    delta = float(tmp)
    tmp = lrfile.readline().rstrip('\n')
    max_epoch = int(tmp)
    lrfile.close()
    """

    #genomes_id_set = set()

    j = 0
    for genome_id, genome in genomes:
        j += 1

        # load lr and the epoch budget (re-read for every genome so they can
        # be tuned while the run is in progress)
        with open("lr.txt", "r") as lrfile:
            lr = float(lrfile.readline().rstrip('\n'))
            max_epoch = int(lrfile.readline().rstrip('\n'))

        # the third argument appears to control whether parameters stored in
        # the genome are loaded; skip loading on the very first generation
        print(first_time)
        net = evaluate_torch.Net(config, genome, not first_time)

        if gpu:
            net.cuda()

        criterion = nn.CrossEntropyLoss()  # classification cross-entropy loss
        optimizer = optim.SGD(net.parameters(), lr, momentum=0.9, weight_decay=5e-4)

        # Evaluate the fitness before training
        evaluate_batch_size = 0
        start = 0

        fit = eval_fitness(net, evaluateloader, evaluate_batch_size, torch_batch_size, start, gpu)

        comp = open("comp.csv", "a")
        comp.write('{0},{1:3.3f},'.format(j, fit))
        print('Before: {0}: {1:3.3f}'.format(j, fit))

        # train the network
        epoch = 0
        lr_total_reduce_times = 3  # number of times the lr is cut to a tenth

        evaluate_and_print_interval = 10  # epochs between fitness logging

        training = True
        train_epoch = max_epoch
        while training and epoch < train_epoch:  # loop over the dataset multiple times
            net.train()
            epoch += 1
            running_loss = 0.0
            correct = 0
            total = 0

            print('Epoch: %d' % epoch)

            # cut the learning rate to a tenth, lr_total_reduce_times times in total
            if (train_epoch > lr_total_reduce_times) and (epoch % (train_epoch // lr_total_reduce_times) == 0):
                lr /= 10
                # recreate the optimizer, keeping the original weight decay
                optimizer = optim.SGD(net.parameters(), lr, momentum=0.9, weight_decay=5e-4)
                print("Learning rate set to: {0:1.8f}".format(lr))

            mixup = True  # whether to apply mixup data augmentation

            for i, data in enumerate(trainloader, 0):
                # get the inputs
                inputs, labels = data

                # move the batch to the right device
                device = "cuda" if gpu else "cpu"
                inputs, labels = inputs.to(device), labels.to(device)

                # Mixup
                if mixup:
                    inputs, labels_a, labels_b, lam = mixup_data(inputs, labels, 1.)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward + backward + optimize
                outputs = net(inputs)
                if mixup:
                    loss = mixup_criterion(criterion, outputs, labels_a, labels_b, lam)
                else:
                    loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                # record the losses
                running_loss += loss.data.item()
                _, predicted = outputs.max(1)
                total += labels.size(0)
                correct += predicted.eq(labels).sum().item()
                # print statistics every 100 mini-batches
                if i % 100 == 0:
                    print(i, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                          % (running_loss / (i + 1), 100. * correct / total, correct, total))
            """
            print("Epoch {0:d}, Average loss:{1:.5f}".format(epoch, running_loss / num_loss))
            if (delta > 0):
                if ((abs(last_running_loss - running_loss)/num_loss < delta) or
                    (last_running_loss != 0) and (running_loss > last_running_loss)):
                    training = False
                    print("Stop trainning")
                    break;
                #print(abs(last_running_loss - running_loss))

            last_running_loss = running_loss
            running_loss = 0.0
            num_loss = 0
            """
            # print precision every 10 epoch

            if epoch % evaluate_and_print_interval == (evaluate_and_print_interval - 1):
                fitness_evaluate = eval_fitness(net, evaluateloader, 0, torch_batch_size, 0, gpu)
                fitness_test = eval_fitness(net, testloader, 0, torch_batch_size, 0, gpu)
                print('Epoch {3:d}: {0:3.3f}, {1:3.3f}, {2}'.format(fitness_evaluate, fitness_test, genome_id, epoch))
                ep = open("epoch.csv", "a")
                ep.write("{0}, {1:d}, {2:3.3f}, {3:3.3f}, {4:3.6f}\n".format(genome_id, epoch, fitness_evaluate, fitness_test, lr))
                ep.close()

        print('Finished Training')

        # Write back parameters to NEAT (left disabled in this version)
        # net.write_back_parameters(genome)

        fitness_train = eval_fitness(net, trainloader, 0, torch_batch_size, 0, gpu)
        fitness_evaluate = eval_fitness(net, evaluateloader, 0, torch_batch_size, 0, gpu)
        fitness_test = eval_fitness(net, testloader, 0, torch_batch_size, 0, gpu)

        """

        print("Write parameters to genome.")
        net.write_back_parameters(genome)

        fitness_train = eval_fitness(net, trainloader, 0, torch_batch_size, 0, gpu)
        fitness_evaluate = eval_fitness(net, evaluateloader, 0, torch_batch_size, 0, gpu)
        fitness_test = eval_fitness(net, testloader, 0, torch_batch_size, 0, gpu)
        print('After write back: {0:3.3f}, {1:3.3f}, {2:3.3f}, {3}\n'.format(fitness_train, fitness_evaluate, fitness_test, genome_id))

        
        for module in net.children():
            for block in module:
                if isinstance(block, evaluate_torch.cnn_block):
                    #print(block.conv1.bias.data)
                    print(block.conv2.bias.data)
                elif isinstance(block, evaluate_torch.fc_block):
                    print(block.fc.bias.data)
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        
        print("Create net from genome.")
        net2 = evaluate_torch.Net(config, genome, True)
       
        for module in net2.children():
            for block in module:
                if isinstance(block, evaluate_torch.cnn_block):
                    #print(block.conv1.bias.data)
                    print(block.conv2.bias.data)
                elif isinstance(block, evaluate_torch.fc_block):
                    print(block.fc.bias.data)
        
        if gpu:
            net2.cuda()

        fitness_train = eval_fitness(net2, trainloader, 0, torch_batch_size, 0, gpu)
        fitness_evaluate = eval_fitness(net2, evaluateloader, 0, torch_batch_size, 0, gpu)
        fitness_test = eval_fitness(net2, testloader, 0, torch_batch_size, 0, gpu)

        print('After reloaded: {0:3.3f}, {1:3.3f}, {2:3.3f}, {3}\n'.format(fitness_train, fitness_evaluate, fitness_test, genome_id))
        """

        # save the best net on the test set
        if fitness_test > best_on_test_set:
            best_on_test_set = fitness_test
            torch.save(net, "best.pkl")

        best_every_generation.append((fitness_train, fitness_evaluate, fitness_test, genome_id, lr))

        genome.fitness = fitness_evaluate
        print('After: {0:3.3f}, {1:3.3f}, {2:3.3f}, {3}\n'.format(fitness_train, fitness_evaluate, fitness_test, genome_id))
        comp.write('{0:3.3f},{1:3.3f},{2:3.3f},{3},{4:3.6f}\n'.format(fitness_train, fitness_evaluate, fitness_test, genome_id, lr))
        comp.close()
    if first_time:
        first_time = False

    # pick this generation's best genome by training-set fitness
    best_id = max(range(len(best_every_generation)),
                  key=lambda i: best_every_generation[i][0])

    res = open("result.csv", "a")
    res.write('{0:3.3f},{1:3.3f},{2:3.3f},{3},{4:3.6f}\n'.format(
        *best_every_generation[best_id]))
    res.close()
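
Example 5 calls mixup_data and mixup_criterion without defining them. The widely used reference implementation of mixup (Zhang et al., 2018) is sketched below; the project's own helpers may differ in detail:

import numpy as np
import torch

def mixup_data(x, y, alpha=1.0):
    # blend each sample with a randomly chosen partner from the same batch
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(x.size(0), device=x.device)
    mixed_x = lam * x + (1 - lam) * x[index, :]
    return mixed_x, y, y[index], lam

def mixup_criterion(criterion, pred, y_a, y_b, lam):
    # convex combination of the losses against both label sets
    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)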