Example #1
def CAE_AE_TRAIN(shapes, task_samples, iterations):
    global stage
    global student_model
    global vae_optimizer
    student_model = models.CAE()  #.to(device)
    vae_optimizer = optim.Adam(student_model.parameters(),
                               lr=0.0001)  #, amsgrad=True)
    student_model.train()
    print("len of takssample is", len(task_samples))
    accuracies = np.zeros((iterations, len(task_samples)))
    # make sure Skill_Mu has one latent-code slot per task
    for i in range(0, len(task_samples) - len(Skill_Mu)):
        Skill_Mu.append([])
    for batch_idx in range(1, iterations):
        randPerm = np.random.permutation(len(task_samples))
        #randPerm,nReps = helper_functions.biased_permutation(stage+1,nBiased,trainBias,len(task_samples),minReps)
        print(randPerm)  # nReps is only meaningful with the commented-out biased_permutation call
        for s in randPerm:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(
                task_samples[skill]))  #.to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 61326), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vae_optimizer.step()
            print('Train Iteration: {},\tLoss: {:.6f}, picked skill {}'.format(
                batch_idx, loss.item(), skill))

        if batch_idx % 1 == 0:  # "% 1" means this evaluation runs every iteration
            values = 0
            for i in range(0, len(task_samples)):
                mu1 = Skill_Mu[i][0]
                task_sample = student_model.decoder(mu1).cpu()
                sample = task_sample.data.numpy().reshape(61326)
                #print('MSE IS',i,mean_squared_error(task_sample.data.numpy(),Variable(torch.FloatTensor(task_samples[i])).data.numpy()))
                #mse=mean_squared_error(task_sample.data.numpy(),Variable(torch.FloatTensor(task_samples[i])).data.numpy())
                final_weights = helper_functions.unFlattenNetwork(
                    sample, shapes)
                loadWeights_cifar(final_weights, net)
                if i >= 6:
                    load_individual_class([i], [0, 1, 2, 3])
                else:
                    load_individual_class([i], [6, 7, 8, 9])
                Avg_Accuracy = test()
                accuracies[batch_idx, i] = Avg_Accuracy
                if round(Avg_Accuracy + 0.5) >= int(Actual_Accuracy[i]):  # within ~0.5 of the original accuracy
                    values = values + 1
            if values == len(task_samples):
                print("########## \n Batch id is", batch_idx, "\n#########")
                threshold_batchid.append(batch_idx)
                break
    stage = stage + 1
    return accuracies
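# helper_functions.Contractive_loss_function is called above but not shown in
# these examples. Below is a minimal sketch of what such a function typically
# computes for a contractive autoencoder, assuming sigmoid hidden units and
# the (W, x, recons_x, h, lam) signature used in the training loop; the name
# and the closed-form Jacobian term are assumptions, not the repository's
# actual implementation.
import torch
import torch.nn.functional as F

def contractive_loss_sketch(W, x, recons_x, h, lam):
    mse = F.mse_loss(recons_x, x, reduction='sum')  # reconstruction term
    dh = h * (1.0 - h)                              # sigmoid derivative, shape (batch, hidden)
    w_sum = torch.sum(W ** 2, dim=1)                # squared row norms of W, shape (hidden,)
    contractive = torch.sum(dh ** 2 * w_sum)        # ||J_f(x)||_F^2 for sigmoid units
    closs_mul_lam = lam * contractive
    return mse + closs_mul_lam, mse, contractive, closs_mul_lam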
Example #2
def loadWeights_cifar(weights_to_load, model):  # fragment begins mid-function; signature inferred from the call in Example #1
    model.conv1.weight.data = torch.from_numpy(weights_to_load[0]).cuda()
    model.conv1.bias.data = torch.from_numpy(weights_to_load[1]).cuda()
    model.conv2.weight.data = torch.from_numpy(weights_to_load[2]).cuda()
    model.conv2.bias.data = torch.from_numpy(weights_to_load[3]).cuda()
    model.fc1.weight.data = torch.from_numpy(weights_to_load[4]).cuda()
    model.fc1.bias.data = torch.from_numpy(weights_to_load[5]).cuda()
    model.fc2.weight.data = torch.from_numpy(weights_to_load[6]).cuda()
    model.fc2.bias.data = torch.from_numpy(weights_to_load[7]).cuda()
    return model
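# The per-layer assignments above work, but they must be edited whenever the
# architecture changes. A hypothetical generic variant (load_flat_weights is
# not part of this repository) walks model.parameters() instead, relying on
# PyTorch yielding parameters in registration order (conv1.weight,
# conv1.bias, conv2.weight, ...), which matches the order of weights_to_load:
import torch

def load_flat_weights(model, weights_to_load, device='cuda'):
    for param, array in zip(model.parameters(), weights_to_load):
        param.data = torch.from_numpy(array).to(device)
    return model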


print("device is ", device)
#model = models.Net().to(device)
#model_reset= models.Net().to(device)
#optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
student_model = models.CAE().to(
    device)  #nn.DataParallel(models.CAE().to(device))
teacher_model = models.CAE().to(
    device)  #nn.DataParallel(models.CAE().to(device))
vae_optimizer = optim.Adam(student_model.parameters(), lr=0.0001)
lam = 0.001
Actual_Accuracy = []
threshold_batchid = []
Actual_task_net_weights = []
# win = vis.line(
# X=np.array([0]),
# Y=np.array([0]),
# win="test",
# name='Line1',
# )
#####################################################################################################################
# For viewing the test/train images - just for confirmation
Example #3
def train_CAE_on_skill(model, task_samples, final_skill_sample):  # fragment begins mid-function; name and signature are hypothetical
    Flat_input, net_shapes = helper_functions.flattenNetwork(model.cpu())
    final_skill_sample.append(Flat_input)
    if len(task_samples) == 0:  # first skill: fewer training iterations needed
        accuracies = CAE_AE_TRAIN(net_shapes, task_samples + final_skill_sample, 100)
    else:
        accuracies = CAE_AE_TRAIN(net_shapes, task_samples + final_skill_sample, 300)
    return accuracies
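# helper_functions.flattenNetwork / unFlattenNetwork are used throughout but
# never shown. A minimal sketch of such a pair (names and details assumed):
# flattening concatenates every parameter into one 1-D numpy vector and
# records the shapes needed to invert the operation; the 61326-dimensional
# skill vectors above are exactly such flattened networks.
import numpy as np

def flatten_network_sketch(model):
    flat, shapes = [], []
    for p in model.parameters():
        shapes.append(tuple(p.data.shape))
        flat.append(p.data.cpu().numpy().ravel())
    return np.concatenate(flat), shapes

def unflatten_network_sketch(flat, shapes):
    weights, offset = [], 0
    for shape in shapes:
        size = int(np.prod(shape))
        weights.append(flat[offset:offset + size].reshape(shape))
        offset += size
    return weights  # list of arrays in the order loadWeights_cifar expects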

######################################################################################################
#                                   GLOBAL VARIABLES
######################################################################################################
#net=models.Net().to(device)
net_reset = models.Net().to(device)
Actual_Accuracy = []
criterion = nn.CrossEntropyLoss()
student_model = models.CAE()  #.to(device)
teacher_model = models.CAE().to(device)
vae_optimizer = optim.Adam(student_model.parameters(), lr=0.0001)  #, amsgrad=True)
lam = 0.001
threshold_batchid = []
# biased training variables
nSamples = 10
nBiased = min(nSamples, 10)
trainBias = 0.5
minReps = 1
nReps = 30
stage = 0
addRepsTotal = nReps * minReps
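# The commented-out biased_permutation call in Example #1, together with the
# variables above, suggests a replay schedule that over-samples recent skills.
# A hypothetical sketch of such a function (the real helper_functions
# implementation may differ): every task appears at least minReps times, and
# the nBiased most recent tasks get extra replays weighted by trainBias. It
# returns the shuffled index list plus the per-task rep counts, matching the
# two values unpacked in the commented call.
import numpy as np

def biased_permutation_sketch(stage, nBiased, trainBias, nTasks, minReps, addReps=30):  # addReps mirrors nReps above
    reps = [minReps] * nTasks
    for t in range(max(0, min(stage, nTasks) - nBiased), min(stage, nTasks)):
        reps[t] += int(round(trainBias * addReps))  # extra replays for recent skills
    order = np.repeat(np.arange(nTasks), reps)
    return np.random.permutation(order), reps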
#####################################################################################################################
#                                              CAE TRAIN AND CIFAR TEST                                             #