Example #1
def FLATTEN_WEIGHTS_TRAIN_VAE(task_samples, model):
    final_skill_sample = []
    Flat_input, net_shapes = helper_functions.flattenNetwork(model.cpu())
    final_skill_sample.append(Flat_input)
    # Note: both branches train for 200 iterations in this variant.
    if len(task_samples) < 7:
        CAE_AE_TRAIN(net_shapes, task_samples + final_skill_sample, 200)
    else:
        CAE_AE_TRAIN(net_shapes, task_samples + final_skill_sample, 200)
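Every variant in this listing relies on helper_functions.flattenNetwork, and example #6 below additionally uses helper_functions.unFlattenNetwork; neither helper is shown here. The following is a minimal sketch of what such a pair could look like, assuming they simply concatenate all parameter tensors into one 1-D array and record their shapes (this is an assumption, not the repository's actual code):

import numpy as np

def flattenNetwork(model):
    # Concatenate every parameter tensor into a single 1-D vector and
    # remember each tensor's shape for later reconstruction.
    shapes, chunks = [], []
    for p in model.parameters():
        shapes.append(list(p.shape))
        chunks.append(p.data.cpu().numpy().reshape(-1))
    return np.concatenate(chunks), shapes

def unFlattenNetwork(flat, shapes):
    # Inverse of flattenNetwork: slice the flat vector back into per-layer tensors.
    weights, offset = [], 0
    for shape in shapes:
        size = int(np.prod(shape))
        weights.append(flat[offset:offset + size].reshape(shape))
        offset += size
    return weights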
Example #2
def FLATTEN_WEIGHTS_TRAIN_VAE(task_samples, model):
    final_skill_sample = []
    Flat_input, net_shapes = helper_functions.flattenNetwork(model.cpu())
    final_skill_sample.append(Flat_input)
    # Keep a copy of the raw flattened weights for later comparison.
    Actual_task_net_weights.append(Flat_input)
    # Train longer once earlier task samples have to be retained as well.
    if len(task_samples) == 0:
        accuracies = CAE_AE_TRAIN(net_shapes, task_samples + final_skill_sample, 100)
    else:
        accuracies = CAE_AE_TRAIN(net_shapes, task_samples + final_skill_sample, 300)
    return accuracies
Example #3
def FLATTEN_WEIGHTS_TRAIN_VAE(task_samples, model):
    final_skill_sample = []
    Flat_input, net_shapes = helper_functions.flattenNetwork(model.cpu())
    # This variant records the total weight sum and normalises the flattened
    # weights by that sum before handing them to the autoencoder.
    SUM.append(sum(Flat_input))
    normalize_sum = sum(Flat_input)
    Flat_input = [float(i) / normalize_sum for i in Flat_input]
    final_skill_sample.append(Flat_input)
    if len(task_samples) == 0:
        CAE_AE_TRAIN(net_shapes, task_samples + final_skill_sample, 100)
    else:
        CAE_AE_TRAIN(net_shapes, task_samples + final_skill_sample, 300)
Example #4
def FLATTEN_WEIGHTS_TRAIN_VAE(task_samples, model):
    final_skill_sample = []
    Flat_input, net_shapes = helper_functions.flattenNetwork(model.cpu())
    final_skill_sample.append(Flat_input)
    Actual_task_net_weights.append(Flat_input)
    # Plot the raw flattened weights of the new task network in visdom.
    vis.line(X=np.array(range(0, len(Flat_input))),
             Y=Flat_input,
             win='win_task_network',
             name='Task_net_' + str(len(task_samples)),
             opts=options_task_network,
             update='append')
    # Note: both branches train for 300 iterations in this variant.
    if len(task_samples) == 0:
        accuracies = CAE_AE_TRAIN(net_shapes, task_samples + final_skill_sample, 300)
    else:
        accuracies = CAE_AE_TRAIN(net_shapes, task_samples + final_skill_sample, 300)
    return accuracies
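Example #4 references a global options_task_network that is not defined anywhere in this listing. A plausible definition, mirroring the visdom option dicts used in example #6 below (an assumption, not the source's actual values), would be:

options_task_network = dict(fillarea=True,
                            width=400,
                            height=400,
                            xlabel='Weight index',
                            ylabel='Weight value',
                            title='Task_network_weights')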
Example #5
def FLATTEN_WEIGHTS_TRAIN_VAE(task_samples, model):
    final_skill_sample = []
    Flat_input, net_shapes = helper_functions.flattenNetwork(model.cpu())
    final_skill_sample.append(Flat_input)
    print("LENGTH OF ACTUAL MODEL IS", len(Flat_input))
    if len(task_samples) == 0:
        accuracies = CAE_AE_TRAIN(net_shapes,
                                  task_samples + final_skill_sample, 50)
    else:
        accuracies = CAE_AE_TRAIN(net_shapes,
                                  task_samples + final_skill_sample, 250)
    return accuracies
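Examples #1 through #5 are interchangeable entry points with the same signature. A hypothetical driver loop (the policy-training step and the exact accumulation of task_samples are assumptions; games, games_config and models.NNPolicy are taken from example #6 below) might call one of them like this:

task_samples = []
for game_idx, game in enumerate(games):
    # Build and train a policy network for this game (training loop omitted).
    model = models.NNPolicy(1, 256, games_config[game_idx])
    # Compress the new network together with all previously stored ones.
    accuracies = FLATTEN_WEIGHTS_TRAIN_VAE(task_samples, model)
    # Keep the flattened weights so the next call replays this task as well.
    Flat_input, _ = helper_functions.flattenNetwork(model.cpu())
    task_samples.append(Flat_input)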
Example #6
def CAE_AE_TRAIN(shapes, task_samples, iterations):
    """Train the contractive autoencoder (student_model) to memorise the
    flattened weights in task_samples. Each network is split into fixed-size
    chunks ("skills"); training continues until every network is reconstructed
    above the cosine-similarity checkpoint threshold or iterations run out."""
    global stage
    global Skill_Mu
    global student_model
    global vae_optimizer
    # Visdom plot configurations: loss, accuracy, MSE and cosine-similarity curves.
    options = dict(fillarea=True,
                   width=400,
                   height=400,
                   xlabel='Iterations',
                   ylabel='Loss',
                   title='CAE_skills' + str(len(task_samples)))
    options_2 = dict(fillarea=True,
                     width=400,
                     height=400,
                     xlabel='Iterations',
                     ylabel='Accuracy',
                     title='Acc_skills_' + str(len(task_samples)))
    options_mse = dict(fillarea=True,
                       width=400,
                       height=400,
                       xlabel='Iterations',
                       ylabel='MSE',
                       title='MSE_skills_' + str(len(task_samples)))
    options_mse_org = dict(fillarea=True,
                           width=400,
                           height=400,
                           xlabel='Iterations',
                           ylabel='Cosine_Similarity',
                           title='Cosine_Similarity_' + str(len(task_samples)))
    options_cosine_org_orginal = dict(fillarea=True,
                                      width=400,
                                      height=400,
                                      xlabel='Iterations',
                                      ylabel='Cosine_Similarity',
                                      title='Cosine_Sim_wrt_org_' +
                                      str(len(task_samples)))
    win = vis.line(X=np.array([0]),
                   Y=np.array([0.005]),
                   win='Loss_skills_' + str(len(task_samples)),
                   name='Loss_skills' + str(len(task_samples)),
                   opts=options)
    # win_2 = vis.line(X=np.array([0]),Y=np.array([0]),win='Acc_skills_'+str(len(task_samples)),name='Acc_skills_'+str(len(task_samples)),opts=options_2)
    win_mse = vis.line(X=np.array([0]),
                       Y=np.array([0]),
                       win='MSE_skills_' + str(len(task_samples)),
                       name='MSE_skills_' + str(len(task_samples)),
                       opts=options_mse)
    win_mse_org = vis.line(X=np.array([0]),
                           Y=np.array([0]),
                           win='Cosine_Sim ' + str(len(task_samples)),
                           name='Cosine_similarity_' + str(len(task_samples)),
                           opts=options_mse_org)
    win_cosine_org_orginal = vis.line(
        X=np.array([0]),
        Y=np.array([0]),
        win='Cosine_Sim_orginal ' + str(len(task_samples)),
        name='Cosine_sim_wrt_org' + str(len(task_samples)),
        opts=options_cosine_org_orginal)

    splitted_input = []
    skills = []
    total_resend = 0
    # Keep a handle on the original, un-chunked task samples; task_samples
    # itself is rebound to the list of chunks below.
    task_samples_copy = task_samples
    total = len(task_samples)
    final_dataframe_1 = pd.DataFrame()
    accuracies = np.zeros((iterations, len(task_samples)))
    # Pad each flattened network with a copy of its last diff_count values so
    # all inputs share the chunked length, then split into split_size chunks
    # ("skills"); the pad is stripped again after decoding.
    for t_input in range(0, len(task_samples)):
        task_sample_append = np.concatenate(
            (task_samples[t_input],
             task_samples[t_input][-diff_count[t_input]:]))
        splitted_input = np.array_split(task_sample_append, split_size)
        for i in range(0, len(splitted_input)):
            # if(len(task_samples[t_input])==842407):
            #     splitted_input[i]=np.concatenate((splitted_input[i], [0.05]))
            # else:
            #     splitted_input[i]=np.concatenate((splitted_input[i], [0.05]))
            print(len(splitted_input[i]))
            skills.append(splitted_input[i])

    task_samples = skills
    student_model.train()
    # Main training loop over the chunked skills.
    for batch_idx in range(1, iterations):
        randPerm = np.random.permutation(len(task_samples))
        #randPerm,nReps = helper_functions.biased_permutation(stage+1,nBiased,trainBias,total*3,minReps)
        resend = []
        for s in randPerm:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, input_size), recons_x, hidden_representation,
                lam)
            Skill_Mu[skill] = hidden_representation
            vis.line(X=np.array([batch_idx]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')
            loss.backward()
            vae_optimizer.step()
            print('Train Iteration: {},\tLoss: {:.6f}, picked skill {}'.format(
                batch_idx, loss.item(), skill))
            cosine = F.cosine_similarity(recons_x, data.view(-1, input_size))
            if cosine <= 0.991:
                resend.append(s)
        print("RESEND List", resend)
        total_resend = total_resend + len(resend)
        for s in resend:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, input_size), recons_x, hidden_representation,
                lam)
            Skill_Mu[skill] = hidden_representation
            vis.line(X=np.array([batch_idx]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')
            loss.backward()
            vae_optimizer.step()
            print('Train Iteration: {},\tLoss: {:.6f}, picked skill {}'.format(
                batch_idx, loss.item(), skill))
        # After every iteration, decode each stored latent code (Skill_Mu),
        # stitch the chunks back into full weight vectors and evaluate them.
        if batch_idx % 1 == 0:
            m = 0
            n = split_size
            values = 0
            for i in range(0, int(len(task_samples_copy))):
                collect_data_1 = []
                sample = []
                for k in range(m, n):
                    mu1 = Skill_Mu[k][0]
                    mini_task_sample = student_model.decoder(mu1).cpu()
                    task_sample = mini_task_sample.data.numpy().reshape(
                        input_size)
                    sample = np.concatenate([sample, task_sample])
                #print("in cae test len is",len(sample))
                # if len(Actual_task_net_weights[i])==841893:
                #     sample=np.concatenate([sample,task_sample[0:-3]])
                # else:
                sample = sample[0:-diff_count[i]]
                m = m + split_size
                n = n + split_size
                model_z = models.NNPolicy(1, 256, games_config[i])
                FI, net_shapes = helper_functions.flattenNetwork(model_z.cpu())
                final_weights = helper_functions.unFlattenNetwork(
                    torch.from_numpy(sample).float(), net_shapes)
                #print("lksfkfjf",diff_count,len(sample),len(task_samples_copy[i]))
                model_x = loadWeights_cifar(final_weights, model_z)
                mse = mean_squared_error(
                    sample,
                    Variable(torch.FloatTensor(
                        task_samples_copy[i])).data.numpy())
                mse_orginal = mean_squared_error(sample,
                                                 Actual_task_net_weights[i])
                # sample=sample[:-diff_count[i]]
                b = torch.FloatTensor(
                    task_samples_copy[i]).data.numpy()  #[:-diff_count[i]]
                b1 = torch.FloatTensor(Actual_task_net_weights[i]).data.numpy()
                COSINE_SIMILARITY = dot(sample, b) / (norm(sample) * norm(b))
                COSINE_SIMILARITY_wrt_orginal = dot(
                    sample, b1) / (norm(sample) * norm(b1))
                # Checkpoint the reconstructed network once it is close enough
                # to the stored task weights.
                if COSINE_SIMILARITY >= 0.998:
                    torch.save(
                        model_x.state_dict(), './New_Games/' +
                        str(games[i][0:10]) + '_' + str(total) + '_' +
                        str(COSINE_SIMILARITY) + '_' + str(batch_idx) + '.pt')
                    values = values + 1
                # else:
                #     if COSINE_SIMILARITY>=0.998:
                #         torch.save(model_x.state_dict(),'./Latest_Cae/'+str(games[i][0:10])+'_'+str(total)+'_'+str(COSINE_SIMILARITY)+'_'+str(batch_idx)+'.pt')
                #         values=values+1
                #torch.save(model_x.state_dict(),'./Latest_Cae/'+str(games[i][0:10])+'_'+str(total)+'_'+str(COSINE_SIMILARITY)+'.pt')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([mse]),
                         win=win_mse,
                         name='MSE_Skill_' + str(i),
                         update='append')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([COSINE_SIMILARITY]),
                         win=win_mse_org,
                         name='Cos_Sim_Skill_' + str(i),
                         update='append')  #,opts=options_lgnd)
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([COSINE_SIMILARITY_wrt_orginal]),
                         win=win_cosine_org_orginal,
                         name='Cos_wrt_org_Sim_Skill_' + str(i),
                         update='append')  #,opts=options_lgnd)
        # Stop early once every task network was reconstructed above the
        # checkpoint threshold in this iteration.
        if values == len(task_samples_copy):
            print("########## \n Batch id is", batch_idx, "\n#########")
            threshold_batchid.append(batch_idx + total_resend)
            break

    return accuracies
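CAE_AE_TRAIN also depends on helper_functions.Contractive_loss_function, which is not shown in this listing. Assuming student_model uses a sigmoid hidden layer h and that W is the (hidden x input) weight matrix of the layer producing h, the standard contractive-autoencoder loss it presumably implements is the reconstruction MSE plus lam times the squared Frobenius norm of the encoder Jacobian. A sketch under those assumptions (not the repository's actual helper):

import torch
import torch.nn.functional as F

def Contractive_loss_function(W, x, recons_x, h, lam):
    # Reconstruction term.
    con_mse = F.mse_loss(recons_x, x)
    # For sigmoid units h = sigmoid(W x + b), the squared Frobenius norm of
    # dh/dx is sum_i (h_i * (1 - h_i))^2 * sum_j W_ij^2.
    dh = h * (1 - h)                      # (batch, hidden)
    w_sum = torch.sum(W ** 2, dim=1)      # (hidden,)
    con_loss = torch.sum((dh ** 2) * w_sum)
    closs_mul_lam = lam * con_loss
    loss = con_mse + closs_mul_lam
    return loss, con_mse, con_loss, closs_mul_lam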