Пример #1
0
def CAE_AE_TRAIN(shapes, task_samples, iterations):
    """Re-initialise the global CAE and train it to compress flattened
    CIFAR task-network weights (61326 values each), testing every sweep.

    shapes: layer shapes used to un-flatten a decoded weight vector.
    task_samples: list of flattened task-network weight vectors.
    iterations: maximum number of training sweeps.
    Returns an (iterations, len(task_samples)) array of test accuracies.
    """
    global stage
    global student_model
    global vae_optimizer
    # Fresh model/optimizer each call: this variant restarts training.
    student_model = models.CAE()  #.to(device)
    vae_optimizer = optim.Adam(student_model.parameters(),
                               lr=0.0001)  #, amsgrad=True)
    student_model.train()
    print("len of takssample is", len(task_samples))
    accuracies = np.zeros((iterations, len(task_samples)))
    # Grow the global latent-code store so every task has a slot.
    for _ in range(0, len(task_samples) - len(Skill_Mu)):
        Skill_Mu.append([])
    for batch_idx in range(1, iterations):
        randPerm = np.random.permutation(len(task_samples))
        #randPerm,nReps = helper_functions.biased_permutation(stage+1,nBiased,trainBias,len(task_samples),minReps)
        # FIX: nReps is only defined by the commented-out call above, so
        # printing it raised a NameError.
        print(randPerm)
        for s in randPerm:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(
                task_samples[skill]))  #.to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 61326), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vae_optimizer.step()
            # FIX: loss.data[0] fails on 0-dim tensors (PyTorch >= 0.4);
            # use loss.item() as elsewhere in this file.
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.item(), skill))

        if batch_idx % 1 == 0:
            values = 0
            for i in range(0, len(task_samples)):
                mu1 = Skill_Mu[i][0]
                task_sample = student_model.decoder(mu1).cpu()
                sample = task_sample.data.numpy().reshape(61326)
                final_weights = helper_functions.unFlattenNetwork(
                    sample, shapes)
                loadWeights_cifar(final_weights, net)
                # Evaluate each task against its disjoint class subset.
                if i >= 6:
                    load_individual_class([i], [0, 1, 2, 3])
                else:
                    load_individual_class([i], [6, 7, 8, 9])
                Avg_Accuracy = test()
                accuracies[batch_idx, i] = Avg_Accuracy
                if round(Avg_Accuracy + 0.5) >= int(Actual_Accuracy[i]):
                    values = values + 1
            # Stop once every task has recovered its original accuracy.
            if values == len(task_samples):
                print("########## \n Batch id is", batch_idx, "\n#########")
                threshold_batchid.append(batch_idx)
                break
    stage = stage + 1
    return accuracies
Пример #2
0
def CAE_AE_TRAIN(shapes, task_samples, iterations):
    """Train the global CAE on flattened MNIST task weights (21840 values
    each) using a biased replay permutation, testing every sweep.

    shapes: layer shapes used to un-flatten a decoded weight vector.
    task_samples: list of flattened task-network weight vectors.
    iterations: maximum number of training sweeps.
    Returns an (iterations, len(task_samples)) array of test accuracies.
    """
    global stage
    student_model.train()
    accuracies = np.zeros((iterations, len(task_samples)))
    # Grow the global latent-code store so every task has a slot.
    for _ in range(0, len(task_samples) - len(Skill_Mu)):
        Skill_Mu.append([])
    for batch_idx in range(1, iterations):
        #randPerm = np.random.permutation(len(task_samples))
        # Biased permutation replays earlier skills more often.
        randPerm, nReps = helper_functions.biased_permutation(
            stage + 1, nBiased, trainBias, len(task_samples), minReps)
        print(randPerm, nReps)
        for s in randPerm:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 21840), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vae_optimizer.step()
            # FIX: loss.data[0] fails on 0-dim tensors (PyTorch >= 0.4);
            # use loss.item() as elsewhere in this file.
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.item(), skill))

        if batch_idx % 1 == 0:
            values = 0
            for i in range(0, len(task_samples)):
                mu1 = Skill_Mu[i][0]
                task_sample = student_model.decoder(mu1).cpu()
                sample = task_sample.data.numpy().reshape(21840)
                print(
                    'MSE IS', i,
                    mean_squared_error(
                        task_sample.data.numpy(),
                        Variable(torch.FloatTensor(
                            task_samples[i])).data.numpy()))
                final_weights = helper_functions.unFlattenNetwork(
                    sample, shapes)
                loadWeights_mnsit(final_weights, model)
                test_loader = RELOAD_DATASET(idx_permute[i])
                Avg_Accuracy = test(args, model, device, test_loader)
                accuracies[batch_idx, i] = Avg_Accuracy
                # Count tasks within 1 point of their original accuracy.
                if round(Avg_Accuracy + 0.5) >= int(Actual_Accuracy[i] - 1):
                    values = values + 1
            if values == len(task_samples):
                print("########## \n Batch id is", batch_idx, "\n#########")
                threshold_batchid.append(batch_idx)
                break
    stage = stage + 1
    return accuracies
Пример #3
0
def CAE_AE_TRAIN(shapes, task_samples, iterations):
    """Split each task's flattened weights into 3 sub-skill chunks, train
    the global CAE on the chunks (re-sending high-loss ones), and log
    per-task MSE/accuracy to an HDF5 file.

    shapes: layer shapes used to un-flatten a re-assembled weight vector.
    task_samples: list of flattened task-network weight vectors.
    iterations: maximum number of training sweeps.
    Returns an (iterations, len(original task_samples)) accuracy array.
    """
    global stage
    global Skill_Mu
    splitted_input = []
    skills = []
    task_samples_copy = task_samples
    total = len(task_samples)
    final_dataframe_1 = pd.DataFrame()
    accuracies = np.zeros((iterations, len(task_samples)))
    for t_input in range(0, len(task_samples)):
        splitted_input = np.array_split(task_samples[t_input], 3)
        for i in range(0, len(splitted_input)):
            # Zero-pad the short remainder chunk so every chunk is 20669.
            if (len(splitted_input[i]) == 20668):
                splitted_input[i] = np.concatenate((splitted_input[i], [0]))
            skills.append(splitted_input[i])
    task_samples = skills
    Skill_Mu = [[] for _ in range(0, len(task_samples))]
    global student_model
    global vae_optimizer
    student_model.train()
    for batch_idx in range(1, iterations):
        randPerm = np.random.permutation(len(task_samples))
        #randPerm,nReps = helper_functions.biased_permutation(stage+1,nBiased,trainBias,total*3,minReps)
        # FIX: nReps is only defined by the commented-out call above, so
        # printing it raised a NameError.
        print(randPerm)
        resend = []
        for s in randPerm:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 20669), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vae_optimizer.step()
            # FIX: loss.data[0] fails on 0-dim tensors (PyTorch >= 0.4);
            # use loss.item() as elsewhere in this file.
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.item(), skill))
            # Chunks whose loss is still >= 1e-3 get one extra update below.
            if float(loss.item() * 1000) >= 1.00:
                resend.append(s)
        print("RESEND List", resend)
        for s in resend:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 20669), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vae_optimizer.step()

            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.item(), skill))
        if batch_idx % 1 == 0:
            # m:n is the sliding window of 3 chunks belonging to task i.
            m = 0
            n = 3
            tl = 0
            values = 0
            for i in range(0, int(len(task_samples) / 3)):
                collect_data_1 = []
                global net
                RELOAD_DATASET()
                Avg_Accuracy = 0
                load_individual_class(list(range(i * 10, (i + 1) * 10)), [])
                sample = []
                # Decode and concatenate the task's 3 chunks back together.
                for k in range(m, n):
                    mu1 = Skill_Mu[k][0]
                    mini_task_sample = student_model.decoder(mu1).cpu()
                    task_sample = mini_task_sample.data.numpy().reshape(20669)
                    sample = np.concatenate([sample, task_sample])
                m = m + 3
                n = n + 3
                final_weights = helper_functions.unFlattenNetwork(
                    torch.from_numpy(sample).float(), shapes)
                loadWeights_cifar(final_weights, net)
                Avg_Accuracy = test()
                print(len(sample), len(task_samples_copy[i]))
                # Compare only the unpadded prefix of the reassembled vector.
                mse = mean_squared_error(
                    sample[0:62006],
                    Variable(torch.FloatTensor(
                        task_samples_copy[i][0:62006])).data.numpy())
                accuracies[batch_idx, i] = Avg_Accuracy
                collect_data_1.extend([
                    batch_idx, i, mse, Avg_Accuracy, Actual_Accuracy[i],
                    len(resend)
                ])
                final_dataframe_1 = pd.concat([
                    final_dataframe_1,
                    pd.DataFrame(collect_data_1).transpose()
                ])
            #     if total<=7:
            #         if round(Avg_Accuracy+0.5)>=int(Actual_Accuracy[i]-1):
            #             values=values+1
            #     else:
            #         if round(Avg_Accuracy+0.5)>=int(Actual_Accuracy[i]-2):
            #             print("verifying the degrading threshold")
            #             values=values+1
            # if values==total:
            #     print("########## \n Batch id is",batch_idx,"\n#########")
            #     threshold_batchid.append(batch_idx)
            #     break
    final_dataframe_1.columns = [
        'batch_idx', 'skill', 'caluclated_mse', 'Accuracy', 'Actual_Accuracy',
        'Resend_len'
    ]
    final_dataframe_1.to_hdf(
        'MSE_Fixed_Iter/' + str(len(task_samples_copy)) + '_MSE_acc-2-200',
        'key1')
    stage = stage + 3
    return accuracies
Пример #4
0
def CAE_AE_TRAIN(shapes, task_samples, iterations):
    """Chunked-CAE training with live visdom plots: each task's flattened
    weights are split into 3 chunks (20442 values each), high-loss chunks
    are re-sent, and loss/accuracy/MSE/cosine-similarity are plotted and
    logged to HDF5.

    shapes: layer shapes used to un-flatten a re-assembled weight vector.
    task_samples: list of flattened task-network weight vectors.
    iterations: maximum number of training sweeps.
    Returns an (iterations, len(original task_samples)) accuracy array.
    """
    global stage
    global Skill_Mu
    global lam
    global student_model
    global vae_optimizer
    # Visdom window options for loss / accuracy / MSE / cosine plots.
    options = dict(fillarea=True,
                   width=400,
                   height=400,
                   xlabel='Iterations',
                   ylabel='Loss',
                   title='CAE_skills' + str(len(task_samples)))
    options_2 = dict(fillarea=True,
                     width=400,
                     height=400,
                     xlabel='Iterations',
                     ylabel='Accuracy',
                     title='CAE_skills' + str(len(task_samples)))
    options_mse = dict(fillarea=True,
                       width=400,
                       height=400,
                       xlabel='Iterations',
                       ylabel='MSE',
                       title='CAE_MSE_skills' + str(len(task_samples)))
    options_mse_org = dict(fillarea=True,
                           width=400,
                           height=400,
                           xlabel='Iterations',
                           ylabel='Cosine_Similarity',
                           title='Cosine_Similariyu' + str(len(task_samples)))
    win = vis.line(X=np.array([0]),
                   Y=np.array([0.005]),
                   win='CAE_skills ' + str(len(task_samples)),
                   name='CAE_skills' + str(len(task_samples)),
                   opts=options)
    win_2 = vis.line(X=np.array([0]),
                     Y=np.array([0]),
                     win='CAE_Acc_skills ' + str(len(task_samples)),
                     name='CAE_skills' + str(len(task_samples)),
                     opts=options_2)
    win_mse = vis.line(X=np.array([0]),
                       Y=np.array([0]),
                       win='CAE_MSE_skills ' + str(len(task_samples)),
                       name='CAE_MSE_skills' + str(len(task_samples)),
                       opts=options_mse)
    win_mse_org = vis.line(X=np.array([0]),
                           Y=np.array([0]),
                           win='Cosine_similarity ' + str(len(task_samples)),
                           name='Cosine_similarity' + str(len(task_samples)),
                           opts=options_mse_org)
    splitted_input = []
    skills = []
    task_samples_copy = task_samples
    total = len(task_samples)
    total_resend = 0
    final_dataframe_1 = pd.DataFrame()
    accuracies = np.zeros((iterations, len(task_samples)))
    # Split every task's weight vector into 3 equal chunks.
    for t_input in range(0, len(task_samples)):
        splitted_input = np.array_split(task_samples[t_input], 3)
        for i in range(0, len(splitted_input)):
            skills.append(splitted_input[i])
    task_samples = skills
    Skill_Mu = [[] for _ in range(0, len(task_samples))]
    student_model.train()
    for batch_idx in range(1, iterations):
        randPerm = np.random.permutation(len(task_samples))
        #randPerm,nReps = helper_functions.biased_permutation(stage+1,nBiased,trainBias,total*3,minReps)
        # FIX: nReps is only defined by the commented-out call above, so
        # printing it raised a NameError.
        print(randPerm)
        resend = []
        for s in randPerm:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 20442), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vis.line(X=np.array([batch_idx]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')
            vae_optimizer.step()
            # FIX: loss.data[0] fails on 0-dim tensors (PyTorch >= 0.4);
            # this block already uses loss.item() for the visdom plot.
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.item(), skill))
            # Chunks whose loss is still >= 1e-6 get one extra update below.
            if float(loss.item() * 1000000) >= 1.00:
                resend.append(s)
        print("RESEND List", resend)
        total_resend = total_resend + len(resend)
        for s in resend:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 20442), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            # Plot re-sends at a half-step offset so they are visible.
            vis.line(X=np.array([batch_idx + 0.5]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')
            vae_optimizer.step()

            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.item(), skill))

        if batch_idx % 1 == 0:
            # m:n is the sliding window of 3 chunks belonging to task i.
            m = 0
            n = 3
            tl = 0
            values = 0
            for i in range(0, int(len(task_samples) / 3)):
                collect_data_1 = []
                Avg_Accuracy = 0
                if i >= 6:
                    Train_loader, Test_loader = load_individual_class(
                        [i], [0, 1, 2, 3])
                else:
                    print("++++++++++")
                    Train_loader, Test_loader = load_individual_class(
                        [i], [6, 7, 8, 9])
                sample = []
                # Decode and concatenate the task's 3 chunks back together.
                for k in range(m, n):
                    mu1 = Skill_Mu[k][0]
                    mini_task_sample = student_model.decoder(mu1).cpu()
                    task_sample = mini_task_sample.data.numpy().reshape(20442)
                    sample = np.concatenate([sample, task_sample])
                m = m + 3
                n = n + 3
                final_weights = helper_functions.unFlattenNetwork(
                    torch.from_numpy(sample).float(), shapes)
                model_x = loadWeights_cifar(final_weights, model)
                Avg_Accuracy = test(model_x, Test_loader)
                mse = mean_squared_error(
                    sample,
                    Variable(torch.FloatTensor(
                        task_samples_copy[i])).data.numpy())
                mse_orginal = mean_squared_error(sample,
                                                 Actual_task_net_weights[i])
                b = torch.FloatTensor(task_samples_copy[i]).data.numpy()
                b1 = torch.FloatTensor(Actual_task_net_weights[i]).data.numpy()
                COSINE_SIMILARITY = dot(sample, b) / (norm(sample) * norm(b))
                jacrad_similarity = jaccard_similarity(list(b), sample)
                COSINE_SIMILARITY_wrt_orginal = dot(
                    sample, b1) / (norm(sample) * norm(b1))
                # NOTE(review): task_samples[i] below indexes the CHUNK list
                # (post-split) with a task index — presumably intended to be
                # task_samples_copy[i]; confirm before changing.
                collect_data_1.extend([
                    total, batch_idx, i, mse, mse_orginal, Avg_Accuracy,
                    Actual_Accuracy[i],
                    len(resend), sample,
                    torch.FloatTensor(task_samples[i]).data.numpy(),
                    jacrad_similarity, COSINE_SIMILARITY,
                    COSINE_SIMILARITY_wrt_orginal
                ])
                final_dataframe_1 = pd.concat([
                    final_dataframe_1,
                    pd.DataFrame(collect_data_1).transpose()
                ])
                accuracies[batch_idx, i] = Avg_Accuracy
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([Avg_Accuracy]),
                         win=win_2,
                         name='Acc_Skill_' + str(i),
                         update='append')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([mse]),
                         win=win_mse,
                         name='MSE_Skill_' + str(i),
                         update='append')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([COSINE_SIMILARITY]),
                         win=win_mse_org,
                         name='Cosine_Similarity_Skill_' + str(i),
                         update='append')  #,opts=options_lgnd)
                if COSINE_SIMILARITY > 0.993:
                    values = values + 1
            # Stop once every task's reconstruction is cosine-close enough.
            if values == total:
                print("########## \n Batch id is", batch_idx, "\n#########")
                threshold_batchid.append(batch_idx)
                threshold_net_updates.append((batch_idx * total) +
                                             total_resend)
                break
    stage = stage + 3
    final_dataframe_1.columns = [
        'no_of_skills', 'batch_idx', 'skill', 'caluclated_mse',
        'mse_wrt_orginal', 'Accuracy', 'Actual_Accuracy', 'Resend_len',
        'sample', 'task_sample', 'jacrad_similarity', 'COSINE_SIMILARITY',
        'COSINE_SIMILARITY_wrt_orginal'
    ]
    final_dataframe_1.to_hdf(
        'Collected_Data/' + str(len(task_samples)) + '_data', 'key1')
    return accuracies
Пример #5
0
def CAE_AE_TRAIN(shapes, task_samples, iterations):
    """Train the global CAE on whole flattened MNIST task weights (21840
    values each) with visdom plotting, re-sending high-loss skills, and
    log per-task MSE/accuracy to HDF5.

    shapes: layer shapes used to un-flatten a decoded weight vector.
    task_samples: list of flattened task-network weight vectors.
    iterations: maximum number of training sweeps.
    Returns an (iterations, len(task_samples)) array of test accuracies.
    """
    global stage
    # Visdom window options for loss / accuracy / MSE plots.
    options = dict(fillarea=True,
                   width=400,
                   height=400,
                   xlabel='Iterations',
                   ylabel='Loss',
                   title='CAE_skills' + str(len(task_samples)))
    options_2 = dict(fillarea=True,
                     width=400,
                     height=400,
                     xlabel='Iterations',
                     ylabel='Accuracy',
                     title='CAE_skills' + str(len(task_samples)))
    options_mse = dict(fillarea=True,
                       width=400,
                       height=400,
                       xlabel='Iterations',
                       ylabel='MSE',
                       title='CAE_MSE_skills' + str(len(task_samples)))
    options_mse_org = dict(fillarea=True,
                           width=400,
                           height=400,
                           xlabel='Iterations',
                           ylabel='MSE_Orginal',
                           title='CAE_MSE_WRT_Orginal_skills' +
                           str(len(task_samples)))
    win = vis.line(X=np.array([0]),
                   Y=np.array([0.005]),
                   win='CAE_skills ' + str(len(task_samples)),
                   name='CAE_skills' + str(len(task_samples)),
                   opts=options)
    win_2 = vis.line(X=np.array([0]),
                     Y=np.array([0]),
                     win='CAE_Acc_skills ' + str(len(task_samples)),
                     name='CAE_skills' + str(len(task_samples)),
                     opts=options_2)
    win_mse = vis.line(X=np.array([0]),
                       Y=np.array([0]),
                       win='CAE_MSE_skills ' + str(len(task_samples)),
                       name='CAE_MSE_skills' + str(len(task_samples)),
                       opts=options_mse)
    win_mse_org = vis.line(X=np.array([0]),
                           Y=np.array([0]),
                           win='CAE_MSE_Org_skills ' + str(len(task_samples)),
                           name='CAE_MSE_Orgskills' + str(len(task_samples)),
                           opts=options_mse_org)
    student_model.train()
    final_dataframe_1 = pd.DataFrame()
    accuracies = np.zeros((iterations, len(task_samples)))
    # Grow the global latent-code store so every task has a slot.
    for _ in range(0, len(task_samples) - len(Skill_Mu)):
        Skill_Mu.append([])
    for batch_idx in range(1, iterations):
        randPerm = np.random.permutation(len(task_samples))
        #randPerm,nReps = helper_functions.biased_permutation(stage+1,nBiased,trainBias,len(task_samples),minReps)
        #print(randPerm,nReps)
        resend = []
        for s in randPerm:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 21840), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vis.line(X=np.array([batch_idx]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')  #,opts=options_lgnd)
            vae_optimizer.step()
            # FIX: loss.data[0] fails on 0-dim tensors (PyTorch >= 0.4);
            # this block already uses loss.item() for the visdom plot.
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.item(), skill))
            # Skills whose loss is still >= 1e-4 get one extra update below.
            if float(loss.item() * 10000) >= 1.00:
                resend.append(s)
        print("RESEND List", resend)
        for s in resend:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 21840), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vis.line(X=np.array([batch_idx]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')  #,opts=options_lgnd)
            vae_optimizer.step()
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.item(), skill))

        if batch_idx % 1 == 0:
            values = 0
            for i in range(0, len(task_samples)):
                collect_data_1 = []
                mu1 = Skill_Mu[i][0]
                task_sample = student_model.decoder(mu1).cpu()
                sample = task_sample.data.numpy().reshape(21840)
                final_weights = helper_functions.unFlattenNetwork(
                    sample, shapes)
                loadWeights_mnsit(final_weights, model)
                Train_loader, Test_loader = RELOAD_DATASET(idx_permute[i])
                mse = mean_squared_error(
                    sample,
                    Variable(torch.FloatTensor(task_samples[i])).data.numpy())
                mse_orginal = mean_squared_error(sample,
                                                 Actual_task_net_weights[i])
                Avg_Accuracy = test(model, Test_loader)
                collect_data_1.extend([
                    batch_idx, i, mse, mse_orginal, Avg_Accuracy,
                    Actual_Accuracy[i],
                    len(resend)
                ])
                final_dataframe_1 = pd.concat([
                    final_dataframe_1,
                    pd.DataFrame(collect_data_1).transpose()
                ])
                accuracies[batch_idx, i] = Avg_Accuracy
                # Larger task sets tolerate a 1-point accuracy drop.
                if len(task_samples) > 6:
                    if round(Avg_Accuracy + 0.5) >= int(Actual_Accuracy[i] -
                                                        1):
                        values = values + 1
                else:
                    if round(Avg_Accuracy + 0.5) >= int(Actual_Accuracy[i]):
                        values = values + 1
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([Avg_Accuracy]),
                         win=win_2,
                         name='Acc_Skill_' + str(i),
                         update='append')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([mse]),
                         win=win_mse,
                         name='MSE_Skill_' + str(i),
                         update='append')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([mse_orginal]),
                         win=win_mse_org,
                         name='MSE_Org_Skill_' + str(i),
                         update='append')  #,opts=options_lgnd)
            if values == len(task_samples):
                print("########## \n Batch id is", batch_idx, "\n#########")
                threshold_batchid.append(batch_idx)
                break
    stage = stage + 1
    final_dataframe_1.columns = [
        'batch_idx', 'skill', 'caluclated_mse', 'mse_wrt_orginal', 'Accuracy',
        'Actual_Accuracy', 'Resend_len'
    ]
    final_dataframe_1.to_hdf(
        'Collected_Data/' + str(len(task_samples)) + '_data', 'key1')
    return accuracies
Пример #6
0
def CAE_AE_TRAIN(shapes,task_samples,iterations):
    """Experimental FMSE-threshold CAE training variant.

    Accumulates per-skill losses across a whole permutation, backprops the
    batch-average loss once per sweep, and every 199 sweeps evaluates each
    reconstructed task network's accuracy relative to its original.

    NOTE(review): unlike the other CAE_AE_TRAIN variants in this file this
    one does not return the accuracy matrix (implicitly returns None).
    """
    global stage
    encoded_task=[]
    global running_losses
    global running_thresholds
    global initial_fmse_threshold
    global total_update_count
    global running_update_counts
    global average_relative_accuracies
    #task_samples=task_samples.to(device)
    student_model.train()
    final_dataframe=pd.DataFrame()
    final_dataframe_1=pd.DataFrame()
    # Grow the global latent-code store so every task has a slot.
    for i in range(0,len(task_samples)-len(Skill_Mu)):
        Skill_Mu.append([])
    curr_batch_avg_loss = 10
    batch_idx = -1
    curr_batch_max_loss = torch.Tensor([10.0])
    
    #while the thresholds have not been met, train
    # while np.all(running_losses[:-(10-len(task_samples))] >= running_thresholds[:-(10-len(task_samples))]):
    # while curr_batch_avg_loss >= initial_fmse_threshold:
    for batch_idx in range(1,iterations):
        
        # NOTE(review): incrementing the loop variable here only offsets the
        # printed/checked index by 1 within this sweep; the for-loop reassigns
        # batch_idx on the next iteration.
        batch_idx += 1
        train_loss = 0
        randPerm = np.random.permutation(len(task_samples))
        # adjust_learning_rate(vae_optimizer, batch_idx)
        #print("after lr change",vae_optimizer)
        # randPerm,nReps = helper_functions.biased_permutation(stage+1,nBiased,trainBias,len(task_samples),minReps)
        print(randPerm)
        #sys.exit() 
        # avg_loss = torch.Tensor([0.0])
        total_loss = torch.Tensor([0.0])
        batch_losses = [0.0 for x in range(len(task_samples))]
        for s in randPerm:
            
            total_update_count += 1
            
            collect_data=[]
            skill=s#randint(0,len(task_samples)-1)
            # F_s is only consumed by the commented-out Contractive_FMSE
            # loss below; unused with the active loss function.
            F_s = fisher_matrix[s]
            vae_optimizer.zero_grad()
            data=Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss,con_mse,con_loss,closs_mul_lam = helper_functions.Contractive_loss_function(W, data.view(-1, 21432), recons_x,hidden_representation, lam)
            # loss,con_mse,con_loss,closs_mul_lam = helper_functions.Contractive_FMSE(W, data.view(-1, 21432), recons_x,hidden_representation, lam, F_s)
            # loss = loss*((total_update_count-running_update_counts[s])/total_update_count)
            
            running_update_counts[s] += 1
            Skill_Mu[skill]=hidden_representation
            batch_losses[s] = loss #update the current loss for this network
            # avg_loss += loss
            total_loss += loss
            # NOTE(review): loss.data[0] fails on 0-dim tensors in
            # PyTorch >= 0.4; other variants in this file use loss.item().
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(batch_idx,loss.data[0],skill ))
        # avg_loss /= len(task_samples)
        curr_batch_avg_loss = total_loss/len(task_samples)

        #get the temporary running losses if we stopped training right here
        tmp_running_losses = [0.0 for x in range(len(task_samples))]
        for idx, x in enumerate(batch_losses):
            # NOTE(review): the -10 presumably cancels the initial value 10
            # seeded into the running loss — confirm against the globals'
            # initialisation before relying on this.
            tmp_running_losses[idx] = (running_losses[idx] + x)-10

        


        #we need to train more on the skill where the temporary running loss deviates the most from the running threshold
        losses_wrt_thresholds = [0.0 for x in range(len(task_samples))]
        for idx,x in enumerate(batch_losses):
            
            losses_wrt_thresholds[idx] = round((batch_losses[idx].data.numpy()/running_thresholds[idx])[0], 2)

        curr_batch_max_loss = max(batch_losses)
        
        # One backward/step per sweep on the averaged loss (not per skill).
        curr_batch_avg_loss.backward()
        vae_optimizer.step()

        # Periodic evaluation every 199 sweeps.
        if batch_idx %199==0:  
            # accuray_threshlod=[]
            values=0
            initial_fmse_threshold += 0.0004
            avg_relative_accuracy = 0.0
            for i in range(0,len(task_samples)):
                # Avg_Accuracy=0
                #model=models.Net()
                collect_data_1=[]
                mu1=Skill_Mu[i][0]
                task_sample = student_model.decoder(mu1).cpu()
                sample=task_sample.data.numpy().reshape(21432)
                #print('MSE IS',mean_squared_error(task_sample.data.numpy(),Variable(torch.FloatTensor(task_samples[i])).data.numpy()))
                #mse=mean_squared_error(task_sample.data.numpy(),Variable(torch.FloatTensor(task_samples[i])).data.numpy())
                final_weights=helper_functions.unFlattenNetwork(sample, shapes)
                loadWeights_mnsit(final_weights,model)
                # Alternate class splits by task parity.
                if i%2==0:
                    load_individual_class([i],[1,3,5,7])
                else:
                    load_individual_class([i],[0,2,4,6])
                Avg_Accuracy= test(args, model, device, test_loader)
                if len(task_samples) == 10:
                    avg_relative_accuracy += round((Avg_Accuracy/Actual_Accuracy[i][len(Actual_Accuracy[i])-1])*100, 2)
                print("ORIGINAL PERFORMANCE: "+str(Actual_Accuracy[i][len(Actual_Accuracy[i])-1])+"%\n")
                print("RELATIVE PERFORMANCE TO ORIGINAL: "+str(round((Avg_Accuracy/Actual_Accuracy[i][len(Actual_Accuracy[i])-1])*100, 2))+"%\n")
                print("LOSS: ", batch_losses[i])
                # if round(Avg_Accuracy+0.5)>=int(Actual_Accuracy[i]):
                # NOTE(review): values is incremented unconditionally (the
                # accuracy check above is commented out), so the stop
                # condition below reduces to len(task_samples) == 10.
                values=values+1
                #update running_thresholds and running losses
                running_losses[i] += batch_losses[i]
                
                # if len(task_samples)>=3:
                #     inc =  -(i-(len(task_samples)+1))
                #     running_thresholds[i] = initial_fmse_threshold*np.log(max(3,inc))
                # if len(task_samples) > 2:
                #     running_thresholds = [initial_fmse_threshold*np.log(len(task_samples)*1.3) for x in range(10)]
            print(running_thresholds)
            if len(task_samples) == 10:
                avg_relative_accuracy /= len(task_samples)
                average_relative_accuracies.append(avg_relative_accuracy)
            if values==len(task_samples) and len(task_samples) == 10:
                print("########## \n Batch id is",batch_idx,"\n#########")
                break
       
        
        
        
    stage=stage+1
def CAE_AE_TRAIN(shapes, task_samples, iterations):
    """Train the contractive autoencoder (CAE) to compress task networks.

    Each entry of ``task_samples`` is a flattened task-network weight
    vector of length 21432.  Every sweep trains the CAE on a random
    permutation of the tasks, then decodes every stored latent code,
    reloads the reconstructed (re-scaled) weights into the global
    ``model`` and measures the resulting classification accuracy.
    Training stops early once every reconstructed task matches its
    recorded ``Actual_Accuracy``.

    Args:
        shapes: layer shapes used by ``helper_functions.unFlattenNetwork``.
        task_samples: list of flattened task-network weight vectors.
        iterations: maximum number of training sweeps.

    Returns:
        np.ndarray of shape (iterations, len(task_samples)) holding the
        per-sweep, per-task reconstruction accuracies.

    Side effects: mutates the globals ``Skill_Mu``, ``stage`` and
    ``threshold_batchid``; writes a diagnostics dataframe to
    ``MSE_Iter/<n>_MSE_fixed_iter`` (HDF5).
    """
    global stage
    accuracies = np.zeros((iterations, len(task_samples)))
    student_model.train()
    final_dataframe_1 = pd.DataFrame()
    # Grow the latent-code store so every task has a slot.
    for i in range(0, len(task_samples) - len(Skill_Mu)):
        Skill_Mu.append([])
    for batch_idx in range(1, iterations):
        randPerm = np.random.permutation(len(task_samples))
        print(randPerm)
        resend = []
        for s in randPerm:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            # The contractive penalty needs the encoder weight matrix.
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 21432), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vae_optimizer.step()
            # loss.item() replaces the deprecated loss.data[0]: indexing a
            # 0-dim tensor raises on PyTorch >= 0.5 (the sibling variants in
            # this file already use loss.item() for visdom logging).
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.item(), skill))
            # Record skills whose loss stayed >= 1e-6 (diagnostics only here).
            if float(loss.item() * 1000000) >= 1.00:
                resend.append(s)
        print("resending skills", resend)
        # Evaluation: decode every task and measure reconstruction quality.
        if batch_idx % 1 == 0:
            values = 0
            for i in range(0, len(task_samples)):
                collect_data_1 = []
                mu1 = Skill_Mu[i][0]
                task_sample = student_model.decoder(mu1).cpu()
                sample = task_sample.data.numpy().reshape(21432)
                # Rebuild the task network from the decoded, re-scaled weights.
                final_weights = helper_functions.unFlattenNetwork(
                    sample * SUM[i], shapes)
                loadWeights_mnsit(final_weights, model)
                # NOTE(review): assumes tasks with index >= 6 are evaluated on
                # classes 1-4 and the rest on 6-9 -- confirm with the caller.
                if i >= 6:
                    load_individual_class([i], [1, 2, 3, 4])
                else:
                    load_individual_class([i], [6, 7, 8, 9])
                Avg_Accuracy = test(args, model, device, test_loader)
                mse = mean_squared_error(
                    sample,
                    Variable(torch.FloatTensor(task_samples[i])).data.numpy())
                collect_data_1.extend([
                    batch_idx, i, mse, Avg_Accuracy, Actual_Accuracy[i],
                    len(resend)
                ])
                final_dataframe_1 = pd.concat([
                    final_dataframe_1,
                    pd.DataFrame(collect_data_1).transpose()
                ])
                accuracies[batch_idx, i] = Avg_Accuracy
                if round(Avg_Accuracy + 0.5) >= int(Actual_Accuracy[i]):
                    values = values + 1
            # Stop once every task is reconstructed to its original accuracy.
            if values == len(task_samples):
                print("########## \n Batch id is", batch_idx, "\n#########")
                threshold_batchid.append(batch_idx)
                break
    stage = stage + 1
    final_dataframe_1.columns = [
        'batch_idx', 'skill', 'caluclated_mse', 'Accuracy', 'Actual_Accuracy',
        'Resend_len'
    ]
    final_dataframe_1.to_hdf(
        'MSE_Iter/' + str(len(task_samples)) + '_MSE_fixed_iter', 'key1')
    return accuracies
def CAE_AE_TRAIN(shapes, task_samples, iterations):
    """Train the CAE on flattened task networks with visdom monitoring.

    Per sweep: train on a random permutation of the tasks, immediately
    re-train any task whose loss stayed >= 1e-6 (the ``resend`` list),
    then decode every stored latent code, reload the reconstructed
    weights into the global ``model`` and log loss / accuracy / MSE
    curves to the global visdom server ``vis``.  Training stops early
    once every decoded task's cosine similarity with its input vector
    exceeds 0.99.

    Args:
        shapes: layer shapes for ``helper_functions.unFlattenNetwork``.
        task_samples: list of flattened task-network weight vectors
            (length 21432 each).
        iterations: maximum number of training sweeps.

    Returns:
        np.ndarray of shape (iterations, len(task_samples)) with the
        per-sweep, per-task reconstruction accuracies.

    Side effects: mutates the globals ``Skill_Mu``, ``stage``,
    ``threshold_batchid`` and ``threshold_net_updates``; writes a
    diagnostics dataframe to ``Collected_Data/<n>_data`` (HDF5).
    """
    global stage
    # Visdom chart configurations (loss / accuracy / MSE / MSE-vs-original).
    options = dict(fillarea=True,
                   width=400,
                   height=400,
                   xlabel='Iterations',
                   ylabel='Loss',
                   title='CAE_skills' + str(len(task_samples)))
    options_2 = dict(fillarea=True,
                     width=400,
                     height=400,
                     xlabel='Iterations',
                     ylabel='Accuracy',
                     title='CAE_skills' + str(len(task_samples)))
    options_mse = dict(fillarea=True,
                       width=400,
                       height=400,
                       xlabel='Iterations',
                       ylabel='MSE',
                       title='CAE_MSE_skills' + str(len(task_samples)))
    options_mse_org = dict(fillarea=True,
                           width=400,
                           height=400,
                           xlabel='Iterations',
                           ylabel='MSE_Orginal',
                           title='CAE_MSE_WRT_Orginal_skills' +
                           str(len(task_samples)))
    win = vis.line(X=np.array([0]),
                   Y=np.array([0.005]),
                   win='CAE_skills ' + str(len(task_samples)),
                   name='CAE_skills' + str(len(task_samples)),
                   opts=options)
    win_2 = vis.line(X=np.array([0]),
                     Y=np.array([0]),
                     win='CAE_Acc_skills ' + str(len(task_samples)),
                     name='CAE_skills' + str(len(task_samples)),
                     opts=options_2)
    win_mse = vis.line(X=np.array([0]),
                       Y=np.array([0]),
                       win='CAE_MSE_skills ' + str(len(task_samples)),
                       name='CAE_MSE_skills' + str(len(task_samples)),
                       opts=options_mse)
    win_mse_org = vis.line(X=np.array([0]),
                           Y=np.array([0]),
                           win='CAE_MSE_Org_skills ' + str(len(task_samples)),
                           name='CAE_MSE_Orgskills' + str(len(task_samples)),
                           opts=options_mse_org)
    # Counts every sample pushed through the extra "resend" pass (used to
    # report total network updates on early stop).
    total_resend = 0
    total = len(task_samples)
    accuracies = np.zeros((iterations, len(task_samples)))
    #task_samples=task_samples.to(device)
    student_model.train()
    final_dataframe = pd.DataFrame()
    final_dataframe_1 = pd.DataFrame()
    # Grow the latent-code store so every task has a slot.
    for i in range(0, len(task_samples) - len(Skill_Mu)):
        Skill_Mu.append([])
    for batch_idx in range(1, iterations):
        train_loss = 0
        randPerm = np.random.permutation(len(task_samples))
        #adjust_learning_rate(vae_optimizer, batch_idx)
        #print("after lr change",vae_optimizer)
        #randPerm,nReps = helper_functions.biased_permutation(stage+1,nBiased,trainBias,len(task_samples),minReps)
        print(randPerm)
        resend = []
        for s in randPerm:
            #collect_data=[]
            skill = s  #randint(0,len(task_samples)-1)
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            # The contractive penalty needs the encoder weight matrix.
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 21432), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vis.line(X=np.array([batch_idx]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')  #,opts=options_lgnd)
            vae_optimizer.step()
            # NOTE(review): loss.data[0] indexes a 0-dim tensor and raises on
            # PyTorch >= 0.5; the visdom call above already uses loss.item().
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.data[0], skill))
            # Queue skills whose loss is still >= 1e-6 for a second pass.
            if float(loss.data[0] * 1000000) >= 1.00:
                resend.append(s)
        print("resending skills", resend)
        total_resend = total_resend + len(resend)
        # Second pass: immediately re-train the poorly reconstructed skills.
        for s in resend:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 21432), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vis.line(X=np.array([batch_idx]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')  #,opts=options_lgnd)
            vae_optimizer.step()
        # Evaluation: decode every task and measure reconstruction quality.
        if batch_idx % 1 == 0:
            values = 0
            for i in range(0, len(task_samples)):
                collect_data_1 = []
                mu1 = Skill_Mu[i][0]
                task_sample = student_model.decoder(mu1).cpu()
                sample = task_sample.data.numpy().reshape(21432)
                #print('MSE IS',mean_squared_error(task_sample.data.numpy(),Variable(torch.FloatTensor(task_samples[i])).data.numpy()))
                #mse=mean_squared_error(task_sample.data.numpy(),Variable(torch.FloatTensor(task_samples[i])).data.numpy())
                mse = mean_squared_error(
                    sample,
                    Variable(torch.FloatTensor(task_samples[i])).data.numpy())
                mse_orginal = mean_squared_error(sample,
                                                 Actual_task_net_weights[i])
                final_weights = helper_functions.unFlattenNetwork(
                    sample, shapes)
                model_x = loadWeights_mnsit(final_weights, model)
                # NOTE(review): assumes tasks with index >= 6 are evaluated on
                # classes 1-4 and the rest on 6-9 -- confirm with the caller.
                if i >= 6:
                    Train_loader, Test_loader = load_individual_class(
                        [i], [1, 2, 3, 4])
                else:
                    Train_loader, Test_loader = load_individual_class(
                        [i], [6, 7, 8, 9])
                Avg_Accuracy = test(model_x, Test_loader)
                b = torch.FloatTensor(task_samples[i]).data.numpy()
                b1 = torch.FloatTensor(Actual_task_net_weights[i]).data.numpy()
                # Cosine similarity of the decoded vector vs. the training
                # target (b) and vs. the original task weights (b1).
                COSINE_SIMILARITY = dot(sample, b) / (norm(sample) * norm(b))
                COSINE_SIMILARITY_wrt_orginal = dot(
                    sample, b1) / (norm(sample) * norm(b1))
                collect_data_1.extend([
                    total, batch_idx, i, mse, mse_orginal, Avg_Accuracy,
                    Actual_Accuracy[i],
                    len(resend), sample,
                    torch.FloatTensor(task_samples[i]).data.numpy(),
                    COSINE_SIMILARITY, COSINE_SIMILARITY_wrt_orginal
                ])
                final_dataframe_1 = pd.concat([
                    final_dataframe_1,
                    pd.DataFrame(collect_data_1).transpose()
                ])
                accuracies[batch_idx, i] = Avg_Accuracy
                # A task counts as recovered once similarity passes 0.99.
                if COSINE_SIMILARITY > 0.99:
                    values = values + 1
                # if len(task_samples)>6:
                #     if round(Avg_Accuracy+0.5)>=int(Actual_Accuracy[i]-1):
                #         values=values+1
                # else:
                #     if round(Avg_Accuracy+0.5)>=int(Actual_Accuracy[i]):
                #         values=values+1
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([Avg_Accuracy]),
                         win=win_2,
                         name='Acc_Skill_' + str(i),
                         update='append')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([mse]),
                         win=win_mse,
                         name='MSE_Skill_' + str(i),
                         update='append')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([mse_orginal]),
                         win=win_mse_org,
                         name='MSE_Org_Skill_' + str(i),
                         update='append')  #,opts=options_lgnd)
            # Early stop when every task is recovered.
            if values == len(task_samples):
                print("########## \n Batch id is", batch_idx, "\n#########")
                threshold_batchid.append(batch_idx)
                threshold_net_updates.append((batch_idx * total) +
                                             total_resend)
                break

    stage = stage + 1
    final_dataframe_1.columns = [
        'no_of_skills', 'batch_idx', 'skill', 'caluclated_mse',
        'mse_wrt_orginal', 'Accuracy', 'Actual_Accuracy', 'Resend_len',
        'sample', 'task_sample', 'COSINE_SIMILARITY',
        'COSINE_SIMILARITY_wrt_orginal'
    ]
    final_dataframe_1.to_hdf(
        'Collected_Data/' + str(len(task_samples)) + '_data', 'key1')
    return accuracies
Example #9
0
def CAE_AE_TRAIN(shapes, task_samples, iterations):
    """Train the CAE on task networks split into three sub-vectors each.

    Each flattened task-network vector is split into 3 chunks of length
    20442 and every chunk is treated as its own "skill" with its own
    latent code.  Per sweep: train on a random permutation of all
    chunks, re-train the chunks whose loss stayed >= 1e-4 (the
    ``resend`` list), then reassemble each task from its three decoded
    chunks, reload it into the global ``model`` and log accuracy / MSE
    curves to the global visdom server ``vis``.

    Args:
        shapes: layer shapes for ``helper_functions.unFlattenNetwork``.
        task_samples: list of flattened task-network weight vectors
            (each splittable into 3 chunks of 20442).
        iterations: maximum number of training sweeps.

    Returns:
        np.ndarray of shape (iterations, original task count) with the
        per-sweep, per-task reconstruction accuracies.

    Side effects: rebinds the global ``Skill_Mu`` (one slot per chunk),
    advances ``stage`` by 3 (three sub-skills per task), appends to
    ``threshold_batchid`` / ``threshold_net_updates`` and writes a
    diagnostics dataframe to ``Collected_Data/<n>_data`` (HDF5).
    """
    global stage
    global Skill_Mu
    global lam
    global student_model
    global vae_optimizer
    # Visdom chart configurations (loss / accuracy / MSE / MSE-vs-original).
    options = dict(fillarea=True,
                   width=400,
                   height=400,
                   xlabel='Iterations',
                   ylabel='Loss',
                   title='CAE_skills' + str(len(task_samples)))
    options_2 = dict(fillarea=True,
                     width=400,
                     height=400,
                     xlabel='Iterations',
                     ylabel='Accuracy',
                     title='CAE_skills' + str(len(task_samples)))
    options_mse = dict(fillarea=True,
                       width=400,
                       height=400,
                       xlabel='Iterations',
                       ylabel='MSE',
                       title='CAE_MSE_skills' + str(len(task_samples)))
    options_mse_org = dict(fillarea=True,
                           width=400,
                           height=400,
                           xlabel='Iterations',
                           ylabel='MSE_Orginal',
                           title='CAE_MSE_WRT_Orginal_skills' +
                           str(len(task_samples)))
    win = vis.line(X=np.array([0]),
                   Y=np.array([0.005]),
                   win='CAE_skills ' + str(len(task_samples)),
                   name='CAE_skills' + str(len(task_samples)),
                   opts=options)
    win_2 = vis.line(X=np.array([0]),
                     Y=np.array([0]),
                     win='CAE_Acc_skills ' + str(len(task_samples)),
                     name='CAE_skills' + str(len(task_samples)),
                     opts=options_2)
    win_mse = vis.line(X=np.array([0]),
                       Y=np.array([0]),
                       win='CAE_MSE_skills ' + str(len(task_samples)),
                       name='CAE_MSE_skills' + str(len(task_samples)),
                       opts=options_mse)
    win_mse_org = vis.line(X=np.array([0]),
                           Y=np.array([0]),
                           win='CAE_MSE_Org_skills ' + str(len(task_samples)),
                           name='CAE_MSE_Orgskills' + str(len(task_samples)),
                           opts=options_mse_org)
    splitted_input = []
    skills = []
    # Keep the unsplit vectors for the per-task MSE computation below.
    task_samples_copy = task_samples
    total = len(task_samples)
    total_resend = 0
    final_dataframe_1 = pd.DataFrame()
    accuracies = np.zeros((iterations, len(task_samples)))
    # Split every task vector into 3 equal chunks; each chunk is a "skill".
    for t_input in range(0, len(task_samples)):
        splitted_input = np.array_split(task_samples[t_input], 3)
        for i in range(0, len(splitted_input)):
            skills.append(splitted_input[i])
    task_samples = skills
    Skill_Mu = [[] for _ in range(0, len(task_samples))]
    # student_model=models.SPLIT_CIFAR_CAE_TWO_SKILLS()#.to(device)
    # vae_optimizer = optim.Adam(student_model.parameters(), lr = 0.0001)#, amsgrad=True)
    student_model.train()
    iterations = iterations
    # if total>=8:
    #     print("Decreasing the Lamda")
    #     lam=0.0001
    for batch_idx in range(1, iterations):
        randPerm = np.random.permutation(len(task_samples))
        #randPerm,nReps = helper_functions.biased_permutation(stage+1,nBiased,trainBias,total*3,minReps)
        # NOTE(review): nReps is not defined in this function; presumably a
        # stale global left over from the commented-out biased permutation --
        # confirm it exists at module level or this print raises NameError.
        print(randPerm, nReps)
        resend = []
        for s in randPerm:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            #print(data)
            hidden_representation, recons_x = student_model(data)
            # The contractive penalty needs the encoder weight matrix.
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 20442), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vis.line(X=np.array([batch_idx]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')
            vae_optimizer.step()
            # NOTE(review): loss.data[0] indexes a 0-dim tensor and raises on
            # PyTorch >= 0.5; the visdom call above already uses loss.item().
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.data[0], skill))
            #print(float(loss.data[0] * 1000))
            # Queue chunks whose loss is still >= 1e-4 for a second pass.
            if float(loss.data[0] * 10000) >= 1.00:
                resend.append(s)
        print("RESEND List", resend)
        total_resend = total_resend + len(resend)
        # if len(resend)>1:
        # Second pass: immediately re-train the poorly reconstructed chunks.
        for s in resend:
            skill = s
            #print("resending skills",resend)
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 20442), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vis.line(X=np.array([batch_idx + 0.5]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')
            vae_optimizer.step()

            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.data[0], skill))
    # else:
    #     print("The Length of resend list is ",len(resend))
    #     for s in randPerm:
    #         skill=s
    #         vae_optimizer.zero_grad()
    #         data=Variable(torch.FloatTensor(task_samples[skill])).to(device)
    #         #print(data)
    #         hidden_representation, recons_x = student_model(data)
    #         W = student_model.state_dict()['fc2.weight']
    #         loss,con_mse,con_loss,closs_mul_lam = helper_functions.Contractive_loss_function(W,
    #             data.view(-1, 20442), recons_x,hidden_representation, lam)
    #         Skill_Mu[skill]=hidden_representation
    #         loss.backward()
    #         vae_optimizer.step()
    #         print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(batch_idx,loss.data[0],skill ))

        # Evaluation: stitch each task back together from its 3 chunks
        # (indices [m, n) walk the chunk list three at a time).
        if batch_idx % 1 == 0:
            m = 0
            n = 3
            tl = 0
            values = 0
            for i in range(0, int(len(task_samples) / 3)):
                collect_data_1 = []
                Avg_Accuracy = 0
                # NOTE(review): assumes tasks with index >= 6 are evaluated on
                # classes 0-3 and the rest on 6-9 -- confirm with the caller.
                if i >= 6:
                    Train_loader, Test_loader = load_individual_class(
                        [i], [0, 1, 2, 3])
                else:
                    print("++++++++++")
                    Train_loader, Test_loader = load_individual_class(
                        [i], [6, 7, 8, 9])
                sample = []
                for k in range(m, n):
                    mu1 = Skill_Mu[k][0]
                    mini_task_sample = student_model.decoder(mu1).cpu()
                    task_sample = mini_task_sample.data.numpy().reshape(20442)
                    sample = np.concatenate([sample, task_sample])
                m = m + 3
                n = n + 3
                #print('MSE IS',i,mean_squared_error(task_sample.data.numpy(),Variable(torch.FloatTensor(task_samples[i])).data.numpy()))
                final_weights = helper_functions.unFlattenNetwork(
                    torch.from_numpy(sample).float(), shapes)
                model_x = loadWeights_cifar(final_weights, model)
                Avg_Accuracy = test(model_x, Test_loader)
                mse = mean_squared_error(
                    sample,
                    Variable(torch.FloatTensor(
                        task_samples_copy[i])).data.numpy())
                mse_orginal = mean_squared_error(sample,
                                                 Actual_task_net_weights[i])
                collect_data_1.extend([
                    batch_idx, i, mse, mse_orginal, Avg_Accuracy,
                    Actual_Accuracy[i],
                    len(resend)
                ])
                final_dataframe_1 = pd.concat([
                    final_dataframe_1,
                    pd.DataFrame(collect_data_1).transpose()
                ])
                accuracies[batch_idx, i] = Avg_Accuracy
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([Avg_Accuracy]),
                         win=win_2,
                         name='Acc_Skill_' + str(i),
                         update='append')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([mse]),
                         win=win_mse,
                         name='MSE_Skill_' + str(i),
                         update='append')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([mse_orginal]),
                         win=win_mse_org,
                         name='MSE_Org_Skill_' + str(i),
                         update='append')  #,opts=options_lgnd)
                # Both branches apply the same accuracy criterion; the else
                # branch only adds a log line.
                if total <= 7:
                    if round(Avg_Accuracy + 0.5) >= int(Actual_Accuracy[i]):
                        values = values + 1
                else:
                    if round(Avg_Accuracy + 0.5) >= int(Actual_Accuracy[i]):
                        print("verifying the degrading threshold")
                        values = values + 1

            # Early stop when every original task is recovered.
            if values == total:
                print("########## \n Batch id is", batch_idx, "\n#########")
                threshold_batchid.append(batch_idx)
                threshold_net_updates.append((batch_idx * total) +
                                             total_resend)
                break
    # Each task occupies three latent slots, so the stage counter advances by 3.
    stage = stage + 3
    print(final_dataframe_1.head(5))
    final_dataframe_1.columns = [
        'batch_idx', 'skill', 'caluclated_mse', 'mse_wrt_orginal', 'Accuracy',
        'Actual_Accuracy', 'Resend_len'
    ]
    final_dataframe_1.to_hdf(
        'Collected_Data/' + str(len(task_samples)) + '_data', 'key1')
    return accuracies
Example #10
0
def CAE_AE_TRAIN(shapes, task_samples, iterations):
    global stage
    global Skill_Mu
    global student_model
    global vae_optimizer
    options = dict(fillarea=True,
                   width=400,
                   height=400,
                   xlabel='Iterations',
                   ylabel='Loss',
                   title='CAE_skills' + str(len(task_samples)))
    options_2 = dict(fillarea=True,
                     width=400,
                     height=400,
                     xlabel='Iterations',
                     ylabel='Accuracy',
                     title='Acc_skills_' + str(len(task_samples)))
    options_mse = dict(fillarea=True,
                       width=400,
                       height=400,
                       xlabel='Iterations',
                       ylabel='MSE',
                       title='MSE_skills_' + str(len(task_samples)))
    options_mse_org = dict(fillarea=True,
                           width=400,
                           height=400,
                           xlabel='Iterations',
                           ylabel='Cosine_Similarity',
                           title='Cosine_Similarity_' + str(len(task_samples)))
    options_cosine_org_orginal = dict(fillarea=True,
                                      width=400,
                                      height=400,
                                      xlabel='Iterations',
                                      ylabel='Cosine_Similarity',
                                      title='Cosine_Sim_wrt_org_' +
                                      str(len(task_samples)))
    win = vis.line(X=np.array([0]),
                   Y=np.array([0.005]),
                   win='Loss_skills_' + str(len(task_samples)),
                   name='Loss_skills' + str(len(task_samples)),
                   opts=options)
    # win_2 = vis.line(X=np.array([0]),Y=np.array([0]),win='Acc_skills_'+str(len(task_samples)),name='Acc_skills_'+str(len(task_samples)),opts=options_2)
    win_mse = vis.line(X=np.array([0]),
                       Y=np.array([0]),
                       win='MSE_skills_' + str(len(task_samples)),
                       name='MSE_skills_' + str(len(task_samples)),
                       opts=options_mse)
    win_mse_org = vis.line(X=np.array([0]),
                           Y=np.array([0]),
                           win='Cosine_Sim ' + str(len(task_samples)),
                           name='Cosine_similarity_' + str(len(task_samples)),
                           opts=options_mse_org)
    win_cosine_org_orginal = vis.line(
        X=np.array([0]),
        Y=np.array([0]),
        win='Cosine_Sim_orginal ' + str(len(task_samples)),
        name='Cosine_sim_wrt_org' + str(len(task_samples)),
        opts=options_cosine_org_orginal)

    splitted_input = []
    skills = []
    total_resend = 0
    task_samples_copy = task_samples
    total = len(task_samples)
    final_dataframe_1 = pd.DataFrame()
    accuracies = np.zeros((iterations, len(task_samples)))
    for t_input in range(0, len(task_samples)):
        task_sample_append = np.concatenate(
            (task_samples[t_input],
             task_samples[t_input][-diff_count[t_input]:]))
        splitted_input = np.array_split(task_sample_append, split_size)
        for i in range(0, len(splitted_input)):
            # if(len(task_samples[t_input])==842407):
            #     splitted_input[i]=np.concatenate((splitted_input[i], [0.05]))
            # else:
            #     splitted_input[i]=np.concatenate((splitted_input[i], [0.05]))
            print(len(splitted_input[i]))
            skills.append(splitted_input[i])

    task_samples = skills
    student_model.train()
    iterations = iterations
    for batch_idx in range(1, iterations):
        randPerm = np.random.permutation(len(task_samples))
        #randPerm,nReps = helper_functions.biased_permutation(stage+1,nBiased,trainBias,total*3,minReps)
        resend = []
        for s in randPerm:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, input_size), recons_x, hidden_representation,
                lam)
            Skill_Mu[skill] = hidden_representation
            vis.line(X=np.array([batch_idx]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')
            loss.backward()
            vae_optimizer.step()
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.data[0], skill))
            cosine = F.cosine_similarity(recons_x, data.view(-1, input_size))
            if cosine <= 0.991:
                resend.append(s)
        print("RESEND List", resend)
        total_resend = total_resend + len(resend)
        for s in resend:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, input_size), recons_x, hidden_representation,
                lam)
            Skill_Mu[skill] = hidden_representation
            vis.line(X=np.array([batch_idx]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')
            loss.backward()
            vae_optimizer.step()
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.data[0], skill))
        if batch_idx % 1 == 0:
            m = 0
            n = split_size
            values = 0
            for i in range(0, int(len(task_samples_copy))):
                collect_data_1 = []
                sample = []
                for k in range(m, n):
                    mu1 = Skill_Mu[k][0]
                    mini_task_sample = student_model.decoder(mu1).cpu()
                    task_sample = mini_task_sample.data.numpy().reshape(
                        input_size)
                    sample = np.concatenate([sample, task_sample])
                #print("in cae test len is",len(sample))
                # if len(Actual_task_net_weights[i])==841893:
                #     sample=np.concatenate([sample,task_sample[0:-3]])
                # else:
                sample = sample[0:-diff_count[i]]
                m = m + split_size
                n = n + split_size
                model_z = models.NNPolicy(1, 256, games_config[i])
                FI, net_shapes = helper_functions.flattenNetwork(model_z.cpu())
                final_weights = helper_functions.unFlattenNetwork(
                    torch.from_numpy(sample).float(), net_shapes)
                #print("lksfkfjf",diff_count,len(sample),len(task_samples_copy[i]))
                model_x = loadWeights_cifar(final_weights, model_z)
                mse = mean_squared_error(
                    sample,
                    Variable(torch.FloatTensor(
                        task_samples_copy[i])).data.numpy())
                mse_orginal = mean_squared_error(sample,
                                                 Actual_task_net_weights[i])
                # sample=sample[:-diff_count[i]]
                b = torch.FloatTensor(
                    task_samples_copy[i]).data.numpy()  #[:-diff_count[i]]
                b1 = torch.FloatTensor(Actual_task_net_weights[i]).data.numpy()
                COSINE_SIMILARITY = dot(sample, b) / (norm(sample) * norm(b))
                COSINE_SIMILARITY_wrt_orginal = dot(
                    sample, b1) / (norm(sample) * norm(b1))
                if COSINE_SIMILARITY >= 0.998:
                    torch.save(
                        model_x.state_dict(), './New_Games/' +
                        str(games[i][0:10]) + '_' + str(total) + '_' +
                        str(COSINE_SIMILARITY) + '_' + str(batch_idx) + '.pt')
                    values = values + 1
                # else:
                #     if COSINE_SIMILARITY>=0.998:
                #         torch.save(model_x.state_dict(),'./Latest_Cae/'+str(games[i][0:10])+'_'+str(total)+'_'+str(COSINE_SIMILARITY)+'_'+str(batch_idx)+'.pt')
                #         values=values+1
                #torch.save(model_x.state_dict(),'./Latest_Cae/'+str(games[i][0:10])+'_'+str(total)+'_'+str(COSINE_SIMILARITY)+'.pt')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([mse]),
                         win=win_mse,
                         name='MSE_Skill_' + str(i),
                         update='append')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([COSINE_SIMILARITY]),
                         win=win_mse_org,
                         name='Cos_Sim_Skill_' + str(i),
                         update='append')  #,opts=options_lgnd)
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([COSINE_SIMILARITY_wrt_orginal]),
                         win=win_cosine_org_orginal,
                         name='Cos_wrt_org_Sim_Skill_' + str(i),
                         update='append')  #,opts=options_lgnd)
        if values == len(task_samples_copy):
            print("########## \n Batch id is", batch_idx, "\n#########")
            threshold_batchid.append(batch_idx + total_resend)
            break

    return accuracies
Example #11
0
def CAE_AE_TRAIN(shapes, task_samples, iterations):
    """Train the contractive autoencoder (CAE) on task-network weights.

    Each flattened task network in ``task_samples`` is split into three
    ~20669-element "skills" (a 20668-long chunk is zero-padded) that the
    CAE learns to reconstruct.  Skills whose loss stays above 1e-6 on a
    pass are re-sent once within the same iteration.  Training stops
    early once every reconstructed network reaches cosine similarity
    >= 0.996 with its target.

    Args:
        shapes: per-layer shapes used to un-flatten a decoded weight vector.
        task_samples: list of flattened task-network weight vectors.
        iterations: maximum number of training iterations.

    Returns:
        np.ndarray of shape (iterations, len(task_samples)), allocated for
        per-skill accuracies.  NOTE(review): it is never written to in this
        body -- callers receive an all-zero array.

    Side effects: rebinds the globals ``Skill_Mu`` and ``stage``, appends to
    ``threshold_batchid``/``threshold_net_updates``, plots curves to visdom,
    and writes the collected metrics to an HDF file under Collected_Data/.
    """
    global stage
    global Skill_Mu
    # --- visdom windows: loss / accuracy / MSE / cosine similarity ---
    options = dict(fillarea=True,
                   width=400,
                   height=400,
                   xlabel='Iterations',
                   ylabel='Loss',
                   title='CAE_skills' + str(len(task_samples)))
    options_2 = dict(fillarea=True,
                     width=400,
                     height=400,
                     xlabel='Iterations',
                     ylabel='Accuracy',
                     title='Acc_skills_' + str(len(task_samples)))
    options_mse = dict(fillarea=True,
                       width=400,
                       height=400,
                       xlabel='Iterations',
                       ylabel='MSE',
                       title='MSE_skills_' + str(len(task_samples)))
    options_mse_org = dict(fillarea=True,
                           width=400,
                           height=400,
                           xlabel='Iterations',
                           ylabel='Cosine_Similarity',
                           title='Cosine_Similarity_' + str(len(task_samples)))
    win = vis.line(X=np.array([0]),
                   Y=np.array([0.005]),
                   win='Loss_skills_' + str(len(task_samples)),
                   name='Loss_skills' + str(len(task_samples)),
                   opts=options)
    win_2 = vis.line(X=np.array([0]),
                     Y=np.array([0]),
                     win='Acc_skills_' + str(len(task_samples)),
                     name='Acc_skills_' + str(len(task_samples)),
                     opts=options_2)
    win_mse = vis.line(X=np.array([0]),
                       Y=np.array([0]),
                       win='MSE_skills_' + str(len(task_samples)),
                       name='MSE_skills_' + str(len(task_samples)),
                       opts=options_mse)
    win_mse_org = vis.line(X=np.array([0]),
                           Y=np.array([0]),
                           win='Cosine_Sim ' + str(len(task_samples)),
                           name='Cosine_similarity_' + str(len(task_samples)),
                           opts=options_mse_org)
    splitted_input = []
    skills = []
    total_resend = 0
    task_samples_copy = task_samples  # keep originals for MSE/cosine checks
    total = len(task_samples)
    final_dataframe_1 = pd.DataFrame()
    accuracies = np.zeros((iterations, len(task_samples)))
    # Split every flattened network into 3 equal skills; pad the short
    # chunk so every skill is exactly 20669 values long.
    for t_input in range(0, len(task_samples)):
        splitted_input = np.array_split(task_samples[t_input], 3)
        for i in range(0, len(splitted_input)):
            if (len(splitted_input[i]) == 20668):
                splitted_input[i] = np.concatenate((splitted_input[i], [0]))
            skills.append(splitted_input[i])
    task_samples = skills
    Skill_Mu = [[] for _ in range(0, len(task_samples))]
    global student_model
    global vae_optimizer
    student_model.train()
    for batch_idx in range(1, iterations):
        randPerm = np.random.permutation(len(task_samples))
        #randPerm,nReps = helper_functions.biased_permutation(stage+1,nBiased,trainBias,total*3,minReps)
        # nReps is only produced by the (commented-out) biased permutation,
        # so print the permutation alone to avoid a NameError.
        print(randPerm)
        resend = []
        for s in randPerm:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 20669), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            vis.line(X=np.array([batch_idx]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')
            loss.backward()
            vae_optimizer.step()
            # loss.item() (not loss.data[0]) works on 0-dim tensors for
            # PyTorch >= 0.4, matching the visdom calls above.
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.item(), skill))
            # Queue poorly-reconstructed skills (loss >= 1e-6) for a retry.
            if float(loss.item() * 1000000) >= 1.00:
                resend.append(s)
        print("RESEND List", resend)
        total_resend = total_resend + len(resend)
        # Second pass: one extra update for each flagged skill.
        for s in resend:
            skill = s
            vae_optimizer.zero_grad()
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 20669), recons_x, hidden_representation, lam)
            Skill_Mu[skill] = hidden_representation
            vis.line(X=np.array([batch_idx]),
                     Y=np.array([loss.item()]),
                     win=win,
                     name='Loss_Skill_' + str(skill),
                     update='append')
            loss.backward()
            vae_optimizer.step()

            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.item(), skill))
        if batch_idx % 1 == 0:
            # Evaluation: reassemble each original network from its 3
            # decoded skills and score it against the stored target.
            m = 0
            n = 3
            values = 0
            for i in range(0, int(len(task_samples) / 3)):
                collect_data_1 = []
                RELOAD_DATASET()
                Avg_Accuracy = 0
                Train_loader, Test_loader = load_individual_class(
                    list(range(i * 10, (i + 1) * 10)), [])
                sample = []
                for k in range(m, n):
                    mu1 = Skill_Mu[k][0]
                    mini_task_sample = student_model.decoder(mu1).cpu()
                    task_sample = mini_task_sample.data.numpy().reshape(20669)
                    sample = np.concatenate([sample, task_sample])
                m = m + 3
                n = n + 3
                final_weights = helper_functions.unFlattenNetwork(
                    torch.from_numpy(sample).float(), shapes)
                model_x = loadWeights_cifar(final_weights, model)
                Avg_Accuracy = test(model_x, Test_loader)
                # Only the first 62006 values are compared (the padding
                # zeros appended during the split are excluded).
                mse = mean_squared_error(
                    sample[0:62006],
                    Variable(torch.FloatTensor(
                        task_samples_copy[i][0:62006])).data.numpy())
                mse_orginal = mean_squared_error(sample[0:62006],
                                                 Actual_task_net_weights[i])
                b = torch.FloatTensor(task_samples_copy[i]).data.numpy()
                b1 = torch.FloatTensor(Actual_task_net_weights[i]).data.numpy()
                COSINE_SIMILARITY = dot(sample[0:62006], b[0:62006]) / (
                    norm(sample[0:62006]) * norm(b[0:62006]))
                COSINE_SIMILARITY_wrt_orginal = dot(
                    sample[0:62006],
                    b1[0:62006]) / (norm(sample[0:62006]) * norm(b1[0:62006]))
                collect_data_1.extend([
                    total, batch_idx, i, mse, mse_orginal, Avg_Accuracy,
                    Actual_Accuracy[i],
                    len(resend), COSINE_SIMILARITY,
                    COSINE_SIMILARITY_wrt_orginal
                ])
                # Append this skill's metrics exactly once (the original
                # concatenated the same row twice, duplicating every
                # record in the saved HDF).
                final_dataframe_1 = pd.concat([
                    final_dataframe_1,
                    pd.DataFrame(collect_data_1).transpose()
                ])
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([Avg_Accuracy]),
                         win=win_2,
                         name='Acc_Skill_' + str(i),
                         update='append')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([mse]),
                         win=win_mse,
                         name='MSE_Skill_' + str(i),
                         update='append')
                vis.line(X=np.array([batch_idx]),
                         Y=np.array([COSINE_SIMILARITY]),
                         win=win_mse_org,
                         name='Cos_Sim_Skill_' + str(i),
                         update='append')
                if (COSINE_SIMILARITY) >= (0.996):
                    values = values + 1
            # Stop as soon as every original network is reconstructed
            # above the cosine-similarity threshold.
            if values == total:
                print("########## \n Batch id is", batch_idx, "\n#########")
                threshold_batchid.append(batch_idx)
                threshold_net_updates.append((batch_idx * total) +
                                             total_resend)
                break
    final_dataframe_1.columns = [
        'no_of_skills', 'batch_idx', 'skill', 'caluclated_mse',
        'mse_wrt_orginal', 'Accuracy', 'Actual_Accuracy', 'Resend_len',
        'COSINE_SIMILARITY', 'COSINE_SIMILARITY_wrt_orginal'
    ]
    final_dataframe_1.to_hdf(
        'Collected_Data/' + str(len(task_samples_copy)) + '_test_data.hdf',
        'key1')
    stage = stage + 3  # three skills were added per task
    return accuracies
def CAE_AE_TRAIN(shapes,task_samples,iterations):
    """Train the contractive autoencoder on whole task-network weight vectors.

    Each entry of ``task_samples`` is one flattened 21840-element network.
    Every iteration replays all skills in a random order; when more than 6
    skills are present, each skill's loss is down-weighted by how often it
    has already been updated (``running_update_count``) to balance replay.
    Training stops early once every decoded network reaches the stored
    ``Actual_Accuracy`` target on its permuted-MNIST test set.

    Args:
        shapes: per-layer shapes used to un-flatten a decoded weight vector.
        task_samples: list of flattened task-network weight vectors.
        iterations: maximum number of training iterations.

    Returns:
        np.ndarray of shape (iterations, len(task_samples)) holding the
        test accuracy of each decoded skill per iteration.

    Side effects: mutates the globals ``Skill_Mu``, ``stage``,
    ``total_update_count`` and ``running_update_count``, appends to
    ``threshold_batchid``, and plots curves to visdom.
    """
    names=[]
    global total_update_count
    global running_update_count
    for legend in range(0,len(task_samples)):
        names.append('Loss_Skill_'+str(legend))
    print("Lengend names",names)
    # Visdom windows: loss / accuracy / MSE / MSE w.r.t. original weights.
    options = dict(fillarea=True,width=400,height=400,xlabel='Iterations',ylabel='Loss',title='CAE_skills'+str(len(task_samples)))
    options_2 = dict(fillarea=True,width=400,height=400,xlabel='Iterations',ylabel='Accuracy',title='CAE_skills'+str(len(task_samples)))
    options_mse = dict(fillarea=True,width=400,height=400,xlabel='Iterations',ylabel='MSE',title='CAE_MSE_skills'+str(len(task_samples)))
    options_mse_org = dict(fillarea=True,width=400,height=400,xlabel='Iterations',ylabel='MSE_Orginal',title='CAE_MSE_WRT_Orginal_skills'+str(len(task_samples)))
    win = vis.line(X=np.array([0]),Y=np.array([0.005]),win='CAE_skills '+str(len(task_samples)),name='CAE_skills'+str(len(task_samples)),opts=options)
    win_2 = vis.line(X=np.array([0]),Y=np.array([0]),win='CAE_Acc_skills '+str(len(task_samples)),name='CAE_skills'+str(len(task_samples)),opts=options_2)
    win_mse = vis.line(X=np.array([0]),Y=np.array([0]),win='CAE_MSE_skills '+str(len(task_samples)),name='CAE_MSE_skills'+str(len(task_samples)),opts=options_mse)
    win_mse_org = vis.line(X=np.array([0]),Y=np.array([0]),win='CAE_MSE_Org_skills '+str(len(task_samples)),name='CAE_MSE_Orgskills'+str(len(task_samples)),opts=options_mse_org)
    global stage
    student_model.train()
    accuracies = np.zeros((iterations,len(task_samples)))
    # Grow Skill_Mu so there is one latent slot per skill.
    for i in range(0,len(task_samples)-len(Skill_Mu)):
        Skill_Mu.append([])
    for batch_idx in range(1,iterations):
        randPerm = np.random.permutation(len(task_samples))
        #randPerm,nReps = helper_functions.biased_permutation(stage+1,nBiased,trainBias,len(task_samples),minReps)
        #print(randPerm,nReps)
        for s in randPerm:
            skill=s
            vae_optimizer.zero_grad()
            data=Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss,con_mse,con_loss,closs_mul_lam = helper_functions.Contractive_loss_function(W, 
                data.view(-1, 21840), recons_x,hidden_representation, lam)
            Skill_Mu[skill]=hidden_representation
            if len(task_samples)>6:
                # Down-weight frequently-updated skills to balance replay.
                print("Updating the loss")
                loss=loss*(total_update_count-running_update_count[skill])/total_update_count
            total_update_count=total_update_count+1
            running_update_count[skill]=running_update_count[skill]+1
            loss.backward()
            vis.line(X=np.array([batch_idx]),Y=np.array([loss.item()]),win=win,name='Loss_Skill_'+str(skill),update='append')
            vae_optimizer.step()
            # loss.item() (not loss.data[0]) works on 0-dim tensors for
            # PyTorch >= 0.4, matching the visdom call above.
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(batch_idx,loss.item(),skill ))

        if batch_idx %1==0:
            # Evaluate every decoded skill on its permuted-MNIST test set.
            values=0
            for i in range(0,len(task_samples)):
                mu1=Skill_Mu[i][0]
                task_sample = student_model.decoder(mu1).cpu()
                sample=task_sample.data.numpy().reshape(21840)
                final_weights=helper_functions.unFlattenNetwork(sample, shapes)
                loadWeights_mnsit(final_weights,model)
                test_loader=RELOAD_DATASET(idx_permute[i])
                mse=mean_squared_error(sample,Variable(torch.FloatTensor(task_samples[i])).data.numpy())
                mse_orginal=mean_squared_error(sample,Actual_task_net_weights[i])
                Avg_Accuracy= test(args, model, device, test_loader)
                accuracies[batch_idx,i] = Avg_Accuracy
                # With many skills (>6) demand the full stored accuracy;
                # otherwise allow a 1-point slack.
                if len(task_samples)>6:
                    if round(Avg_Accuracy+0.5)>=int(Actual_Accuracy[i]):
                        values=values+1
                else:
                    if round(Avg_Accuracy+0.5)>=int(Actual_Accuracy[i]-1):
                        values=values+1
                vis.line(X=np.array([batch_idx]),Y=np.array([Avg_Accuracy]),win=win_2,name='Acc_Skill_'+str(i),update='append')
                vis.line(X=np.array([batch_idx]),Y=np.array([mse]),win=win_mse,name='MSE_Skill_'+str(i),update='append')
                vis.line(X=np.array([batch_idx]),Y=np.array([mse_orginal]),win=win_mse_org,name='MSE_Org_Skill_'+str(i),update='append')
            # Stop once every skill hit its accuracy target.
            if values==len(task_samples):
                print("########## \n Batch id is",batch_idx,"\n#########")
                threshold_batchid.append(batch_idx)
                break
    stage=stage+1
    return accuracies
# Example #13 (score: 0)
def CAE_AE_TRAIN(shapes, task_samples, iterations):
    batch_idx = 0
    global stage
    encoded_task = []

    global running_update_counts
    global total_update_count
    #task_samples=task_samples.to(device)
    student_model.train()
    final_dataframe = pd.DataFrame()
    final_dataframe_1 = pd.DataFrame()
    for i in range(0, len(task_samples) - len(Skill_Mu)):
        Skill_Mu.append([])
    # for batch_idx in range(1,iterations):
    while True:

        batch_idx += 1
        train_loss = 0
        randPerm = np.random.permutation(len(task_samples))
        # adjust_learning_rate(vae_optimizer, batch_idx)
        #print("after lr change",vae_optimizer)
        # randPerm,nReps = helper_functions.biased_permutation(stage+1,nBiased,trainBias,len(task_samples),minReps)
        print(randPerm)
        #sys.exit()
        for s in randPerm:
            collect_data = []
            skill = s  #randint(0,len(task_samples)-1)
            vae_optimizer.zero_grad()
            # F_s = fisher_matrix[s]
            data = Variable(torch.FloatTensor(task_samples[skill])).to(device)
            hidden_representation, recons_x = student_model(data)
            W = student_model.state_dict()['fc2.weight']
            loss, con_mse, con_loss, closs_mul_lam = helper_functions.Contractive_loss_function(
                W, data.view(-1, 21432), recons_x, hidden_representation, lam)
            # loss,con_mse,con_loss,closs_mul_lam = helper_functions.Contractive_FMSE(W, data.view(-1, 21432), recons_x,hidden_representation, lam, F_s)
            # loss = loss*((total_update_count-running_update_counts[s])/total_update_count)
            total_update_count += 1
            running_update_counts[s] += 1
            Skill_Mu[skill] = hidden_representation
            loss.backward()
            vae_optimizer.step()
            print('Train Iteration: {},tLoss: {:.6f},picked skill {}'.format(
                batch_idx, loss.data[0], skill))

        # if batch_idx %1==0:
        if True:

            # accuray_threshlod=[]
            values = 0
            for i in range(0, len(task_samples)):
                # Avg_Accuracy=0
                #model=models.Net()
                collect_data_1 = []
                mu1 = Skill_Mu[i][0]
                task_sample = student_model.decoder(mu1).cpu()
                sample = task_sample.data.numpy().reshape(21432)
                #print('MSE IS',mean_squared_error(task_sample.data.numpy(),Variable(torch.FloatTensor(task_samples[i])).data.numpy()))
                #mse=mean_squared_error(task_sample.data.numpy(),Variable(torch.FloatTensor(task_samples[i])).data.numpy())
                final_weights = helper_functions.unFlattenNetwork(
                    sample, shapes)
                loadWeights_mnsit(final_weights, model)
                if i % 2 == 0:
                    load_individual_class([i], [1, 3, 5, 7])
                else:
                    load_individual_class([i], [0, 2, 4, 6])
                Avg_Accuracy = test(args, model, device, test_loader)
                if round(Avg_Accuracy + 0.5) >= int(Actual_Accuracy[i]):
                    values = values + 1
            if values == len(task_samples):
                print("########## \n Batch id is", batch_idx, "\n#########")
                break
    stage = stage + 1