Example #1
def FSCloss(data, mu):
    bs = data.shape[0]  # batch size
    pd_data = torch.pow(torch.cdist(data.detach(), data.detach()), 2)
    pd_mu = torch.pow(
        torch.cdist(mu, mu, compute_mode='donot_use_mm_for_euclid_dist'), 2)
    ad_data = torch.sum(pd_data.detach()) / (bs**2)
    ad_mu = torch.sum(pd_mu) / (bs**2)
    wtf = torch.pow(
        torch.log(1 + pd_data / ad_data) - torch.log(1 + pd_mu / ad_mu), 2)
    loss = torch.sum(wtf) / bs
    return loss
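
Because every `data` term above is detached, the loss pushes `mu` toward the pairwise-distance structure of the batch without sending gradients back through the inputs. A minimal smoke test with illustrative shapes (the setup below is hypothetical, not from the original source):

import torch

data = torch.randn(32, 128)                    # a batch of 32 feature vectors
mu = torch.randn(32, 16, requires_grad=True)   # embeddings being optimized
loss = FSCloss(data, mu)
loss.backward()                                # gradients reach mu only
print(loss.item(), mu.grad.shape)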
Example #2
def Grad_CAM(vae, model_dict, key, pp_mode=True):

    if pp_mode:
        gradcam = GradCAMpp(model_dict)
    else:
        gradcam = GradCAM(model_dict)

    src_loader = torch.utils.data.DataLoader(vae.dataset,
                                             batch_size=1,
                                             shuffle=False)
    # test mode
    for i, (data, label) in enumerate(src_loader):
        pre_class, mask = gradcam(data, label)
        data = data.detach().clone().numpy()
        data = (data[0] - np.min(data[0])) / (np.max(data[0]) -
                                              np.min(data[0]))
        mask = mask.detach().clone().numpy()

        fig, ax = plt.subplots(3, 1, figsize=(16, 12))
        ax[0].set_title('L')
        ax[1].set_title('C')
        ax[2].set_title('R')
        for j in range(3):
            ax[j].plot(range(len(mask[0])), mask[0], label='Grad_CAM')
            ax[j].plot(range(len(data[j])), data[j], label='input_data')
        plt.tight_layout()
        if pp_mode:
            plt.savefig('../result/VAE-score/' + key + '/Grad_CAMpp/' +
                        vae.dataset.filenames[i].split('.csv')[0] + '.png')
        else:
            plt.savefig('../result/VAE-score/' + key + '/Grad_CAM/' +
                        vae.dataset.filenames[i].split('.csv')[0] + '.png')

        plt.close()
Example #3
def predict_fn(input_data, model):
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Process input_data to be sent to the model.
    words_data_X = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, words_data_X)

    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    model.eval()
    output = np.squeeze(model(data.detach()))

    if output > 0.5:
        result = 1
    else:
        result = 0

    return result
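
The helpers review_to_words and convert_and_pad come from the surrounding project, so the function is only callable in that context. A hypothetical invocation, assuming model was produced by the matching loader and carries its word_dict:

sentiment = predict_fn("An unexpectedly moving film.", model)
print('positive' if sentiment == 1 else 'negative')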
Example #4
def transformer(data):
    target = data.detach().cpu().numpy().copy() - image_min_value
    for i in range(0, data.shape[0]):
        image = target[i, :].reshape((image_wh, image_wh))
        transformed = r(PIL.Image.fromarray(image))
        transformed = np.array(transformed)
        target[i, :] = transformed.reshape((1, image_wh * image_wh))
    return torch.from_numpy(target + image_min_value).to(data.device)
Example #5
def pc2open3d(data):
    if torch.is_tensor(data):
        data = data.detach().cpu().numpy()
    if len(data.shape) == 2:
        pc = o3d.geometry.PointCloud()
        pc.points = o3d.utility.Vector3dVector(data)
        return pc
    else:
        print("Error in the shape of data given to Open3D! Shape is", data.shape)
Example #6
def test(model, num, epoch, data, target):

    model.eval()

    test_loss = 0
    correct = 0

    loss = nn.MSELoss()

    data = Variable(torch.Tensor(data.reshape(data.shape[0], -1, 1)),
                    requires_grad=True)
    target = Variable(torch.Tensor(target.reshape(target.shape[0], -1, 1)),
                      requires_grad=True)
    output = model(data)
    test_loss += abs(loss(target, output).data.item())
    pred = output.data

    grah_x = []
    grah_y = []
    grah_p = []
    #print( data )
    #print( pred )
    #print( target )
    for i in range(len(pred)):
        for j in range(len(pred[i])):
            if np.abs(pred[i][j] - target.data[i][j]) < 0.01:
                correct += 1
        #correct += pred.eq(target.data).sum()
    total = len(pred) * len(pred[0])
    pred = np.squeeze(np.array(pred))
    #print(target)

    test_loss /= total
    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, total, 100. * correct / total))
    if num == (epoch - 1):

        plt.plot(np.squeeze(data.detach().numpy()),
                 np.squeeze(target.detach().numpy()),
                 label='Target')
        plt.plot(np.squeeze(data.detach().numpy()),
                 np.array(pred),
                 label='training')
        plt.legend()
        plt.show()
Example #7
    def random(self, data, rndness, vel=40):
        # no grad, completely random attack
        factor = torch.full((400, 128), rndness)
        rndarray = torch.bernoulli(factor).to(self.device)
        perturbed_input = data.detach().clone()  # copy data
        perturbed_input[0][1] = data[0][1] + vel * rndarray

        perturbed_input = torch.clamp(perturbed_input, min=0, max=128)
        return perturbed_input
Example #8
    def notes_by_col(self, data, data_grad, notes):
        pos_data_grad = torch.clamp(data_grad, min=0)  # positive values
        perturbed_input = data.detach().clone()  # copy data
        nonzero_x = torch.unique(torch.nonzero(perturbed_input[0][1]))

        for column in nonzero_x:  # nonzero column
            idx = torch.topk(pos_data_grad[0][1][column], k=notes,
                             dim=0)[1]  # top k gradients
            perturbed_input[0][1][column][idx] += 70

        perturbed_input = torch.clamp(perturbed_input, min=0, max=128)
        return perturbed_input
Example #9
    def extract_data(self, model):
        np_data = []
        for info in tqdm(self.img_label_list):
            image_path, label_name = info.split(' ')
            #img = self.loader(os.path.join(self.root, image_path))
            img = Image.open(os.path.join(self.root, image_path))

            data = model(self.test_transform(img).to(self.device).unsqueeze(0))

            np_data.append(data.detach().cpu().numpy())
        np.save('MS1M', np.array(np_data))
        print(np_data)
Example #10
    def chord_attack(self, data, data_grad, dur, vel=40):
        # gpu tensor to cpu numpy
        data1 = data.detach().cpu().clone().numpy()
        data_grad1 = data_grad.detach().cpu().clone().numpy()

        chords = Detector(data1, dur).run()
        signs = np.sign(data_grad1)
        pos_signs = np.where(signs < 0.0, 0.0, signs)
        perturbed_input = data1 + np.multiply(chords, pos_signs * vel)

        # cpu numpy to gpu tensor
        perturbed_input = torch.tensor(perturbed_input,
                                       dtype=torch.float).to(self.device)
        return torch.clamp(perturbed_input, min=0, max=128)
Example #11
    def vis_attn_maps(data, Ms, n, tag, step):
        """
        Visualize the attention maps with original images

        Args:
            data (Tensor): the input data
            Ms (Tensor): the output attention maps
            n (int): number of visualized examples
            tag (str): tag for tensorboard
            step (int): global step
        """
        if n >= data.shape[0]:
            data_vis = data.detach().cpu().numpy()
            maps_vis = Ms.detach().cpu().numpy()
        else:
            data_vis = data.detach().cpu().numpy()[:n]
            maps_vis = Ms.detach().cpu().numpy()[:n]
        data_vis = images_denorm(data_vis)
        # transpose to numpy image format
        data_vis = data_vis.transpose((0, 2, 3, 1))
        attn_fig = plot_attn_maps(data_vis, maps_vis)
        writer.add_figure(tag, attn_fig, step)
        image_fig = plot_attn_maps_or_images(data_vis, maps_vis)
        writer.add_figure(tag, image_fig, step)
Example #12
def symbolic_fgs(data, grad, eps=0.3, clipping=True):
    '''
    FGSM attack.
    '''
    # signed gradient
    normed_grad = grad.detach().sign()

    # Multiply by constant epsilon
    scaled_grad = eps * normed_grad

    # Add perturbation to original example to obtain adversarial example
    adv_x = data.detach() + scaled_grad
    if clipping:
        adv_x = torch.clamp(adv_x, 0, 1)
    return adv_x
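
Detaching both the gradient sign and the input keeps the adversarial batch out of the autograd graph. A hedged usage sketch in which the classifier net and the labels tensor are assumptions, not part of the original snippet:

import torch.nn.functional as F

data.requires_grad_(True)
loss = F.cross_entropy(net(data), labels)
net.zero_grad()
loss.backward()
adv_x = symbolic_fgs(data, data.grad, eps=0.3, clipping=True)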
Example #13
    def melody_no_change(self, data, data_grad, dur, vel=40):
        data1 = data.detach().cpu().clone().numpy()
        data_grad1 = data_grad.detach().cpu().clone().numpy()

        melody_np = self.last_nonzero(data1[0][1], axis=1)
        melody_np = melody_np.squeeze()
        chords = Detector(data1, dur).run()
        for time, melody_note in enumerate(melody_np):
            if melody_note == -1:
                continue
            chords[0, 1, time, melody_note + 1:] = 0
        signs = np.sign(data_grad1)
        pos_signs = np.where(signs < 0.0, 0.0, signs)
        perturbed_input = data1 + np.multiply(chords, pos_signs * vel)

        # cpu numpy to gpu tensor
        perturbed_input = torch.tensor(perturbed_input,
                                       dtype=torch.float).to(self.device)
        return torch.clamp(perturbed_input, min=0, max=128)
Example #14
def get_predictions(df, idxs=(0, -1)):
    data = torch.tensor(df[idxs[0]:idxs[1]].values, dtype=torch.float)
    pred = model(data).detach().numpy()
    data = data.detach().numpy()

    data_df = pd.DataFrame(data, columns=df.columns)
    pred_df = pd.DataFrame(pred, columns=df.columns)

    # Unnormalize
    unnormalized_data_df = utils.custom_unnormalize(data_df)
    unnormalized_pred_df = utils.custom_unnormalize(pred_df)

    # Handle variables with discrete distributions
    unnormalized_pred_df['N90Constituents'] = unnormalized_pred_df['N90Constituents'].round()
    uniques = unnormalized_data_df['ActiveArea'].unique()
    utils.round_to_input(unnormalized_pred_df, uniques, 'ActiveArea')

    data = unnormalized_data_df.values
    pred = unnormalized_pred_df.values

    return data, pred, unnormalized_data_df, unnormalized_pred_df
Example #15
def run_fid(data, sample):
    assert data.max() <= 1 and data.min() >= 0
    assert sample.max() <= 1 and sample.min() >= 0
    data = 2 * data - 1
    if data.shape[1] == 1:
        data = data.repeat(1, 3, 1, 1)
    data = data.detach()
    with torch.no_grad():
        iss, _, _, acts_real = inception_score(data, cuda=True, batch_size=32, resize=True, splits=10, return_preds=True)
    sample = 2 * sample - 1
    if sample.shape[1] == 1:
        sample = sample.repeat(1, 3, 1, 1)
    sample = sample.detach()

    with torch.no_grad():
        issf, _, _, acts_fake = inception_score(sample, cuda=True, batch_size=32, resize=True, splits=10, return_preds=True)
    # idxs_ = np.argsort(np.abs(acts_fake).sum(-1))[:1800] # filter the ones with super large values
    # acts_fake = acts_fake[idxs_]
    m1, s1 = calculate_activation_statistics(acts_real)
    m2, s2 = calculate_activation_statistics(acts_fake)
    fid_value = calculate_frechet_distance(m1, s1, m2, s2)
    return fid_value
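
Both arguments must be image batches in NCHW layout with values in [0, 1]; single-channel inputs are tiled to three channels for the Inception network, and a CUDA device is required since inception_score is called with cuda=True. A call sketch with stand-in tensors (purely illustrative, not from the original source):

real = torch.rand(1000, 1, 28, 28)    # e.g. grayscale images in [0, 1]
fake = torch.rand(1000, 1, 28, 28)    # stand-in for generator samples
print('FID:', run_fid(real, fake))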
Example #16
def IMU_Grad_CAM(vae, model_dict, key, save_path, pp_mode=True):

    if pp_mode:
        gradcam = GradCAMpp(model_dict)
    else:
        gradcam = GradCAM(model_dict)

    src_loader = torch.utils.data.DataLoader(vae.dataset,
                                             batch_size=1,
                                             shuffle=False)
    # test mode
    for i, (data, label) in enumerate(src_loader):
        pre_class, mask = gradcam(data, label)
        data = data.detach().clone().numpy()
        data = (data[0] - np.min(data[0])) / (np.max(data[0]) -
                                              np.min(data[0]))
        mask = mask.detach().clone().numpy()

        fig, ax = plt.subplots(3, 3, figsize=(24, 24))
        for j in range(9):
            ax[j % 3][j // 3].set_title(vae.dataset.columns[j])
            ax[j % 3][j // 3].plot(range(len(mask[0])),
                                   mask[0],
                                   label='Grad_CAM')
            ax[j % 3][j // 3].plot(range(len(data[j])),
                                   data[j],
                                   label='input_data')
        plt.tight_layout()

        if pp_mode:
            plt.savefig(save_path + '/Grad_CAMpp/' +
                        vae.dataset.filenames[i].split('.csv')[0] + '.png')
        else:
            plt.savefig(save_path + '/Grad_CAM/' +
                        vae.dataset.filenames[i].split('.csv')[0] + '.png')

        plt.close()
Example #17
        set_label = set_label[index]

        # obtain the output label of T
        with torch.no_grad():
            # outputs = original_net(data)
            if opt.dataset == 'azure':
                outputs = cal_azure_proba(clf, data)
                label = cal_azure(clf, data)
            else:
                outputs = original_net(data)
                _, label = torch.max(outputs.data, 1)
                outputs = F.softmax(outputs, dim=1)
            # _, label = torch.max(outputs.data, 1)
        # print(label)

        output = netD(data.detach())
        prob = F.softmax(output, dim=1)
        # print(torch.sum(outputs) / 500.)
        errD_prob = mse_loss(prob, outputs, reduction='mean')
        errD_fake = criterion(output, label) + errD_prob * opt.beta
        D_G_z1 = errD_fake.mean().item()
        errD_fake.backward()

        errD = errD_fake
        optimizerD.step()

        del output, errD_fake

        ############################
        # (2) Update G network:
        ###########################
Example #18
    def export_tensor(self, rel_path: str, data: torch.Tensor):
        path = os.path.join(self.helper.dirs.export, rel_path + ".pth")
        os.makedirs(os.path.dirname(path), exist_ok=True)
        torch.save(data.detach().cpu().numpy(), path)
Example #19
    gnet.zero_grad()
    data = torch.from_numpy(data).cuda()  # pick data
    data_old = Variable(data)
    _, f1, _ = enet(data_old.detach())  # pick feature map
    g_f1 = model.get_feature(f1,feature_d, batch_size)
    g_sampler = Variable((torch.randn([batch_size,noise_d,hidden_d,hidden_d])-0.2)*0.5).cuda()
    g_f1_output = gnet(torch.cat([g_f1,g_sampler],1)).detach() # generate data
    # c_v = model.set_condition(g_f1.data, 1,batch_size=batch_size)
    c_v = Variable(torch.from_numpy(model.set_label_ve(label).astype('int'))).cuda()
    d_false_decision = dnet(torch.cat([g_f1_output, c_v], 1))  # get false decision

    d_false_error = criterion(d_false_decision, Variable(torch.zeros(batch_size)).cuda())

    data, label = mm.batch_next(batch_size, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], shuffle=True)
    data = Variable(torch.from_numpy(data).cuda())
    _, f1, _ = enet(data.detach())
    g_f1 = model.get_feature(f1,feature_d,batch_size)
    # c_v = model.set_condition(g_f1.data, 1, batch_size = batch_size)
    c_v = Variable(torch.from_numpy(model.set_label_ve(label).astype('float32'))).cuda()
    d_real_decision = dnet(torch.cat([data, c_v], 1))  # get right decision
    d_real_error = criterion(d_real_decision,Variable(torch.ones(batch_size)).cuda())
    # D for fake data
    error = d_real_error + d_false_error
    error.backward()
    d_net_optimizer.step()

    # G Part
    dnet.zero_grad()
    gnet.zero_grad()
    # gnet_noise.zero_grad()
    data, label = mm.batch_next(batch_size,[0,1,2,3,4,5,6,7,8,9],shuffle=True)
Example #20
        model = module(nodes)
        learn = basic_train.Learner(
            data=db,
            model=model,
            loss_func=loss_func,
            true_wd=True,
            bn_wd=False,
        )
        learn.model_dir = grid_search_folder + model_folder + '/' + 'models/'
        learn.load(saved_model_fname)
        learn.model.eval()

        idxs = (0, -1)  # Pick events to compare
        data = torch.tensor(test[idxs[0]:idxs[1]].values, dtype=torch.float)
        pred = model(data).detach().numpy()
        data = data.detach().numpy()

        data_df = pd.DataFrame(data, columns=test.columns)
        pred_df = pd.DataFrame(pred, columns=test.columns)

        # Unnormalize
        unnormalized_data_df = utils.custom_unnormalize(data_df)
        unnormalized_pred_df = utils.custom_unnormalize(pred_df)

        # Handle variables with discrete distributions
        unnormalized_pred_df['N90Constituents'] = unnormalized_pred_df[
            'N90Constituents'].round()
        uniques = unnormalized_data_df['ActiveArea'].unique()
        utils.round_to_input(unnormalized_pred_df, uniques, 'ActiveArea')

        data = unnormalized_data_df.values
Example #21
        loss.backward()
        optimizer.step()
        total_iter += 1
        if total_iter % record_pnt == 0:
            mean_loss = running_loss / float(record_pnt)
            print(mean_loss)
            running_loss = 0.

            model.eval()
            with torch.no_grad():
                samples, _ = add_augment(model.sample(100), reverse=True)
                samples = samples.to("cpu").numpy()
                plt.scatter(samples[:, 0], samples[:, 1], s=5)

                x = data.detach().to(device)
                y, _ = model(x)
                x, _ = add_augment(x, reverse=True)
                y, _ = add_augment(y, reverse=True)
                x, y = x.to("cpu").numpy(), y.to("cpu").numpy()
                plt.scatter(y[:, 0], y[:, 1], s=5)
                plt.scatter(x[:, 0], x[:, 1], s=5)
                # x = samples[:,0]
                # y = samples[:,1]
                # k = kde.gaussian_kde([x,y])
                # xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]
                # zi = k(np.vstack([xi.flatten(), yi.flatten()]))
                # plt.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=plt.cm.Greens_r)

                fig_filename = os.path.join(dirname, "moon" + str(total_iter))
                plt.savefig(fig_filename)
Example #22
if __name__ == "__main__":

    WORKER_SIZE = 2
    BATCH_SIZE = 20

    kwargs = {'num_workers': WORKER_SIZE, 'pin_memory': True}
    train_loader = torch.utils.data.DataLoader(OPLoader(
        WORKER_SIZE, BATCH_SIZE),
                                               batch_size=WORKER_SIZE,
                                               shuffle=False,
                                               **kwargs)

    for batch_idx, (data, label) in enumerate(train_loader):
        data = data.flatten(0, 1)
        label = label.flatten(0, 1)

        time.sleep(2)
        print("BatchIDX: " + str(batch_idx), data.shape, label.shape)

        for i in range(0, data.shape[0]):
            img_viz = data.detach().cpu().numpy().copy()[i, 0, :, :]
            cv2.putText(img_viz, str(batch_idx), (0, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, 255)
            cv2.imshow("img", img_viz + 0.5)
            cv2.waitKey(15)
            break

        #break

        pass
Example #23
d_in_dimension = 1
# dnet = model.D_Net_conv(ngpu, d_in_dimension).cuda()

g_net_optimizer = optim.Adam(gnet.parameters(), lr=1e-4)
# d_net_optimizer = optim.Adam(dnet.parameters(), lr = 1e-5)

num_epoches = 10
check_points = 5000
criterion = nn.BCELoss()

for batch_index, (data, label) in enumerate(train_loader):
    # dnet.zero_grad()
    data, label = data.cuda(), label.cuda()
    data, label = Variable(data), Variable(label)
    _, f1 = enet(data.detach())
    # print(np.shape(owntool.extract(f1)))
    g_f1 = f1.cuda()

    # D Part
    # g_sampler = Variable(torch.randn([batch_size,1,hidden_d,hidden_d])).cuda()

    zeroinput = Variable(torch.zeros([batch_size, 1, hidden_d,
                                      hidden_d])).cuda()
    g_f1_output = gnet(torch.cat([g_f1, zeroinput], 1))

    g_sampler2 = Variable(torch.randn([batch_size, 1, hidden_d,
                                       hidden_d])).cuda()
    g_f1_output1 = gnet(torch.cat([g_f1, g_sampler2], 1))

    g_sampler3 = Variable(torch.randn([batch_size, 1, hidden_d,
                                       hidden_d])).cuda()
Example #24
    n_hist_pred, _, _ = plt.hist(pred[:, kk], color=colors[0], label='Output', alpha=alph, bins=bin_edges)
    plt.suptitle(train_x.columns[kk])
    plt.xlabel(variable_list[kk] + ' ' + unit_list[kk])
    plt.ylabel('Number of events')
    # ms.sciy()
    plt.yscale('log')
    plt.legend()
    fig_name = 'hist_%s' % train.columns[kk]
    plt.savefig(curr_save_folder + fig_name)

# Residuals
residual_strings = [r'$(p_{T,recon} - p_{T,true}) / p_{T,true}$',
                    r'$(\eta_{recon} - \eta_{true}) / \eta_{true}$',
                    r'$(\phi_{recon} - \phi_{true}) / \phi_{true}$',
                    r'$(E_{recon} - E_{true}) / E_{true}$']
residuals = (pred.detach().numpy() - data.detach().numpy()) / data.detach().numpy()
range = (-.1, .1)
#range=None
for kk in np.arange(len(test.keys())):
    plt.figure()
    n_hist_pred, bin_edges, _ = plt.hist(
        residuals[:, kk], label='Residuals', linestyle=line_style[0], alpha=alph, bins=200, range=range)
    plt.suptitle('Residuals of %s' % train.columns[kk])
    plt.xlabel(residual_strings[kk])  # (train.columns[kk], train.columns[kk], train.columns[kk]))
    plt.ylabel('Number of jets')
    ms.sciy()
    # plt.yscale('log')
    # rms = utils.nanrms(residuals[:, kk])
    std = np.std(residuals[:, kk])
    std_err = utils.std_error(residuals[:, kk])
    mean = np.nanmean(residuals[:, kk])
Example #25
def sample(model, n=125000):
    noise = nr.randn(n, 10)
    data = model.decode(torch.Tensor(noise))
    data = data.detach().numpy()
    return data
Example #26
criterion = nn.BCELoss()

for epoch in (range(1, num_epoches)):
    # print("give me a clue")
    data, label = mnist.train.next_batch(batch_size)
    dnet.zero_grad()
    gnet.zero_grad()

    data = Variable(torch.from_numpy(data)).cuda()
    data.data.resize_(batch_size, 1, 28, 28)

    # data, label = Variable(data), Variable(label)
    c_v = Variable(
        torch.from_numpy(model.set_label_ve(label).astype('float32'))).cuda()

    _, _, f2 = enet(data.detach())
    # f2 = torch.mean(f2,1)
    # print(np.shape(owntool.extract(f2)))
    g_f2 = f2.cuda()

    # D Part
    g_sampler = Variable(torch.randn([batch_size, 1, hidden_d,
                                      hidden_d])).cuda()
    # print(g_sampler.data.tolist()[0][0][0][0])
    g_f2_output = gnet(torch.cat([g_f2, g_sampler], 1)).detach()
    d_real_decision = dnet(data)
    d_real_error = criterion(d_real_decision,
                             Variable(torch.ones(batch_size)).cuda())
    # D for fake data
    d_false_decision = dnet(g_f2_output)
    d_false_error = criterion(d_false_decision,
                              Variable(torch.zeros(batch_size)).cuda())
Example #27
        for inst in range(len(data)):

            cm = met.confusion_matrix(lbl_num[inst,0,:,:].flatten(), clas[inst,0,:,:].flatten())

            TP = cm[1][1]
            FP = cm[0][1]
            FN = cm[1][0]
            TN = cm[0][0]

            Recall.append(TP / (TP + FN))
            Precision.append(TP / (TP + FP))
            F1.append((2 * TP) / (2 * TP + FP + FN))        

            plt.figure(figsize=[7, 7])  # visualization + final image saving
            plt.imshow(data.detach().cpu()[inst,0,:,:] ,'gray')
            plt.title('Image')
            plt.show()
            # # filename = '/content/drive/MyDrive/UNET/Output_images/Final/Orig_{}.png'.format(inst+1)
            # # cv2.imwrite(filename, np.uint8(data.detach().cpu()[inst,0,:,:]))

            plt.figure(figsize = [7,7])
            plt.imshow(lbl_num[inst,0,:,:] ,'gray')
            plt.title('Label')
            plt.show()
            # # filename = '/content/drive/MyDrive/UNET/Output_images/Final/Mask_{}.png'.format(inst+1)
            # # cv2.imwrite(filename, np.uint8(lbl_num[inst,0,:,:])*255)

            plt.figure(figsize = [7,7])
            plt.imshow(clas[inst,0,:,:], 'gray')
            plt.title('Output')
Example #28
def make_plots(model, train_x, train_y, test_x, test_y, curr_save_folder, model_name):
  unit_list = ['[GeV]', '[rad]', '[rad]', '[GeV]']
  variable_list = [r'$p_T$', r'$\eta$', r'$\phi$', r'$E$']
  line_style = ['--', '-']
  colors = ['orange', 'c']
  markers = ['*', 's']

  model.to('cpu')

  # Histograms
  idxs = (0, 100000)  # Choose events to compare
  data = torch.tensor(test_x[idxs[0]:idxs[1]].values, dtype = torch.float)
  pred = model(data).detach().numpy()
  pred = np.multiply(pred, train_x.std().values)
  pred = np.add(pred, train_x.mean().values)
  data = np.multiply(data, train_x.std().values)
  data = np.add(data, train_x.mean().values)

  alph = 0.8
  n_bins = 50
  for kk in np.arange(4):
      plt.figure(kk + 4)
      n_hist_data, bin_edges, _ = plt.hist(data[:, kk], color=colors[1], label='Input', alpha=1, bins=n_bins)
      n_hist_pred, _, _ = plt.hist(pred[:, kk], color=colors[0], label='Output', alpha=alph, bins=bin_edges)
      plt.suptitle(train_x.columns[kk])
      plt.xlabel(variable_list[kk] + ' ' + unit_list[kk])
      plt.ylabel('Number of events')
      ms.sciy()
      # plt.yscale('log')
      plt.legend()
      fig_name = model_name + '_hist_%s' % train_x.columns[kk]
      plt.savefig(curr_save_folder + fig_name)


  residual_strings = [r'$(p_{T,out} - p_{T,in}) / p_{T,in}$',
                          r'$(\eta_{out} - \eta_{in}) / \eta_{in}$',
                          r'$(\phi_{out} - \phi_{in}) / \phi_{in}$',
                          r'$(E_{out} - E_{in}) / E_{in}$']
  residuals = (pred - data.detach().numpy()) / data.detach().numpy()
  range = (-.02, .02)
  for kk in np.arange(4):
      plt.figure()
      n_hist_pred, bin_edges, _ = plt.hist(
          residuals[:, kk], label='Residuals', linestyle=line_style[0], alpha=alph, bins=100, range=range)
      plt.suptitle('Residuals of %s' % train_x.columns[kk])
      plt.xlabel(residual_strings[kk])  # (train_x.columns[kk], train_x.columns[kk], train_x.columns[kk]))
      plt.ylabel('Number of jets')
      ms.sciy()
      #plt.yscale('log')
      std = np.std(residuals[:, kk])
      std_err = utils.std_error(residuals[:, kk])
      mean = np.nanmean(residuals[:, kk])
      sem = stats.sem(residuals[:, kk], nan_policy='omit')
      ax = plt.gca()
      plt.text(.75, .8, 'Mean = %f$\\pm$%f\n$\\sigma$ = %f$\\pm$%f' % (mean, sem, std, std_err), bbox={'facecolor': 'white', 'alpha': 0.7, 'pad': 10},
              horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=18)
      fig_name = model_name + '_residual_%s' % train_x.columns[kk]
      plt.savefig(curr_save_folder + fig_name)

  res_df = pd.DataFrame({'pt': residuals[:, 0], 'eta': residuals[:, 1], 'phi': residuals[:, 2], 'E': residuals[:, 3]})
  save = True

  # Generate a custom diverging colormap
  cmap = sns.diverging_palette(10, 220, as_cmap=True)
  #cmap = 'RdBu'
  norm = mpl.colors.Normalize(vmin=-1, vmax=1, clip=False)
  mappable = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)

  group = ['pt', 'eta', 'phi', 'E']

  label_kwargs = {'fontsize': 20}
  title_kwargs = {"fontsize": 11}
  mpl.rcParams['lines.linewidth'] = 1
  mpl.rcParams['xtick.labelsize'] = 12
  mpl.rcParams['ytick.labelsize'] = 12
  group_arr = res_df.values
  corr = res_df.corr()
  qs = np.quantile(group_arr, q=[.0025, .9975], axis=0)
  ndim = qs.shape[1]
  ranges = [tuple(qs[:, kk]) for kk in np.arange(ndim)]
  figure = corner(group_arr, range=ranges, plot_density=True, plot_contours=True, no_fill_contours=False, #range=[range for i in np.arange(ndim)],
                  bins=50, labels=group, label_kwargs=label_kwargs, #truths=[0 for kk in np.arange(qs.shape[1])],
                  show_titles=True, title_kwargs=title_kwargs, quantiles=(0.16, 0.84),
                  # levels=(1 - np.exp(-0.5), .90), fill_contours=False, title_fmt='.2e')
                  levels=(1 - np.exp(-0.5), .90), fill_contours=False, title_fmt='.1e')

  # # Extract the axes
  axes = np.array(figure.axes).reshape((ndim, ndim))
  # Loop over the diagonal
  linecol = 'r'
  linstyl = 'dashed'
  # Loop over the histograms
  for yi in np.arange(ndim):
      for xi in np.arange(yi):
          ax = axes[yi, xi]
          # Set face color according to correlation
          ax.set_facecolor(color=mappable.to_rgba(corr.values[yi, xi]))
  cax = figure.add_axes([.87, .4, .04, 0.55])
  cbar = plt.colorbar(mappable, cax=cax, format='%.1f', ticks=np.arange(-1., 1.1, 0.2))
  cbar.ax.set_ylabel('Correlation', fontsize=20)

  if save:
      fig_name = 'corner_3d.png'
      plt.savefig(curr_save_folder + fig_name)
Example #29
                    plt.ylabel('Number of events')
                    # ms.sciy()
                    plt.yscale('log')
                    plt.legend()
                    fig_name = 'hist_%s' % train.columns[kk]
                    plt.savefig(curr_save_folder + fig_name)

                # Residuals
                residual_strings = [
                    r'$(p_{T,recon} - p_{T,true}) / p_{T,true}$',
                    r'$(\eta_{recon} - \eta_{true}) / \eta_{true}$',
                    r'$(\phi_{recon} - \phi_{true}) / \phi_{true}$',
                    r'$(E_{recon} - E_{true}) / E_{true}$'
                ]
                residuals = (pred -
                             data.detach().numpy()) / data.detach().numpy()
                range = (-.1, .1)
                #range=None
                for kk in np.arange(len(test.keys())):
                    plt.figure()
                    n_hist_pred, bin_edges, _ = plt.hist(
                        residuals[:, kk],
                        label='Residuals',
                        linestyle=line_style[0],
                        alpha=alph,
                        bins=200,
                        range=range)
                    plt.suptitle('Residuals of %s' % train.columns[kk])
                    plt.xlabel(
                        residual_strings[kk]
                    )  # (train.columns[kk], train.columns[kk], train.columns[kk]))
Example #30
    def __getitem__(self, index):
        path = self.paths[index]
        all_data = np.load(path)
        sequence = all_data['images'][:self.required_length]
        names = all_data['names'][:self.required_length].tolist()
        return torch.from_numpy(sequence), names

    def __len__(self):
        return len(self.paths)


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description='PyTorch Magnetic field Prediction Model')
    parser.add_argument('--input_length', type=int, default=12)
    parser.add_argument('--predict_length', type=int, default=6)
    parser.add_argument('--total_length', type=int, default=24)
    args = parser.parse_args()

    path = '../datasets/train'
    dataloader = DataLoader(dataset=SunspotData(path, configs=args),
                            num_workers=0,
                            batch_size=2,
                            shuffle=True)
    for i in range(5):
        print('======> epoch %d' % i)
        for j, (data, names) in enumerate(dataloader):
            print(data.detach().cpu().numpy())