Code Example #1
def show_reconstruction(model, img, n_images, args, num):
    # Note: `args` is used as a path/filename prefix here, not an argparse namespace.
    model.eval()
    x = img

    _, x_recon = model(x)
    data_o = x.cpu().data.numpy()        # original images
    data_r = x_recon.cpu().data.numpy()  # reconstructions
    data = data_r

    # Save the grid of original images.
    img = combine_images(np.transpose(data_o, [0, 2, 3, 1]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save(args + str(num) + "_real.png")

    # Save the grid of reconstructions.
    img = combine_images(np.transpose(data_r, [0, 2, 3, 1]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save(args + str(num) + "_recon.png")

    # Re-read both grids as grayscale for the (disabled) edge-overlay step.
    rel = cv2.imread(args + str(num) + "_real.png", 0)
    mask = cv2.imread(args + str(num) + "_recon.png", 0)
    #draw_mask_edge_on_image_cv2(rel, mask, str(num))

    # Note: this re-saves the same reconstruction grid over recon.png.
    img = combine_images(np.transpose(data, [0, 2, 3, 1]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save(args + str(num) + "_recon.png")
    # print()
    # print('Reconstructed images are saved to %s/real_and_recon_color.png' % args.save_dir)
    # print('-' * 70)
    # plt.figure()
    # plt.imshow(plt.imread(args.save_dir + "/real_and_recon.png"))  # plt.imshow() renders a one-channel image with a colormap
    plt.show()
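
Every CapsNet-style snippet in this listing calls a `combine_images` helper from the project's own `utils` module, which tiles a batch of images shaped (N, H, W, C) into a single grid with values kept in [0, 1], so callers can multiply by 255 and save with `Image.fromarray`. None of the repositories' implementations are reproduced on this page; the following is only a minimal sketch under those assumptions (single-channel images, optional `height`/`width` grid-shape arguments as used in Examples #6 and #29), and the actual helpers may differ:

import math
import numpy as np

def combine_images(generated_images, height=None, width=None):
    # Tile a batch of single-channel images (N, H, W, C) into one
    # (rows * H, cols * W) grid, preserving the input value range.
    num = generated_images.shape[0]
    if width is None and height is None:
        width = int(math.sqrt(num))
        height = int(math.ceil(float(num) / width))
    elif width is None:
        width = int(math.ceil(float(num) / height))
    elif height is None:
        height = int(math.ceil(float(num) / width))
    shape = generated_images.shape[1:3]
    image = np.zeros((height * shape[0], width * shape[1]),
                     dtype=generated_images.dtype)
    for index, img in enumerate(generated_images):
        i = index // width
        j = index % width
        image[i * shape[0]:(i + 1) * shape[0],
              j * shape[1]:(j + 1) * shape[1]] = img[:, :, 0]
    return image

For a (100, 28, 28, 1) MNIST batch this returns a (280, 280) array, which is exactly what the `img * 255` / `Image.fromarray(...)` pattern used throughout these examples expects.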
Code Example #2
def start_message(message):
    bot.send_message(message.chat.id, 'Привет, ты написал мне /start')  # "Hi, you sent me /start"
    room_id = message.chat.id
    #bot.send_message(message.chat.id, 'Привет, ты написал мне /start')
    #bot.send_photo(chat_id=room_id, photo=open('images/resized-2C.png', 'rb'))
    player_cards = get_random_cards()
    score = get_score(player_cards)
    bot.send_message(message.chat.id,
                     'Вы набрали: %s!' % score,  # "You scored: %s!"
                     reply_markup=markup)
    combine_images(player_cards, 'player.png')
    bot.send_photo(chat_id=room_id, photo=open('player.png', 'rb'))
    TEST = 'ffffff'  # leftover debug output
    print(TEST)
Code Example #3
def show_reconstruction(model, test_loader, n_images, args):
    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image
    import numpy as np
    distance = torch.zeros((1, )).cuda()  # placeholder first entry; stripped after the loop via distance[1:]
    adversary = GradientSignAttack(model, loss_fn=caps_loss, eps=0.3)
    model.eval()
    for x, y in test_loader:
        y = torch.zeros(y.size(0), 10).scatter_(1, y.view(-1, 1), 1.)
        x, y = Variable(x.cuda()), Variable(y.cuda())
        x_adv = adversary.perturb(x, args.lam_recon, y)
        _, x_recon = model(x_adv)
        distance = compute_distance(x_adv, x_recon, distance)
        data = np.concatenate([x.cpu().data, x_recon.cpu().data])
        img = combine_images(np.transpose(data, [0, 2, 3, 1]))
        image = img * 255
        Image.fromarray(image.astype(np.uint8)).save(args.save_dir +
                                                     "/real_and_recon.png")
        print()
        print('Reconstructed images are saved to %s/real_and_recon.png' %
              args.save_dir)
        print('-' * 70)
        plt.imshow(plt.imread(args.save_dir + "/real_and_recon.png", ))
        plt.show()
        break
    distance = distance[1:]
    print(distance)
Code Example #4
    def show_reconstruction(self, test_loader, n_images):
        import matplotlib.pyplot as plt
        from utils import combine_images
        from PIL import Image
        import numpy as np

        self.model.eval()
        for x, _ in test_loader:
            if self.use_cuda:
                x = Variable(x[:min(n_images, x.size(0))].cuda(),
                             volatile=True)
            else:
                x = Variable(x[:min(n_images, x.size(0))], volatile=True)
            _, x_recon = self.model(x)
            data = np.concatenate([x.data.cpu(), x_recon.data.cpu()])
            img = combine_images(np.transpose(data, [0, 2, 3, 1]))
            image = img * 255
            Image.fromarray(image.astype(np.uint8)).save(self.config.save_dir +
                                                         "/real_and_recon.png")
            print()
            print('Reconstructed images are saved to %s/real_and_recon.png' %
                  self.config.save_dir)
            print('-' * 70)
            plt.imshow(plt.imread(self.config.save_dir + "/real_and_recon.png"))
            plt.show()
            break
Code Example #5
def test_transformer(config, netG, train_iterators, monitor, param_file):

    netG_A2B = netG['netG_A2B']

    train_iterator_src, train_iterator_trg = train_iterators

    # Load boundary image to get Variable shapes
    bod_map_A = train_iterator_src.next()[0]
    bod_map_B = train_iterator_trg.next()[0]
    real_bod_map_A = nn.Variable(bod_map_A.shape)
    real_bod_map_B = nn.Variable(bod_map_B.shape)
    real_bod_map_A.persistent, real_bod_map_B.persistent = True, True

    ################### Graph Construction ####################
    # Generator
    with nn.parameter_scope('netG_transformer'):
        with nn.parameter_scope('netG_A2B'):
            fake_bod_map_B = netG_A2B(
                real_bod_map_A, test=True,
                norm_type=config["norm_type"])  # (1, 15, 64, 64)
    fake_bod_map_B.persistent = True

    # load parameters of networks
    with nn.parameter_scope('netG_transformer'):
        with nn.parameter_scope('netG_A2B'):
            nn.load_parameters(param_file)

    monitor_vis = nm.MonitorImage('result',
                                  monitor,
                                  interval=config["test"]["vis_interval"],
                                  num_images=1,
                                  normalize_method=lambda x: x)

    # Test
    i = 0
    iter_per_epoch = train_iterator_src.size // config["test"]["batch_size"] + 1

    if config["num_test"]:
        num_test = config["num_test"]
    else:
        num_test = train_iterator_src.size

    for _ in range(iter_per_epoch):
        bod_map_A = train_iterator_src.next()[0]
        bod_map_B = train_iterator_trg.next()[0]
        real_bod_map_A.d, real_bod_map_B.d = bod_map_A, bod_map_B

        # Generate fake images
        fake_bod_map_B.forward(clear_buffer=True)

        i += 1

        images_to_visualize = [
            real_bod_map_A.d, fake_bod_map_B.d, real_bod_map_B.d
        ]
        visuals = combine_images(images_to_visualize)
        monitor_vis.add(i, visuals)

        if i > num_test:
            break
Code Example #6
def manipulate_latent(model, data, args):
    x_true, y_true = data

    index = np.argmax(y_true, 1) == args.digit
    number = np.random.randint(low=0, high=sum(index) - 1)
    x, y = x_true[index][number], y_true[index][number]
    x, y = np.expand_dims(x, 0), np.expand_dims(y, 0)
    noise = np.zeros([1, 10, 16])
    x_recons = []

    # Change params of vect in 0.05 steps. See also [1]
    for dim in range(16):
        for r in [
                -0.25, -0.2, -0.15, -0.1, -0.05, 0, 0.05, 0.1, 0.15, 0.2, 0.25
        ]:
            tmp = np.copy(noise)
            tmp[:, :, dim] = r
            x_recon = model.predict([x, y, tmp])
            x_recons.append(x_recon)

    x_recons = np.concatenate(x_recons)

    img = utils.combine_images(x_recons, height=16)
    image = img * 255
    Image.fromarray(image.astype(
        np.uint8)).save(args.save_dir + '/manipulate-%d.png' % args.digit)
    print('Manipulated result saved to %s/manipulate-%d.png' %
          (args.save_dir, args.digit))
Code Example #7
def test_rotated_images(model, data):
    # Note: the `data` argument is unused; the rotated MNIST set is loaded here instead.
    data2 = load_rotated_mnist()
    x_test, y_test = data2
    print("x_test_shape :", x_test.shape)
    print("y_test_shape :", y_test.shape)
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print("y_pred_test :", y_pred.shape)
    print("x_recon_test :", x_recon.shape)
    print('-' * 50)
    print(
        'Test acc:',
        np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1)) / y_test.shape[0])

    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image

    img = combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save("real_and_recon.png")
    print()
    print('Reconstructed images are saved to ./real_and_recon.png')
    print('-' * 50)
    plt.imshow(plt.imread("real_and_recon.png", ))
    plt.show()
Code Example #8
def manipulate_latent(model, data, args):
    print('-' * 30 + 'Begin: manipulate' + '-' * 30)
    x_test, y_test = data
    index = np.argmax(y_test, 1) == args.digit
    number = np.random.randint(low=0, high=sum(index) - 1)
    x, y = x_test[index][number], y_test[index][number]
    x, y = np.expand_dims(x, 0), np.expand_dims(y, 0)
    noise = np.zeros([1, 2, 16])  # 10->2
    x_recons = []
    for dim in range(16):
        for r in [
                -0.25, -0.2, -0.15, -0.1, -0.05, 0, 0.05, 0.1, 0.15, 0.2, 0.25
        ]:
            tmp = np.copy(noise)
            tmp[:, :, dim] = r
            x_recon = model.predict([x, y, tmp])
            x_recons.append(x_recon)

    x_recons = np.concatenate(x_recons)

    img = combine_images(x_recons, height=16)
    #    image = img*255
    #    Image.fromarray(image.astype(np.uint8)).save(args.save_dir + '/manipulate-%d.png' % args.digit)

    #    Image.fromarray(img).save(args.save_dir + '/manipulate-%d.png' % args.digit)
    #    save_images(x_recons,image_manifold_size(x_recons.shape[0]), args.save_dir + '/manipulate-%d.png' % args.digit)

    img_path = args.save_dir + '/manipulate-%d.png' % args.digit
    # scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite is the usual replacement.
    scipy.misc.imsave(
        img_path,
        img)  # directly save the combined (larger) reconstructed image
    print('manipulated result saved to %s/manipulate-%d.png' %
          (args.save_dir, args.digit))
    print('-' * 30 + 'End: manipulate' + '-' * 30)
Code Example #9
def show_reconstruction(model, test_loader, n_images, args):
    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image
    import numpy as np

    model.eval()
    for x, _, _ in test_loader:

        # x = x.float()
        # x = x/256
        # Reshape the flat batch to the (N, C, H, W) layout the model expects.
        x = x.view_as(torch.Tensor(100, 1, 98, 60))

        x = Variable(x[:min(n_images, x.size(0))].cuda(), volatile=True)
        _, x_recon = model(x)
        # Move tensors back to the CPU before converting to NumPy.
        data = np.concatenate([x.cpu().data, x_recon.cpu().data])
        img = combine_images(np.transpose(data, [0, 2, 3, 1]))
        image = img * 255
        Image.fromarray(image.astype(np.uint8)).save(args.save_dir +
                                                     "/real_and_recon.png")
        print()
        print('Reconstructed images are saved to %s/real_and_recon.png' %
              args.save_dir)
        print('-' * 70)
        plt.imshow(plt.imread(args.save_dir + "/real_and_recon.png", ))
        plt.show()
        break
Code Example #10
def test(model, data, sample):
    x_test, y_test = data
    #print "x_test shape is: ",x_test.shape
    angle = 0
    sample = sample
    one_hots = np.diag(np.ones(10))
    x_test_new = np.array([x_test[sample] for i in xrange(9)])
    for i in xrange(0,9):
        a = mc.imrotate(x_test[sample,:,:,0],angle)
        x_test_new[i,:,:,0] = a
        angle = angle + 10
    digitcaps, y_pred, x_recon = model.predict([x_test_new, one_hots], batch_size=9)
    #print('-'*50)
    #print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))*1.0/y_test.shape[0])
    #print "shape is ",digitcaps.shape
    label = np.argmax(y_test[sample])
    #print "labels is: ",y_test[sample],"   ",label
    for i in xrange(0,9):
	print np.dot(digitcaps[0,label,:],digitcaps[i,label,:])/(np.linalg.norm(digitcaps[0,label,:])*np.linalg.norm(digitcaps[i,label,:]))
    #print digitcaps[:,label,:]
    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image

    img = combine_images(np.concatenate([np.array([x_test_new[0]]),x_recon]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save("real_and_recon_fmnist_rot.png")
    #print()
    #print('Reconstructed images are saved to ./real_and_recon_fmnist_rot.png')
    print('-'*50)
Code Example #11
def manipulate_latent(model, data, args):
    print('-' * 30 + 'Begin: manipulate' + '-' * 30)
    x_test, y_test = data
    index = np.argmax(y_test, 1) == args.digit
    number = np.random.randint(low=0, high=sum(index) - 1)
    x, y = x_test[index][number], y_test[index][number]
    x, y = np.expand_dims(x, 0), np.expand_dims(y, 0)
    noise = np.zeros([1, 10, 16])
    x_recons = []
    for dim in range(16):
        for r in [
                -0.25, -0.2, -0.15, -0.1, -0.05, 0, 0.05, 0.1, 0.15, 0.2, 0.25
        ]:
            tmp = np.copy(noise)
            tmp[:, :, dim] = r
            x_recon = model.predict([x, y, tmp])
            x_recons.append(x_recon)

    x_recons = np.concatenate(x_recons)

    img = combine_images(x_recons, height=16)
    image = img * 255
    Image.fromarray(image.astype(
        np.uint8)).save(args.save_dir + '/manipulate-%d.png' % args.digit)
    print('manipulated result saved to %s/manipulate-%d.png' %
          (args.save_dir, args.digit))
    print('-' * 30 + 'End: manipulate' + '-' * 30)
Code Example #12
File: capsulenet.py  Project: lkolezhuk/LunaCapsNet
def test(model, data, args):
    print('-' * 30 + 'Begin: test' + '-' * 30)

    x_test, y_test = data
    print('Testing on {0} images'.format(len(y_test)))
    print(np.argmax(y_test, axis=1))
    y_pred, x_recon = model.predict(x_test)

    true_lab = []
    pred_lab = []
    TP = 0
    FP = 0
    TN = 0
    FN = 0
    for i in range(0, len(y_pred)):
        t = np.argmax(y_test[i])
        p = np.argmax(y_pred[i])
        print("GT: " + str(t) + " Pred: " + str(p) + "--------- GT: " +
              str(y_test[i]) + " Pred: " + str(y_pred[i]))

        true_lab.append(t)
        pred_lab.append(p)

        if t == p == 1:
            TP += 1
        if p == 1 and t != p:
            FP += 1
        if t == p == 0:
            TN += 1
        if p == 0 and t != p:
            FN += 1

    from sklearn.metrics import roc_auc_score, accuracy_score, confusion_matrix

    # conf_mat = confusion_matrix(y_test, y_pred)

    print("Accuracy {0}".format(
        accuracy_score(np.argmax(y_test, axis=1), np.argmax(y_pred, axis=1))))

    roc = roc_auc_score(true_lab, pred_lab)
    print("TP: {0}, FP: {1}, TN: {2}, FN: {3}".format(TP, FP, TN, FN))
    print("Accuracy: {0}".format((TP + TN) / (TP + TN + FP + FN)))
    print("AUC ROC : " + str(roc))
    print("F1 score: {0}".format(2 * TP / (2 * TP + FP + FN)))
    print("Sensitivity(TPR): {0}, Specificity(TNR): {1}".format(
        TP / (TP + FN), TN / (FP + TN)))
    print("PPV(Precision): {0}".format(TP / (TP + FP)))
    print("NPV: {0}".format(TN / (TN + FN)))

    img = utils.combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
    rescaled = (255.0 / img.max() * (img - img.min())).astype(np.uint8)
    Image.fromarray(rescaled).save(args.save_dir + "/real_and_recon.png")
    print()
    print('Reconstructed images are saved to %s/real_and_recon.png' %
          args.save_dir)
    print('-' * 30 + 'End: test' + '-' * 30)
Code Example #13
def test(model, data, args):

    time_date = datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
    # Note: x_train is assumed to be available as a module-level global here.
    filename = time_date + '_capsule_network' + '_batch_size=' + str(
        args.batch_size) + '_epochs=' + str(args.epochs) + '_ntr=' + str(
            x_train.shape[0]) + '_nch=' + str(x_train.shape[3])
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-' * 30 + 'Begin: test' + '-' * 30)
    img = combine_images(
        np.concatenate([np.abs(x_test[:50]),
                        np.abs(x_recon[:50])]))
    image = cm.hot(img) * 255
    Image.fromarray(image.astype(np.uint8)).save(args.save_dir +
                                                 "/real_and_recon4.png")
    print('Reconstructed images are saved to %s/real_and_recon4.png' %
          args.save_dir)
    print('-' * 30 + 'End: test' + '-' * 30)

    predicted_classes = y_pred
    predicted_classes = np.asarray(predicted_classes)
    b = np.zeros_like(predicted_classes)
    b[np.arange(len(predicted_classes)), predicted_classes.argmax(1)] = 1
    predicted_classes = np.argmax(np.round(b), axis=1)
    b = np.zeros_like(y_test)
    b[np.arange(len(y_test)), y_test.argmax(1)] = 1
    y_test = np.argmax(np.round(b), axis=1)
    class_names = ['Unresolved', 'FRI', 'FRII']
    from sklearn.metrics import classification_report
    print(
        classification_report(y_test,
                              predicted_classes,
                              target_names=class_names,
                              digits=4))
    os.chdir(args.save_dir)
    filename1 = filename + '_classification_report.txt'
    with open(filename1, 'w') as f:
        f.write(
            classification_report(y_test,
                                  predicted_classes,
                                  target_names=class_names,
                                  digits=4))

    ############# Architecture

    with open(filename + '_architecture.txt', 'w') as fh:
        model.summary(print_fn=lambda x: fh.write(x + '\n'))
Code Example #14
def show_reconstruction(model, test_loader, n_images, args):

    model.eval()
    for x, _ in test_loader:
        x = Variable(x[:min(n_images, x.size(0))].cuda(), volatile=True)
        _, x_recon = model(x)
        print(x.shape)
        data_o = np.concatenate([x.cpu().data])
        data_r = np.concatenate([x_recon.cpu().data])
        data = np.concatenate([data_r])

        img = combine_images(np.transpose(data_o, [0, 2, 3, 1]))
        image = img * 255
        Image.fromarray(image.astype(np.uint8)).save(args.save_dir + "/real.png")

        img = combine_images(np.transpose(data_r, [0, 2, 3, 1]))
        image = img * 255
        Image.fromarray(image.astype(np.uint8)).save(args.save_dir + "/recon.png")

        rel = cv2.imread(args.save_dir + "/real.png", 0)
        mask = cv2.imread(args.save_dir + "/recon.png", 0)
        draw_mask_edge_on_image_cv2(rel, mask)
        
        img = combine_images(np.transpose(data, [0, 2, 3, 1]))
        image = img * 255
        Image.fromarray(image.astype(np.uint8)).save(args.save_dir + "/recon.png")
        print()
        print('Reconstructed images are saved to %s/recon.png' % args.save_dir)
        print('-' * 70)
        im_gray = cv2.imread(args.save_dir + "/recon.png", cv2.IMREAD_GRAYSCALE)
        im_color = cv2.applyColorMap(im_gray, cv2.COLORMAP_HSV)  # cv2.applyColorMap converts a one-channel image to color
        plt.figure()
        plt.imshow(im_color, cmap='jet_r')
        Image.fromarray(im_color.astype(np.uint8)).save(args.save_dir + "/real_and_recon_color.png")
        # print()
        # print('Reconstructed images are saved to %s/real_and_recon_color.png' % args.save_dir)
        # print('-' * 70)
        # plt.figure()
        # plt.imshow(plt.imread(args.save_dir + "/real_and_recon.png", )) #matplot imshow() WILL AUTO CHANGE ONE CHANNEL IMG TO COLOR ONE
        plt.show()
        break
Code Example #15
    def save_output_image(self, samples, image_name):
        """
        Visualize and save images in .png format.
        :param samples: images to be visualized
        :param image_name: name of the saved .png file
        """
        if not os.path.exists(args.save_dir + "/images"):
            os.makedirs(args.save_dir + "/images")
        img = combine_images(samples)
        img = img * 255
        Image.fromarray(img.astype(np.uint8)).save(args.save_dir + "/images/" +
                                                   image_name + ".png")
        print(image_name, "Image saved.")
Code Example #16
def test(config, netG, train_iterator, monitor, param_file):
    # Load image and boundary image to get Variable shapes
    img, bod_map, bod_map_resize = train_iterator.next()

    real_img = nn.Variable(img.shape)
    real_bod_map = nn.Variable(bod_map.shape)
    real_bod_map_resize = nn.Variable(bod_map_resize.shape)

    ################### Graph Construction ####################
    # Generator
    with nn.parameter_scope('netG_decoder'):
        fake_img = netG(real_bod_map, test=False)
    fake_img.persistent = True

    # load parameters of networks
    with nn.parameter_scope('netG_decoder'):
        nn.load_parameters(param_file)

    monitor_vis = nm.MonitorImage('result',
                                  monitor,
                                  interval=config["test"]["vis_interval"],
                                  num_images=4,
                                  normalize_method=lambda x: x)

    # Test
    i = 0
    iter_per_epoch = train_iterator.size // config["test"]["batch_size"] + 1

    if config["num_test"]:
        num_test = config["num_test"]
    else:
        num_test = train_iterator.size

    for _ in range(iter_per_epoch):
        img, bod_map, bod_map_resize = train_iterator.next()

        real_img.d = img
        real_bod_map.d = bod_map
        real_bod_map_resize.d = bod_map_resize

        # Generate fake image
        fake_img.forward(clear_buffer=True)

        i += 1

        images_to_visualize = [real_bod_map_resize.d, fake_img.d, img]
        visuals = combine_images(images_to_visualize)
        monitor_vis.add(i, visuals)

        if i > num_test:
            break
Code Example #17
File: capsulenet.py  Project: danood/CapsNet-Keras
def test(model, data, args):
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-'*30 + 'Begin: test' + '-'*30)
    print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0])

    img = combine_images(np.concatenate([x_test[:50],x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save(args.save_dir + "/real_and_recon.png")
    print()
    print('Reconstructed images are saved to %s/real_and_recon.png' % args.save_dir)
    print('-' * 30 + 'End: test' + '-' * 30)
    plt.imshow(plt.imread(args.save_dir + "/real_and_recon.png"))
    plt.show()
Code Example #18
def test(model, data, batch_size, args):

    x_test, y_test = data
    #y_test = np.argmax(y_test,axis=1)

    y_pred, x_recon = model.predict(x_test, batch_size=batch_size)

    print('-' * 30 + 'Begin: test' + '-' * 30)
    print(
        'Test acc:',
        np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1)) / y_test.shape[0])

    img = combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save(args.save_dir +
                                                 "/real_and_recon.png")
    print()
    print('Reconstructed images are saved to %s/real_and_recon.png' %
          args.save_dir)
    print('-' * 30 + 'End: test' + '-' * 30)
    plt.imshow(plt.imread(args.save_dir + "/real_and_recon.png"))
    plt.show()

    y_pred = np.argmax(y_pred, axis=1)
    y_test = np.argmax(y_test, axis=1)

    # Print classification report

    print("Classification Report")

    print(['exC', 'mddC', 'nC ', 'mC', 'mduC'])
    print('')
    print(" Test Accuracy : " + str(accuracy_score(y_test, y_pred)))
    print('')
    #y_test = np.argmax(y_test,axis=1)
    #y_pred = np.argmax(y_pred, axis=1)
    #print(classification_report(y_true,y_pred,digits=5))
    print(
        classification_report(
            y_test, y_pred, target_names=['exC', 'mddC', 'nC ', 'mC', 'mduC']))

    print('')

    cnf_matrix = confusion_matrix(y_test, y_pred)
    print(cnf_matrix)

    print('')
Code Example #19
def test(model, data, args):
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-'*30 + 'Begin: test' + '-'*30)
    print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0])

    img = combine_images(np.concatenate([x_test[::10][:50],x_recon[::10][:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save(args.save_dir + "/real_and_recon.png")
    print()
    print('Reconstructed images are saved to %s/real_and_recon.png' % args.save_dir)
    print('-' * 30 + 'End: test' + '-' * 30)
    plt.imshow(plt.imread(args.save_dir + "/real_and_recon.png"))
    plt.show()
Code Example #20
def test(model, data, args):

    # Create an augmentation function and cache augmented samples
    # to be displayed later
    x_augmented = []

    def test_generator_with_augmentation(x, batch_size, shift_range,
                                         rotation_range):
        test_datagen = ImageDataGenerator(width_shift_range=shift_range,
                                          height_shift_range=shift_range,
                                          rotation_range=rotation_range)
        generator = test_datagen.flow(x, batch_size=batch_size, shuffle=False)
        while True:
            x_batch = generator.next()
            x_augmented.extend(x_batch)
            yield x_batch

    # Run predictions
    test_batch_size = 100
    x_true, y_true = data
    generator = test_generator_with_augmentation(x_true, test_batch_size,
                                                 args.shift_fraction,
                                                 args.rotation_range)
    y_pred, x_recon = model.predict_generator(generator=generator,
                                              steps=len(x_true) //
                                              test_batch_size)

    # Print different metrics using the top score
    y_true = np.argmax(y_true, 1)
    y_pred = np.argmax(y_pred, 1)

    print('Confusion matrix:\n', confusion_matrix(y_true, y_pred))
    print('\nAccuracy: ', accuracy_score(y_true, y_pred))
    print('Recall: ', recall_score(y_true, y_pred, average='weighted'))
    print('Precision: ', precision_score(y_true, y_pred, average='weighted'))
    print('F1-Score: ', f1_score(y_true, y_pred, average='weighted'))

    img = utils.combine_images(np.concatenate([x_augmented[:50],
                                               x_recon[:50]]))
    image = img * 255

    print('\nReconstructed images are saved to %s/real_and_recon.png' %
          args.save_dir)
    Image.fromarray(image.astype(np.uint8)).save(args.save_dir +
                                                 "/real_and_recon.png")
    plt.imshow(plt.imread(args.save_dir + "/real_and_recon.png"))
    plt.show()
Code Example #21
File: capsulenet.py  Project: dlazares/miniplaces
def test(model, data):
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-'*50)
    print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0])

    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image

    img = combine_images(np.concatenate([x_test[:50],x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save("real_and_recon.png")
    print()
    print('Reconstructed images are saved to ./real_and_recon.png')
    print('-'*50)
    plt.imshow(plt.imread("real_and_recon.png", ))
    plt.show()
Code Example #22
File: dcnet.py  Project: xuzf2016/Multi-level-DCNet
def test(model, data, args):
    x_test, y_test = data
    print('Testing the model...')
    y_pred, x_recon = model.predict(x_test, batch_size=100)

    print(
        'Test Accuracy: ',
        100.0 * np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1)) /
        (1.0 * y_test.shape[0]))

    img = combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save(args.save_dir +
                                                 "/real_and_recon.png")
    print()
    print('Reconstructed images are saved to %s/real_and_recon.png' %
          args.save_dir)
    plt.imshow(plt.imread(args.save_dir + "/real_and_recon.png"))
    plt.show()
Code Example #23
def test(model):
    generator = captcha_generator('../recaptcha_capsnet_keras/recaptcha', 1,
                                  args.batch_size, False)
    x_test, y_test = next(generator)
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-' * 50)
    print(
        'Test acc:',
        np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1)) / y_test.shape[0])
    print('Test loss:', margin_loss(y_test, y_pred))
    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image

    img = combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save("real_and_recon.png")
    print()
    print('Reconstructed images are saved to ./real_and_recon.png')
    print('-' * 50)
Code Example #24
def show_reconstruction(model, test_loader, n_images, args):
    import matplotlib.pyplot as plt
    from utils import combine_images, save_image  # utils.save_image expands a one-channel tensor to three channels, but the result is still black and white
    from PIL import Image
    import numpy as np
    import cv2
    import os.path
    import glob

    model.eval()
    for x, _ in test_loader:
        x = Variable(x[:min(n_images, x.size(0))].cuda(), volatile=True)
        _, x_recon = model(x)
        data = np.concatenate([x.cpu().data, x_recon.cpu().data])
        img = combine_images(np.transpose(data, [0, 2, 3, 1]))
        image = img * 255
        Image.fromarray(image.astype(np.uint8)).save(args.save_dir +
                                                     "/real_and_recon.png")
        print()
        print('Reconstructed images are saved to %s/real_and_recon.png' %
              args.save_dir)
        print('-' * 70)
        im_gray = cv2.imread(args.save_dir + "/real_and_recon.png",
                             cv2.IMREAD_GRAYSCALE)
        im_color = cv2.applyColorMap(
            im_gray, cv2.COLORMAP_HSV
        )  # cv2.applyColorMap converts a one-channel image to color
        plt.figure()
        plt.imshow(im_color)
        Image.fromarray(im_color.astype(
            np.uint8)).save(args.save_dir + "/real_and_recon_color.png")
        print()
        print('Reconstructed images are saved to %s/real_and_recon_color.png' %
              args.save_dir)
        print('-' * 70)
        plt.figure()
        plt.imshow(plt.imread(
            args.save_dir + "/real_and_recon.png",
        ))  # plt.imshow() renders a one-channel image with a colormap
        plt.show()
        break
Code Example #25
def test(model, data):
    x_test, y_test = data
    one_hots = np.diag(np.ones(10))
    x_test_new = np.array([x_test[0] for i in range(10)])
    y_pred, x_recon = model.predict([x_test_new, one_hots], batch_size=10)
    #print('-'*50)
    #print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))*1.0/y_test.shape[0])

    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image

    img = combine_images(np.concatenate([np.array([x_test_new[0]]), x_recon]))
    image = img * 255
    Image.fromarray(image.astype(
        np.uint8)).save("real_and_recon_fmnist_rot.png")
    print()
    print('Reconstructed images are saved to ./real_and_recon_fmnist_rot.png')
    print('-' * 50)
    plt.imshow(plt.imread("real_and_recon_fmnist_rot.png", ))
    plt.show()
Code Example #26
def show_reconstruction(model, test_loader, n_images, args):
    import matplotlib
    matplotlib.use('agg')
    from utils import combine_images
    import numpy as np
    import cv2

    model.eval()
    for x, _ in test_loader:
        # Run the forward pass without tracking gradients.
        with torch.no_grad():
            x = x[:min(n_images, x.size(0))].cuda()
            _, x_recon = model(x)
        data = np.concatenate([x.data.cpu(), x_recon.data.cpu()])
        img = combine_images(np.transpose(data, [0, 2, 3, 1]))
        image = (img * 255).astype(np.uint8)
        cv2.imwrite(args.save_dir + "/output.png", image)
        print()
        print('Reconstructed images are saved to %s/output.png' %
              args.save_dir)
        print('-' * 70)
        cv2.imshow('Image', image)
        cv2.waitKey(0)
        break
Code Example #27
File: capsulenet.py  Project: danood/CapsNet-Keras
def manipulate_latent(model, data, args):
    print('-'*30 + 'Begin: manipulate' + '-'*30)
    x_test, y_test = data
    index = np.argmax(y_test, 1) == args.digit
    number = np.random.randint(low=0, high=sum(index) - 1)
    x, y = x_test[index][number], y_test[index][number]
    x, y = np.expand_dims(x, 0), np.expand_dims(y, 0)
    noise = np.zeros([1, 10, 16])
    x_recons = []
    for dim in range(16):
        for r in [-0.25, -0.2, -0.15, -0.1, -0.05, 0, 0.05, 0.1, 0.15, 0.2, 0.25]:
            tmp = np.copy(noise)
            tmp[:,:,dim] = r
            x_recon = model.predict([x, y, tmp])
            x_recons.append(x_recon)

    x_recons = np.concatenate(x_recons)

    img = combine_images(x_recons, height=16)
    image = img*255
    Image.fromarray(image.astype(np.uint8)).save(args.save_dir + '/manipulate-%d.png' % args.digit)
    print('manipulated result saved to %s/manipulate-%d.png' % (args.save_dir, args.digit))
    print('-' * 30 + 'End: manipulate' + '-' * 30)
Code Example #28
File: capsulenet.py  Project: b1434145/acer_intern
def show_reconstruction(model, test_loader, n_images, args):
    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image
    import numpy as np

    model.eval()
    for x, _ in test_loader:
        x = Variable(x[:min(n_images, x.size(0))].cuda(), volatile=True)
        _, x_recon = model(x)
        data = np.concatenate([x.data.cpu(), x_recon.data.cpu()])
        print(data.shape)
        img = combine_images(np.transpose(data, [0, 2, 3, 1]))
        print((img.shape))
        image = img * 255
        print((image.shape))
        Image.fromarray(image.astype(np.uint8)).save(args.save_dir + "/real_and_recon.png")
        print()
        print('Reconstructed images are saved to %s/real_and_recon.png' % args.save_dir)
        print('-' * 70)
        plt.imshow(plt.imread(args.save_dir + "/real_and_recon.png", ))
        plt.show()
        # print('hello')
        break
Code Example #29
    # Pick one sample index per digit class (flags/index are assumed to be
    # preallocated arrays of length 10).
    digits = np.where(y_test == 1)[1]
    for i, num in enumerate(digits):
        num = int(num)
        if flags[num]:
            continue
        else:
            flags[num] = 1
            index[num] = i
        if np.all(flags):
            break

    x_deform_test = np.array([affine(x) for x in x_test])
    print(index)
    print(x_test[index].shape)
    input_img = np.concatenate([x_test[index], x_deform_test[index]])
    input_img = combine_images(input_img, height=2, width=10)
    input_img = input_img * 255
    Image.fromarray(input_img.astype(np.uint8)).save(args.save_dir +
                                                     '/input.png')

    model.load_weights(args.weights1)
    _, x_recon = eval_model.predict(x_test, batch_size=100)
    _, x_deform_recon = eval_model.predict(x_deform_test, batch_size=100)
    recon_img = np.concatenate([x_recon[index], x_deform_recon[index]])
    recon_img = combine_images(recon_img, height=2, width=10)
    recon_img = recon_img * 255
    Image.fromarray(recon_img.astype(np.uint8)).save(args.save_dir +
                                                     '/recon.png')

    model.load_weights(args.weights2)
    _, x_l1_recon = eval_model.predict(x_test, batch_size=100)
Code Example #30
def train(config, netG, netD, solver_netG, solver_netD, train_iterator,
          monitor):

    if config["train"][
            "feature_loss"] and config["train"]["feature_loss"]["lambda"] > 0:
        print(
            f'Applying VGG feature Loss, weight: {config["train"]["feature_loss"]["lambda"]}.'
        )
        with_feature_loss = True
    else:
        with_feature_loss = False

    # Load image and boundary image to get Variable shapes
    img, bod_map, bod_map_resize = train_iterator.next()

    real_img = nn.Variable(img.shape)
    real_bod_map = nn.Variable(bod_map.shape)
    real_bod_map_resize = nn.Variable(bod_map_resize.shape)

    ################### Graph Construction ####################
    # Generator
    with nn.parameter_scope('netG_decoder'):
        fake_img = netG(real_bod_map, test=False)
    fake_img.persistent = True

    fake_img_unlinked = fake_img.get_unlinked_variable()

    # Discriminator
    with nn.parameter_scope('netD_decoder'):
        pred_fake = netD(F.concatenate(real_bod_map_resize,
                                       fake_img_unlinked,
                                       axis=1),
                         test=False)
        pred_real = netD(F.concatenate(real_bod_map_resize, real_img, axis=1),
                         test=False)
    real_target = F.constant(1, pred_fake.shape)
    fake_target = F.constant(0, pred_real.shape)

    ################### Loss Definition ####################
    # for Generator
    gan_loss_G = gan_loss(pred_fake, real_target)
    gan_loss_G.persistent = True

    weight_L1 = config["train"]["weight_L1"]
    L1_loss = recon_loss(fake_img_unlinked, real_img)
    L1_loss.persistent = True
    loss_netG = gan_loss_G + weight_L1 * L1_loss

    if with_feature_loss:
        feature_loss = vgg16_perceptual_loss(127.5 * (fake_img_unlinked + 1.),
                                             127.5 * (real_img + 1.))
        feature_loss.persistent = True
        loss_netG += feature_loss * config["train"]["feature_loss"]["lambda"]

    # for Discriminator
    loss_netD = (gan_loss(pred_real, real_target) +
                 gan_loss(pred_fake, fake_target)) * 0.5

    ################### Setting Solvers ####################
    # for Generator
    with nn.parameter_scope('netG_decoder'):
        solver_netG.set_parameters(nn.get_parameters())

    # for Discriminator
    with nn.parameter_scope('netD_decoder'):
        solver_netD.set_parameters(nn.get_parameters())

    ################### Create Monitors ####################
    interval = config["monitor"]["interval"]
    monitors_G_dict = {
        'loss_netG': loss_netG,
        'loss_gan': gan_loss_G,
        'L1_loss': L1_loss
    }

    if with_feature_loss:
        monitors_G_dict.update({'vgg_feature_loss': feature_loss})

    monitors_G = MonitorManager(monitors_G_dict, monitor, interval=interval)

    monitors_D_dict = {'loss_netD': loss_netD}
    monitors_D = MonitorManager(monitors_D_dict, monitor, interval=interval)

    monitor_time = nm.MonitorTimeElapsed('time_training',
                                         monitor,
                                         interval=interval)
    monitor_vis = nm.MonitorImage('result',
                                  monitor,
                                  interval=1,
                                  num_images=4,
                                  normalize_method=lambda x: x)

    # Dump training information
    with open(os.path.join(monitor._save_path, "training_info.yaml"),
              "w",
              encoding="utf-8") as f:
        f.write(yaml.dump(config))

    # Training
    epoch = config["train"]["epochs"]
    i = 0
    lr_decay_start_at = config["train"]["lr_decay_start_at"]
    iter_per_epoch = train_iterator.size // config["train"]["batch_size"] + 1
    for e in range(epoch):
        logger.info(f'Epoch = {e} / {epoch}')
        train_iterator._reset()  # rewind the iterator
        if e > lr_decay_start_at:
            decay_coeff = 1.0 - max(0, e - lr_decay_start_at) / 50.
            lr_decayed = config["train"]["lr"] * decay_coeff
            print(f"learning rate decayed to {lr_decayed}")
            solver_netG.set_learning_rate(lr_decayed)
            solver_netD.set_learning_rate(lr_decayed)

        for _ in range(iter_per_epoch):
            img, bod_map, bod_map_resize = train_iterator.next()
            # bod_map_noize = np.random.random_sample(bod_map.shape) * 0.01
            # bod_map_resize_noize = np.random.random_sample(bod_map_resize.shape) * 0.01

            real_img.d = img
            real_bod_map.d = bod_map  # + bod_map_noize
            real_bod_map_resize.d = bod_map_resize  # + bod_map_resize_noize

            # Generate fake image
            fake_img.forward(clear_no_need_grad=True)

            # Update Discriminator
            solver_netD.zero_grad()
            solver_netG.zero_grad()
            loss_netD.forward(clear_no_need_grad=True)
            loss_netD.backward(clear_buffer=True)
            solver_netD.update()

            # Update Generator
            solver_netD.zero_grad()
            solver_netG.zero_grad()
            fake_img_unlinked.grad.zero()
            loss_netG.forward(clear_no_need_grad=True)
            loss_netG.backward(clear_buffer=True)
            fake_img.backward(grad=None)
            solver_netG.update()

            # Monitors
            monitor_time.add(i)
            monitors_G.add(i)
            monitors_D.add(i)

            i += 1

        images_to_visualize = [real_bod_map_resize.d, fake_img.d, img]
        visuals = combine_images(images_to_visualize)
        monitor_vis.add(i, visuals)

        if e % config["monitor"]["save_interval"] == 0 or e == epoch - 1:
            # Save parameters of networks
            netG_save_path = os.path.join(monitor._save_path,
                                          f'netG_decoder_{e}.h5')
            with nn.parameter_scope('netG_decoder'):
                nn.save_parameters(netG_save_path)
            netD_save_path = os.path.join(monitor._save_path,
                                          f'netD_decoder_{e}.h5')
            with nn.parameter_scope('netD_decoder'):
                nn.save_parameters(netD_save_path)
Code Example #31
    face_bbox = extend_bbox_by_landmarks(detected_face, facial_landmarks)

    face_attribute_mask = get_mask(face_attribute, facial_landmarks, img.shape)

    face_bbox = extend_bbox(face_bbox, img.shape, 0.25, 0.2)
    cropped_by_face_img = F.crop(PIL.Image.fromarray(
        img), face_bbox[1], face_bbox[0], face_bbox[3] - face_bbox[1], face_bbox[2] - face_bbox[0])

    with open(cfg['vae_cfg_path'], 'r') as f:
        vae_cfg = yaml.safe_load(f)

    model = VanillaVAE(**vae_cfg['model_params'])
    state_dict = torch.load(cfg['vae_model_path'], map_location=torch.device('cpu'))['state_dict']
    model.load_state_dict(state_dict)
    model.eval()

    inference_transform = get_preinference_transforms(cfg)
    restored, _, mu, _ = model.forward(inference_transform(cropped_by_face_img).unsqueeze(0))

    scale = cfg['face_attribute_change_scale']
    generated_img = restore_normalized_img(model.decode(mu - scale * vae_latent_vector).detach()[0])
    generated_img = F.resize(PIL.Image.fromarray(generated_img), cropped_by_face_img.size[::-1])
    generated_img = np.array(generated_img)

    modified_image = combine_images(img, generated_img, face_bbox, face_attribute_mask)

    # Insert the attribute/scale suffix before the file extension
    # (note: .index('.') finds the first dot, so paths with other dots would break).
    idx = img_path.index('.')
    new_img_path = img_path[:idx] + '_' + face_attribute + '_scale_' + str(scale) + img_path[idx:]

    cv2.imwrite(new_img_path, cv2.cvtColor(modified_image, cv2.COLOR_RGB2BGR))
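
Example #31 uses a different `combine_images` signature: it blends a generated face crop back into the original photo through an attribute mask. That helper's implementation is not shown either, so this is only a plausible sketch, assuming `bbox` is (x0, y0, x1, y1) in pixel coordinates, `generated` has the same size as the bbox region, and `mask` is a float map in [0, 1] covering the full image:

import numpy as np

def combine_images(original, generated, bbox, mask):
    # Alpha-blend `generated` into the bbox region of `original`,
    # weighted by the corresponding region of `mask`.
    x0, y0, x1, y1 = bbox
    out = original.astype(np.float32).copy()
    alpha = mask[y0:y1, x0:x1].astype(np.float32)[..., None]
    out[y0:y1, x0:x1] = alpha * generated + (1. - alpha) * out[y0:y1, x0:x1]
    return out.astype(np.uint8)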
Code Example #32
from utils import combine_images, get_random_cards
cards = get_random_cards()
combine_images(cards)
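
Examples #2 and #32 use yet another, unrelated `combine_images`: one that composites a hand of playing-card images into a single PNG for the Telegram bot. Its implementation is also not shown; a hedged sketch, assuming `get_random_cards` returns file paths to card images and that the default output name matches the call in Example #2:

from PIL import Image

def combine_images(card_paths, out_path='player.png'):
    # Paste the card images side by side onto one transparent canvas
    # and save the result as a PNG.
    cards = [Image.open(p).convert('RGBA') for p in card_paths]
    total_width = sum(c.width for c in cards)
    max_height = max(c.height for c in cards)
    canvas = Image.new('RGBA', (total_width, max_height), (0, 0, 0, 0))
    x = 0
    for c in cards:
        canvas.paste(c, (x, 0), c)  # use each card's alpha channel as the paste mask
        x += c.width
    canvas.save(out_path)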
Code Example #33
        # Sweep ~10 evenly spaced noise dimensions over [-1, 1] and collect the
        # generated image for each setting.
        noise = np.zeros([args.batch_size, args.noise_dim])
        dimension = range(0, args.noise_dim, int(args.noise_dim / 10))
        x_recons = []
        for dim in list(dimension):
            for r in [-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1]:
                tmp = np.copy(noise)
                tmp[:, dim] = r
                x_recon = sess.run(
                    [model.fake_img],
                    feed_dict={
                        model.noise_holder: tmp,
                        model.feat_holder: feat,
                        model.isTrain: False
                    })
                x_recon = np.array(x_recon)
                x_recon = np.reshape(x_recon, [
                    args.batch_size, args.img_width, args.img_height,
                    args.channel
                ])
                x_recons.append(x_recon[0, :, :, :])
        x_recons = np.array(x_recons)
        x_recons = np.reshape(
            x_recons,
            [x_recons.shape[0], args.img_width, args.img_height, args.channel])

        img = combine_images(x_recons, height=10)
        image = (img / 2. + 0.5) * 255
        Image.fromarray(image.astype(
            np.uint8)).save(args.save_manipulate_dir +
                            '/manipulate-%d.png' % args.digit)