Example #1
# Assumed imports for this snippet; read_list, load_landmarks, show_result
# and CRF are project-specific helpers that are not shown here.
import numpy as np
from PIL import Image
from scipy.spatial import ConvexHull
from skimage import draw


def main(args):
    image_paths = read_list(args.image_list)
    for path in image_paths:
        # landmarks_file should have the same prefix as image_file
        landmarks_file = path[:-3] + 'txt'
        im = Image.open(path)
        width, height = im.size
        landmarks = load_landmarks(landmarks_file)
        # flip the y axis (converted back to row indices when drawing below)
        landmarks[:, 1] = height - landmarks[:, 1]
        # select contour points
        #contour_points = get_contour_side(landmarks)
        # generate a contour curve with contour points
        hull = ConvexHull(landmarks)
        # draw landmarks
        lm = np.array(im)
        for i in range(landmarks.shape[0]):
            # draw.circle was removed in scikit-image 0.19; newer versions
            # use draw.disk((r, c), radius) instead
            rr, cc = draw.circle(height - landmarks[i, 1].astype('int32'),
                                 landmarks[i, 0].astype('int32'), 5)
            lm[rr, cc, :] = np.array((255, 0, 0))
        # create mask
        mask = np.zeros((height, width))
        rr, cc = draw.polygon(height - landmarks[hull.vertices, 1],
                              landmarks[hull.vertices, 0], mask.shape)
        mask[rr, cc] = 1

        path = path[:-1] if path[-1] == '/' else path
        image_name = path[path.rindex('/') + 1:-4] + '_contour.png'
        show_result(lm, mask,
                    np.tile((mask != 0)[:, :, np.newaxis], (1, 1, 3)) * im,
                    save=True, filename='images/' + image_name)

        # add CRF: stack background/foreground probabilities into a (2, H, W)
        # array with 0.9 confidence inside the hull and 0.1 outside
        prob = np.concatenate(((1 - mask)[np.newaxis, :, :] * 0.9 +
                               mask[np.newaxis, :, :] * 0.1,
                               mask[np.newaxis, :, :] * 0.9 +
                               (1 - mask)[np.newaxis, :, :] * 0.1), axis=0)
        crf_map = CRF(prob, np.array(im))  # avoid shadowing the built-in `map`
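
Example #1 (and Example #2 below) calls a CRF(prob, img) helper that is not defined in the snippet. A minimal sketch of what such a helper could look like, assuming the widely used pydensecrf binding (the kernel parameters here are illustrative defaults, not the project's own):

import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax

def CRF(prob, img):
    # prob: (2, H, W) background/foreground probabilities
    # img:  (H, W, 3) uint8 RGB image
    h, w = img.shape[:2]
    d = dcrf.DenseCRF2D(w, h, 2)
    # unary potentials are negative log-probabilities (clipped internally)
    d.setUnaryEnergy(unary_from_softmax(prob))
    # location-only smoothness kernel plus location+color appearance kernel
    d.addPairwiseGaussian(sxy=3, compat=3)
    d.addPairwiseBilateral(sxy=80, srgb=13,
                           rgbim=np.ascontiguousarray(img), compat=10)
    # mean-field inference; return the MAP label per pixel
    q = d.inference(5)
    return np.argmax(np.array(q), axis=0).reshape(h, w)
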
Example #2
# Assumed imports for this snippet; open_image, read_list, show_result and
# CRF are project-specific helpers that are not shown here.
import numpy as np
from face_alignment import FaceAlignment, LandmarksType
from scipy import ndimage
from scipy.spatial import ConvexHull
from skimage import draw


def main(args):
    image_paths = read_list(args.image_list)
    for path in image_paths:
        im = open_image(path)
        # resize for memory
        width, height = im.size
        if height > 800:
            im = im.resize((int(800 * width / height), 800))
            width, height = im.size

        # use 2D-FAN to detect landmarks (constructing FaceAlignment once,
        # outside the loop, would avoid reloading the model for every image)
        fa = FaceAlignment(LandmarksType._2D,
                           enable_cuda=True,
                           flip_input=False,
                           use_cnn_face_detector=True)
        try:
            landmarks = fa.get_landmarks(np.array(im))[-1]
            landmarks[:, 1] = height - landmarks[:, 1]
        except Exception:  # skip images where detection fails
            continue

        # generate a contour curve with contour points
        hull = ConvexHull(landmarks)
        # draw landmarks
        lm = np.array(im)
        for i in range(landmarks.shape[0]):
            rr, cc = draw.circle(height - landmarks[i, 1].astype('int32'),
                                 landmarks[i, 0].astype('int32'), 5)
            lm[rr, cc, :] = np.array((255, 0, 0))
        # create mask
        mask = np.zeros((height, width))
        rr, cc = draw.polygon(height - landmarks[hull.vertices, 1],
                              landmarks[hull.vertices, 0], mask.shape)
        mask[rr, cc] = 1

        save = args.save == 'True'
        path = path[:-1] if path[-1] == '/' else path
        image_name = path[path.rindex('/') + 1:-4] + '_contour_nocrf.png'
        show_result(lm,
                    mask,
                    np.tile((mask != 0)[:, :, np.newaxis], (1, 1, 3)) * im,
                    save=save,
                    filename='images/' + image_name)

        # add CRF
        #prob = np.concatenate(((1-mask)[np.newaxis,:,:]*0.9 + mask[np.newaxis, :, :]*0.1, mask[np.newaxis, :, :]*0.9 + (1-mask)[np.newaxis, :, :]*0.1), axis=0)
        # soften the hard mask into a foreground probability map
        prob = ndimage.gaussian_filter(mask * 1.0, sigma=5)
        prob = np.concatenate(
            ((1 - prob)[np.newaxis, :, :], prob[np.newaxis, :, :]), axis=0)

        crf_map = CRF(prob, np.array(im))  # avoid shadowing the built-in `map`
        image_name = path[path.rindex('/') + 1:-4] + '_contour_crf.png'
        show_result(im,
                    crf_map,
                    np.tile((crf_map != 0)[:, :, np.newaxis], (1, 1, 3)) * im,
                    save=save,
                    filename='images/' + image_name)
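
Both contour examples read args.image_list, and Example #2 additionally reads args.save. A minimal entry point consistent with those fields might look like this (hypothetical; the original argument parser is not shown):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_list', required=True,
                        help='text file listing one image path per line')
    parser.add_argument('--save', default='True',
                        help="pass 'True' to save results under images/")
    main(parser.parse_args())
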
Example #3
        G_losses.append(G_train_loss.data)

        num_iter += 1

    epoch_end_time = time.time()
    per_epoch_ptime = epoch_end_time - epoch_start_time

    print('[%d/%d] - ptime: %.2f, loss_d: %.3f, loss_g: %.3f' %
          ((epoch + 1), opt.train_epoch, per_epoch_ptime,
           torch.mean(torch.FloatTensor(D_losses)),
           torch.mean(torch.FloatTensor(G_losses))))
    fixed_p = root + 'Fixed_results/' + model + str(epoch + 1) + '.png'
    util.show_result(G,
                     Variable(fixed_x_, volatile=True),
                     fixed_y_, (epoch + 1),
                     save=True,
                     path=fixed_p)
    train_hist['per_epoch_ptimes'].append(per_epoch_ptime)

end_time = time.time()
total_ptime = end_time - start_time
train_hist['total_ptime'].append(total_ptime)

print("Avg one epoch ptime: %.2f, total %d epochs ptime: %.2f" %
      (torch.mean(torch.FloatTensor(
          train_hist['per_epoch_ptimes'])), opt.train_epoch, total_ptime))
print("Training finish!... save training results")
torch.save(G.state_dict(), root + model + 'generator_param.pkl')
torch.save(D.state_dict(), root + model + 'discriminator_param.pkl')
with open(root + model + 'train_hist.pkl', 'wb') as f:
    pickle.dump(train_hist, f)
Example #4
        G_losses.append(loss_g_)
        train_hist['G_losses'].append(loss_g_)

        num_iter += 1

    epoch_end_time = time.time()
    per_epoch_ptime = epoch_end_time - epoch_start_time

    print('[%d/%d] - ptime: %.2f, loss_d: %.3f, loss_g: %.3f' %
          ((epoch + 1), opt.train_epoch, per_epoch_ptime,
           np.mean(D_losses), np.mean(G_losses)))
    fixed_p = root + 'Fixed_results/' + model + str(epoch + 1) + '.png'
    outputs = sess.run(G, {x: fixed_x_})
    util.show_result(fixed_x_,
                     outputs,
                     fixed_y_, (epoch + 1),
                     save=True,
                     path=fixed_p)
    train_hist['per_epoch_ptimes'].append(per_epoch_ptime)

end_time = time.time()
total_ptime = end_time - start_time
train_hist['total_ptime'].append(total_ptime)

print("Avg. one epoch ptime: %.2f, total %d epochs ptime: %.2f" %
      (np.mean(train_hist['per_epoch_ptimes']), opt.train_epoch, total_ptime))
print("Training finish!... save training results")
with open(root + model + 'train_hist.pkl', 'wb') as f:
    pickle.dump(train_hist, f)

saver.save(sess, root + model + 'params.ckpt')
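
Example #4 checkpoints the TensorFlow session with saver.save; restoring the parameters for later inference is symmetric, provided the same graph (G, x) has been rebuilt first. A sketch in the snippet's TF1 graph-mode style:

import tensorflow as tf

# rebuild the graph exactly as during training, then:
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, root + model + 'params.ckpt')
    outputs = sess.run(G, {x: fixed_x_})
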
Example #5
        G_losses.append(G_train_loss.data)

        num_iter += 1

    epoch_end_time = time.time()
    per_epoch_ptime = epoch_end_time - epoch_start_time

    print('[%d/%d] - ptime: %.2f, loss_d: %.3f, loss_g: %.3f' %
          ((epoch + 1), opt.train_epoch, per_epoch_ptime,
           torch.mean(torch.FloatTensor(D_losses)),
           torch.mean(torch.FloatTensor(G_losses))))
    fixed_p = root + 'Fixed_results/' + model + str(epoch + 1) + '.png'
    util.show_result(G,
                     Variable(fixed_x_.cuda()),
                     fixed_y_, (epoch + 1),
                     save=True,
                     path=fixed_p)
    train_hist['per_epoch_ptimes'].append(per_epoch_ptime)

end_time = time.time()
total_ptime = end_time - start_time
train_hist['total_ptime'].append(total_ptime)

print("Avg one epoch ptime: %.2f, total %d epochs ptime: %.2f" %
      (torch.mean(torch.FloatTensor(
          train_hist['per_epoch_ptimes'])), opt.train_epoch, total_ptime))
print("Training finish!... save training results")
torch.save(G.state_dict(), root + model + 'generator_param.pkl')
torch.save(D.state_dict(), root + model + 'discriminator_param.pkl')
with open(root + model + 'train_hist.pkl', 'wb') as f:
    pickle.dump(train_hist, f)
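
The PyTorch snippets (Examples #3 and #5) evaluate the generator through the pre-0.4 Variable(..., volatile=True) / Variable(...cuda()) API, which modern PyTorch has removed. The equivalent today is to wrap the evaluation in torch.no_grad() (a sketch reusing G and fixed_x_ from the snippets above):

import torch

# Variable was merged into Tensor in PyTorch 0.4 and `volatile` was dropped;
# disabling autograd for evaluation now uses the no_grad context instead.
with torch.no_grad():
    fake = G(fixed_x_.cuda())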