Example #1
import cv2
import numpy as np


def readVideo(vid, clipDuration=64):
    """Read a video file and split it into clips of `clipDuration` preprocessed frames."""
    video = []
    clip = []
    i = 0
    cap = cv2.VideoCapture(vid)
    if not cap.isOpened():
        print("Error opening video stream or file")
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame = preprocess_input(frame)
        clip.append(frame)
        i += 1
        if i == clipDuration:
            # A full clip has been collected; start a new one.
            video.append(clip)
            clip = []
            i = 0
    cap.release()

    return np.asarray(video, dtype=np.float32)
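A minimal usage sketch for the function above, assuming preprocess_input keeps the frame shape and using a hypothetical video path:

clips = readVideo('input.mp4', clipDuration=64)  # hypothetical path
# Expected shape: (num_clips, clipDuration, height, width, channels)
print(clips.shape)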
Example #2
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import load_model

# load_train, separate_train, preprocess_input, train_model and SEED are defined
# in the project's own modules (not shown here). The imports above are reconstructed;
# load_model may come from keras or tensorflow.keras depending on the project.


# The original signature was truncated; the figsize/cmap defaults are assumptions.
def plot_confusion_matrix(X, Y, figsize=(10, 10), cmap='Blues'):
    # Uses the module-level `model` loaded in the __main__ block below.
    Y_pred = model.predict(X)
    Y_pred = np.argmax(Y_pred, axis=1)
    Y_true = np.argmax(Y, axis=1)
    cm = confusion_matrix(Y_true, Y_pred)

    plt.figure(figsize=figsize)
    ax = sns.heatmap(cm, cmap=cmap, annot=True, square=True)
    ax.set_ylabel('Actual', fontsize=30)
    ax.set_xlabel('Predicted', fontsize=30)
    plt.show()


if __name__ == '__main__':
    train_data = load_train()
    X_train, Y_train = separate_train(train_data)
    X_train, Y_train = preprocess_input(X_train, Y_train)
    X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train,
                                                      test_size=0.1,
                                                      random_state=SEED)

    # train_model(X_train, X_val, Y_train, Y_val)
    model = load_model('model.h5')

    # To test new data, load the data, separate the features from the labels
    # and preprocess the data. Then change X_val and Y_val to the desired data.
    final_loss, final_accuracy = model.evaluate(X_val, Y_val, verbose=0)
    print('Final Loss: {:.4f}, Final Accuracy: {:.4f}'.format(
        final_loss, final_accuracy))

    plot_confusion_matrix(X_val, Y_val)
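Following the comment in the example above, a hedged sketch of how new test data might be wired in; load_test and separate_test are hypothetical counterparts of load_train and separate_train:

# Hypothetical: evaluate on a held-out test set instead of the validation split.
test_data = load_test()                    # assumed counterpart of load_train()
X_test, Y_test = separate_test(test_data)  # assumed counterpart of separate_train()
X_test, Y_test = preprocess_input(X_test, Y_test)

test_loss, test_accuracy = model.evaluate(X_test, Y_test, verbose=0)
plot_confusion_matrix(X_test, Y_test)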
    
Example #3
import glob
import os
import time

import torch
from tqdm import tqdm


# The enclosing function was truncated in the original snippet; this header is an
# assumption that simply names the free variables used below.
def extract_patches(args, image_dir, patch_dir, patch_size, n_patches, figure_num):
    os.makedirs(patch_dir, exist_ok=True)
    if args.make_list:
        f = open(os.path.join(patch_dir, 'list.txt'), 'w')

    # Get the names of all data files (ensure image_dir ends with a path separator).
    image_dir = os.path.join(image_dir, '')
    for sample_file in tqdm(glob.glob(image_dir + '*-00128spp.exr')):
        num = sample_file[len(image_dir):sample_file.index('-')]
        gt_file = image_dir + '{}-08192spp.exr'.format(num)

        if args.check_time:
            prev_time = time.time()
        data = preprocess_input(sample_file, gt_file)
        patches = importanceSampling(data, patch_size, n_patches, figure_num)
        cropped = [crop(data, tuple(pos), patch_size) for pos in patches]

        # Save each cropped patch as a .pt tensor file.
        for i in range(len(cropped)):
            file_name = '{}_{}.pt'.format(num, i)
            torch.save(cropped[i], os.path.join(patch_dir, file_name))
            if args.make_list:
                f.write(file_name + '\n')
        if args.check_time:
            print('Time to get patches from one image', time.time() - prev_time)
    if args.make_list:
        f.close()
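The snippet relies on command-line flags parsed elsewhere; a minimal sketch of an argparse setup covering only the flags it actually reads (make_list, check_time), with hypothetical help text:

import argparse

# Hypothetical argument parser; only the flags referenced above are defined here.
parser = argparse.ArgumentParser()
parser.add_argument('--make_list', action='store_true',
                    help='write a list.txt enumerating the saved patch files')
parser.add_argument('--check_time', action='store_true',
                    help='print the per-image patch extraction time')
args = parser.parse_args()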
Example #4
import gc
import os
import time

import cv2
import numpy as np

# Free variables below (test, o, net, size, alpha, infer_model, preprocess_input)
# come from the surrounding script, which was truncated in this snippet.
nn = test.shape[0]
if o > 0:
    sub = 'result/%s_%s_' % (net, size) + time.strftime(
        '%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))
    if not os.path.exists(sub):
        os.makedirs(sub)
    cost_time = []
    for i in range(nn):
        path = test.filepath[i]
        fp = test.filename[i].split('.')[0]
        t0 = time.time()
        image = cv2.imread(path)
        h, w = image.shape[:2]
        image = cv2.resize(image, (size, size), interpolation=cv2.INTER_CUBIC)
        image = image[:, :, ::-1]  # BGR -> RGB
        image = preprocess_input(image).reshape(1, size, size, 3)
        pred = infer_model(image)
        pred = pred.numpy().reshape(size, size)
        print('==> ', i, fp, pred.mean())
        # Binarize the prediction with threshold `alpha` and scale to 0/255.
        pred[pred >= alpha] = 1.
        pred[pred < alpha] = 0
        pred = pred * 255.
        pred = pred.astype(np.uint8)
        # mask = cv2.erode(pred, (7, 7))
        mask = cv2.resize(pred, (w, h), interpolation=cv2.INTER_CUBIC)
        mask[mask > 0] = 255
        cost_time.append(time.time() - t0)
        cv2.imwrite(sub + '/' + fp + '_mask.jpg', mask)
        del mask, image, pred
        gc.collect()
    print(' avg cost time :%.6f' % np.array(cost_time).mean())
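infer_model is not shown in the snippet; since the prediction supports .numpy(), a minimal sketch assuming it is a Keras segmentation model loaded from a hypothetical file:

import tensorflow as tf

# Assumption: a saved Keras segmentation model; calling it directly returns a
# tf.Tensor, which is why .numpy() works on the prediction above.
infer_model = tf.keras.models.load_model('seg_model.h5', compile=False)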