Example #1
import datetime
import os
import random
import time

import numpy as np

import models  # project-local module providing get_model_from_id, compile_model and load_input


def test_wo_pred(model_id, model_path, data_paths_path, batch_size=16, load_epoch=None):

    input_size = 256

    output_classes = 7
    start_time = time.clock()

    model = models.get_model_from_id(model_id, input_size, output_classes)
    if model is None:
        return

    # Set up the model directory and open the test log
    model_path = model_path + model_id
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    log = open(model_path + "/log_testing_" + str(load_epoch) + ".txt", "a")
    log.write("\n\n\nTesting initialised: {:%Y-%m-%d %H:%M:%S}".format(datetime.datetime.now()))

    if load_epoch is not None:
        log.write("\nLoading past model:")
        log.write("\n" + model_path + "/" + model_id + "_" + str(load_epoch) + ".h5")
        model.load_weights(model_path + "/" + model_id + "_" + str(load_epoch) + ".h5")
    else:
        print "ERROR: Need load_epoch number to load"
        return

    model = models.compile_model(model_id, model)

    # get data
    with open(data_paths_path + "test_paths_equalised.txt") as f:
        all_paths = f.readlines()
    random.shuffle(all_paths)  # randomise evaluation order
    all_paths = [line.split() for line in all_paths]  # split each line into input path (x) and label (y)
    X_batch = None
    Y_batch = []
    sum_loss = 0
    sum_acc = 0
    past = 0
    count = 0
    inner_count = 0
    for path in all_paths:
        # print path
        count += 1

        x, y = models.load_input(model_id, path, input_size)
        if X_batch is None:
            X_batch = x
        else:
            if "MVK_12" in model_id:
                # MVK_12 models take two inputs, so append to each input array separately
                X_batch[0] = np.append(X_batch[0], x[0], axis=0)
                X_batch[1] = np.append(X_batch[1], x[1], axis=0)
            else:
                X_batch = np.append(X_batch, x, axis=0)
        Y_batch.append(y)

        if batch_size is not None:
            if (count % batch_size == 0) or (count == len(all_paths)):
                Y_batch = np.eye(output_classes)[Y_batch]

                # evaluate this batch
                loss, acc = model.test_on_batch(X_batch, Y_batch)
                sum_loss += loss
                sum_acc += acc
                inner_count += 1

                # clear batch
                X_batch = None
                Y_batch = []
                if int((float(count) / len(all_paths)) * 100) > past:

                    tr = (len(all_paths) - count) / ((count) / (time.clock() - start_time))
                    print "(%d) [%.5f] Image: %d / %d; TR: %02d:%02d:%02d;" % (
                        past,
                        sum_loss / inner_count,
                        count,
                        len(all_paths),
                        int((tr / 60) / 60),
                        int((tr / 60) % 60),
                        int(tr % 60),
                    )

                    log.write(
                        "\n(%d) [%.5f] Image: %d / %d; TR: %02d:%02d:%02d;"
                        % (
                            past,
                            sum_loss / inner_count,
                            count,
                            len(all_paths),
                            int((tr / 60) / 60),
                            int((tr / 60) % 60),
                            int(tr % 60),
                        )
                    )

                    past += 5

    if batch_size is None:
        # no manual batching: all samples were accumulated above; one-hot encode the labels
        Y_batch = np.eye(output_classes)[Y_batch]
        loss, acc = model.evaluate(X_batch, Y_batch, batch_size=32)
    else:
        loss = sum_loss / inner_count
        acc = sum_acc / inner_count

    log.write("\n %d samples. \t loss: %.5f \t acc: %.5f" % (count, loss, acc))
    print ("\n %d samples. \t loss: %.5f \t acc: %.5f" % (count, loss, acc))

    log.close()
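
A minimal sketch of how test_wo_pred might be called, assuming a trained checkpoint <model_id>_<epoch>.h5 already exists under model_path; the model id, directories and epoch below are placeholders, not values from the original project.

# Hypothetical invocation of test_wo_pred (all ids and paths are placeholders)
test_wo_pred(
    model_id='MVK_12_00',            # assumed id understood by models.get_model_from_id
    model_path='/models/',           # directory holding <model_id>/<model_id>_<epoch>.h5
    data_paths_path='/data/paths/',  # directory containing test_paths_equalised.txt
    batch_size=16,
    load_epoch=50,                   # epoch of the checkpoint to restore before testing
)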
Example #2
import os
import time

import numpy as np
from keras.models import Model

import models  # project-local module providing get_model_from_id and load_input
# DRIVE is assumed to be a project-level constant (local path prefix) defined elsewhere


def predict(model_id, model_path, data_paths_path, split, batch_size=None, load_epoch=None, layers=['pred'], save_path=None, equalised=False, soft=None):

    start_time = time.clock()

    model, output_classes = models.get_model_from_id(model_id)
    if model is None:
        return

    # Ensure the model directory exists
    model_path = model_path + model_id
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    if load_epoch is not None:
        print 'Loading model: ' + model_path + '/' + model_id + '_' + str(load_epoch) + '.h5'
        model.load_weights(model_path + '/' + model_id + '_' + str(load_epoch) + '.h5')
    else:
        if model_id != 'MVK_17_04':
            print 'ERROR: Need load_epoch number to load'
            return

    # model = models.compile_model(model_id, model)  # don't need to compile for prediction

    # get data
    if equalised:
        with open(data_paths_path + split + '_paths_equalised.txt') as f:
            all_paths = f.readlines()
    else:
        with open(data_paths_path + split + '_paths.txt') as f:
            all_paths = f.readlines()

    # all_paths = all_paths[:500]

    all_paths = [line.split() for line in all_paths]  # split each line into input path (x) and label (y)


    for layer_name in layers:
        # Truncate the model so it outputs the requested layer's activations. Note: `model` is
        # reassigned, so with several layers each later layer must still exist in the truncated model.
        # model, output_classes = models.get_model_from_id(model_id)
        model = Model(input=model.input, output=model.get_layer(layer_name).output)

        X_batch = None
        Y_batch = []
        Y_gt = None
        Y_pred = None
        past = 0
        count = 0
        inner_count = 0
        for path in all_paths:
            # print path
            count += 1
            # remap the stored dataset path onto the local DRIVE prefix
            cor_path = DRIVE + path[0][path[0].find('/DATASETS/')+1:]
            if path[0] != cor_path:
                # print 'Paths in .txt files seem incorrect'
                # print 'Changed from: '+path[0]
                # print 'Changed to: '+ cor_path
                path[0] = cor_path
            x, y = models.load_input(model_id, path, soft)

            if X_batch is None:
                X_batch = x
            else:
                if 'MVK_12' in model_id:
                    X_batch = [np.append(X_batch[0], x[0], axis=0), np.append(X_batch[1], x[1], axis=0)]
                else:
                    X_batch = np.append(X_batch, x, axis=0)
            Y_batch.append(y)


            if batch_size is not None:
                if (count % batch_size == 0) or (count == len(all_paths)):
                    # Y_batch = np.eye(output_classes)[Y_batch]

                    # predict on this batch and accumulate predictions and ground truth
                    if Y_gt is None:
                        Y_pred = model.predict_on_batch(X_batch)
                        Y_gt = Y_batch
                    else:
                        Y_pred = np.append(Y_pred, model.predict_on_batch(X_batch),axis=0)
                        Y_gt = np.append(Y_gt,Y_batch,axis=0)
                    inner_count += 1

                    # clear batch
                    X_batch = None
                    Y_batch = []

            else:
                # no batching: note Y_pred / Y_gt are overwritten on every iteration here,
                # so only the last sample's prediction and label survive the loop
                Y_pred = model.predict(x)
                Y_gt = np.eye(output_classes)[y]

            if int((float(count) / len(all_paths)) * 100) > past:

                tr = (len(all_paths) - count) / ((count) / (time.clock() - start_time))
                print '(%d) Image: %d / %d; TR: %02d:%02d:%02d;' % (past, count, len(all_paths), int((tr / 60) / 60),
                                                                    int((tr / 60) % 60), int(tr % 60))

                past += 5

        # if batch_size is None:
        #     Y_pred = model.predict_proba(X_batch, batch_size=32)

        # save predictions to file
        all_array = {}
        if save_path is not None:
            print '\nSaving ....'
            save_path += model_id + '_' + str(load_epoch)+'/'
            print save_path
            for p in range(len(Y_gt)):
                path = all_paths[p]
                # write out to npy files
                image_name = path[0].split('/')[-1]

                if not os.path.exists(save_path + layer_name + '/npy/ind/'):
                    os.makedirs(save_path + layer_name + '/npy/ind/')

                np.save(save_path + layer_name + '/npy/ind/' + image_name[:-4] + '.npy', np.squeeze(Y_pred[p]))
                all_array[image_name[:-4]] = [Y_gt[p], Y_pred[p]]

            # np.save(save_path + layer_name + '/npy/'+split+'.npy', all_array)

    return Y_gt, Y_pred
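
As a rough usage sketch (placeholder ids, directories and split name, not from the original project), predict could be called like this to dump the 'pred' layer outputs for a validation split and also get the ground truth and prediction arrays back:

# Hypothetical invocation of predict (all ids and paths are placeholders)
Y_gt, Y_pred = predict(
    model_id='MVK_12_00',
    model_path='/models/',
    data_paths_path='/data/paths/',
    split='val',                     # reads val_paths.txt (or val_paths_equalised.txt)
    batch_size=32,
    load_epoch=50,
    layers=['pred'],                 # layer whose activations are saved
    save_path='/predictions/',       # per-image .npy files are written under here
)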