Example #1
0
def evaluate(get_labels=0, bb_only=False):
    """Evaluate the bounding-box / parts model on the validation set.

    Args:
        get_labels: when non-zero, it is used as the generator batch size
            and the function returns the first batch's inputs, labels and
            predictions instead of aggregate metrics.
        bb_only: if True the model has 4 outputs (bounding box corners),
            otherwise 38 (all part coordinates).

    Returns:
        (x, y, y_pred) arrays when ``get_labels`` is truthy, otherwise
        None (aggregate metrics are printed).
    """
    # load top model architecture; output width depends on the target set
    num_classes = 4 if bb_only else 38
    top_model = create_model(num_classes=num_classes,
                             weights='best_weights/defrost_all_bb_8.hdf5',
                             activation=None)
    top_model.compile(optimizer=optimizers.Adam(),
                      loss='mean_absolute_error',
                      metrics=['accuracy'])

    # load pickled parts info
    unpickled_test = pickle.load(open('cache/bb_validation.p', 'rb'))

    time_start = time.time()
    # `get_labels` doubles as the batch size when labels are requested;
    # otherwise load the whole validation set in one 4500-image batch
    batch_size = get_labels if get_labels else 4500
    test_generator = utils.img_parts_generator(part_file_name,
                                               validation_data_dir,
                                               batch_size=batch_size,
                                               unpickled=unpickled_test,
                                               load_image=True,
                                               bb_only=bb_only)
    if get_labels:
        # Accumulate per-batch arrays in lists and concatenate once.
        # This replaces the original `if not x:` test, which would raise
        # on a numpy array after the first batch.
        xs, ys, preds_list = [], [], []
        for inp, label in test_generator:
            xs.append(inp)
            ys.append(label)
            preds_list.append(top_model.predict_on_batch(inp))
            break  # as in the original, only the first batch is returned
        if not xs:  # empty generator: match original's empty-list result
            return [], [], []
        x = xs[0] if len(xs) == 1 else np.concatenate(xs)
        y = ys[0] if len(ys) == 1 else np.concatenate(ys)
        y_pred = preds_list[0] if len(preds_list) == 1 else np.concatenate(preds_list)
        return x, y, y_pred
    else:
        test_eval = []
        for inp, label in test_generator:
            res = top_model.evaluate(inp, label, verbose=0, batch_size=400)
            test_eval.append(res)
        test_eval = np.mean(test_eval, axis=0)
        # fixed: original format string ran the two numbers together
        print('Loss: {:.4f} Evaluate: {:.4f}'.format(test_eval[0],
                                                     test_eval[1]))
        # fixed: time_taken was computed but never reported
        print('Time taken: {:.2f}s'.format(time.time() - time_start))
Example #2
0
def train_top_model():
    """Train the 500-class classification head on cached bottleneck features.

    For each of `epochs` passes: fits the head batch-by-batch on the
    training bottlenecks, evaluates on a 5-batch sample of the validation
    bottlenecks, saves weights whenever validation loss improves, and
    appends a one-line summary to a console dump file.
    """
    # load top model architecture
    top_model, _, _ = create_top_model(num_classes=500)
    top_model.compile(optimizer=optimizers.Adam(),
                      loss='categorical_crossentropy',
                      metrics=['accuracy', metrics.top_k_categorical_accuracy])

    best_val_loss = 100  # sentinel; any real loss should beat this

    unpickled_train = pickle.load(open('cache/parts_train.p', 'rb'))
    unpickled_valid = pickle.load(open('cache/parts_validation.p', 'rb'))

    # fixed: context manager guarantees the dump file is closed even if
    # training raises part-way through
    with open('console_dumps/{}.txt'.format(exp_name), 'w') as f:
        for i in range(epochs):
            time_start = time.time()
            train_generator = utils.img_parts_generator(
                part_file_name, train_data_dir, batch_size=5000,
                bottleneck_file='bottleneck/bottleneck_60_train.npy',
                unpickled=unpickled_train)
            val_generator = utils.img_parts_generator(
                part_file_name, validation_data_dir, batch_size=5000,
                bottleneck_file='bottleneck/bottleneck_60_validation.npy',
                unpickled=unpickled_valid)

            train_eval = []  # per-batch [loss, acc]
            print('Training')
            for inp, label in train_generator:
                hist = top_model.fit(inp, label, verbose=0, batch_size=400)
                train_eval.append([hist.history['loss'][0],
                                   hist.history['acc'][0]])
            # mean of loss and acc over all training batches
            train_eval = np.mean(train_eval, axis=0)

            val_eval = []
            print('Evaluating validation set')
            for j, (inp, label) in enumerate(val_generator):
                res = top_model.evaluate(inp, label, verbose=0, batch_size=400)
                val_eval.append(res)
                if j == 4:  # sample only the first 5 validation batches
                    break
            val_eval = np.mean(val_eval, axis=0)

            if val_eval[0] < best_val_loss:
                best_val_loss = val_eval[0]
                # NOTE(review): round() truncates the loss to an integer
                # before the {:.4f} formatting — kept as-is because it
                # matches the existing saved-weight file names.
                top_model.save_weights('models/{}_{}_{:.4f}.hdf5'.format(
                    exp_name, i + 1, round(best_val_loss)))
            time_taken = time.time() - time_start
            log = ('Epoch: {}, train_l: {:.4f}, train_a: {:.4f}, '
                   'val_l: {:.4f}, val_a: {:.4f}, time: {:.4f}\n').format(
                       i + 1, train_eval[0], train_eval[1],
                       val_eval[0], val_eval[1], time_taken)
            f.write(log)
            print(log)
Example #3
0
def train_top_model():
    """Fine-tune the bounding-box regression head on cached bottlenecks.

    Starts from previously-trained top-model weights (linear outputs,
    MAE loss for coordinate regression), trains for `epochs` passes,
    evaluates on the full validation set each epoch, saves weights on
    validation-loss improvement, and logs one summary line per epoch.
    """
    # load top model architecture; activation=None -> linear outputs
    # suitable for coordinate regression
    top_model, _, _ = create_top_model(
        num_classes=4,
        activation=None,
        weights='models/v2_top_bb_30_28.0000.hdf5')
    top_model.compile(optimizer=optimizers.Adam(),
                      loss='mean_absolute_error',
                      metrics=['accuracy'])

    print(top_model.summary())

    bottlenecks_train = utils.load_bottlenecks(bottleneck_dir +
                                               'bottleneck_60_train.npy')
    bottlenecks_valid = utils.load_bottlenecks(bottleneck_dir +
                                               'bottleneck_60_validation.npy')

    best_val_loss = 100  # sentinel; any real loss should beat this

    unpickled_train = pickle.load(open('cache/bb_train.p', 'rb'))
    unpickled_valid = pickle.load(open('cache/bb_validation.p', 'rb'))

    # fixed: context manager guarantees the dump file is closed even if
    # training raises part-way through
    with open('console_dumps/{}.txt'.format(exp_name), 'w') as f:
        for i in range(epochs):
            time_start = time.time()
            train_generator = utils.img_parts_generator(
                part_file_name,
                'train/',
                batch_size=5000,
                bottlenecks=bottlenecks_train,
                unpickled=unpickled_train,
                bb_only=True)
            val_generator = utils.img_parts_generator(
                part_file_name,
                'validation/',
                batch_size=5000,
                bottlenecks=bottlenecks_valid,
                unpickled=unpickled_valid,
                bb_only=True)

            train_eval = []  # stores metrics of training iterations
            for inp, label in train_generator:
                hist = top_model.fit(inp, label, verbose=0, batch_size=400)
                train_eval.append([hist.history['loss'][0],
                                   hist.history['acc'][0]])
            # find mean of loss and acc
            train_eval = np.mean(train_eval, axis=0)

            val_eval = []  # stores metrics of validation iterations
            # unlike the classification variant, this evaluates ALL
            # validation batches (no early break)
            for inp, label in val_generator:
                res = top_model.evaluate(inp, label, verbose=0, batch_size=400)
                val_eval.append(res)
            val_eval = np.mean(val_eval, axis=0)

            # save weights if current loss beats best loss
            if val_eval[0] < best_val_loss:
                best_val_loss = val_eval[0]
                # NOTE(review): round() truncates the loss to an integer
                # before the {:.4f} formatting — kept as-is because it
                # matches the existing saved-weight file names.
                top_model.save_weights('models/{}_{}_{:.4f}.hdf5'.format(
                    exp_name, i + 1, round(best_val_loss)))
            time_taken = time.time() - time_start
            log = ('Epoch: {}, train_l: {:.4f}, train_a: {:.4f}, '
                   'val_l: {:.4f}, val_a: {:.4f}, time: {:.4f}\n').format(
                       i + 1, train_eval[0], train_eval[1], val_eval[0],
                       val_eval[1], time_taken)
            f.write(log)
            print(log)
Example #4
0
def defrost_all_parts():
    """Fine-tune the full network (base + top) on bounding-box regression.

    Loads pre-trained base and top weights, trains all layers end-to-end
    with a small learning rate, evaluates on a 5-batch sample of the
    validation set each epoch, saves weights on validation-loss
    improvement, and logs per-epoch statistics to a console dump file.
    """
    # load full model: pre-trained base + bounding-box regression top
    base_weights = 'best_weights/defrost_everything_init_47_freeze_fixed_weights.03-0.60.hdf5'
    top_weights = 'best_weights/top_bb.hdf5'
    model = create_model(num_classes=4,
                         activation=None,
                         weights=base_weights,
                         weights_output_dim=500,
                         top_weights=top_weights)
    # low learning rate because every layer is unfrozen ("defrosted")
    model.compile(optimizer=optimizers.Adam(lr=1e-5),
                  loss='mean_absolute_error',
                  metrics=['accuracy'])
    print(model.summary())
    best_val_loss = 100  # sentinel; any real loss should beat this

    unpickled_train = pickle.load(open('cache/bb_train.p', 'rb'))
    unpickled_valid = pickle.load(open('cache/bb_validation.p', 'rb'))

    # fixed: context manager guarantees the dump file is closed even if
    # training raises part-way through
    with open('console_dumps/{}.txt'.format(exp_name), 'w') as f:
        for i in range(epochs):
            # begin epoch
            time_start = time.time()
            # init the generators for train and valid
            train_generator = utils.img_parts_generator(part_file_name,
                                                        train_data_dir,
                                                        batch_size=5000,
                                                        load_image=True,
                                                        unpickled=unpickled_train,
                                                        bb_only=True)
            val_generator = utils.img_parts_generator(part_file_name,
                                                      validation_data_dir,
                                                      batch_size=3000,
                                                      load_image=True,
                                                      unpickled=unpickled_valid,
                                                      bb_only=True)

            # j tracks batch in epoch
            j = 0
            train_eval = []  # per-batch [loss, acc]
            for inp, label in train_generator:
                sub_epoch_start = time.time()
                hist = model.fit(inp, label, verbose=1, batch_size=batch_size)
                res = [hist.history['loss'][0], hist.history['acc'][0]]
                train_eval.append(res)
                sub_e_time = time.time() - sub_epoch_start
                print(
                    "[train] Epoch: {}/{} Batch: {}/{} train_l: {:.4f} train_acc: {:.4f} time: {:.2f}"
                    .format(i + 1, epochs, j + 1, 42320 / batch_size, res[0],
                            res[1], sub_e_time))
                j += 1
            # find mean of loss and acc
            train_eval = np.mean(train_eval, axis=0)

            val_eval = []
            j = 0
            print('Evaluating validation set')
            for inp, label in val_generator:
                print(inp.shape, label.shape)
                res = model.evaluate(inp, label, verbose=1, batch_size=batch_size)
                print(
                    "[valid] Epoch: {}/{} Batch: {}/{} val_l: {:.4f} val_acc: {:.4f}"
                    .format(i + 1, epochs, j + 1, 3000 / batch_size, res[0],
                            res[1]))
                val_eval.append(res)
                j += 1
                if j == 5:  # sample only the first 5 validation batches
                    break
            val_eval = np.mean(val_eval, axis=0)
            if val_eval[0] < best_val_loss:
                print('Saving weights')
                best_val_loss = val_eval[0]
                model.save_weights('models/{}_{}_{:.4f}.hdf5'.format(
                    exp_name, i + 1, round(best_val_loss)))
            time_taken = time.time() - time_start
            # fixed: removed `train_eval = [0.0, 0.0]`, which clobbered the
            # computed training metrics immediately before they were logged
            log = 'Epoch: {}, train_l: {:.4f}, train_a: {:.4f}, val_l: {:.4f}, val_a: {:.4f}, time: {:.4f}\n'.format(
                i + 1, train_eval[0], train_eval[1], val_eval[0], val_eval[1],
                time_taken)
            f.write(log)
            print(log)
print(model.summary())

# In[6]:

print(model.layers[-1].name)

# In[5]:

from keras import backend as K
from utils import img_parts_generator
from KerasDeconv import DeconvNet

# In[6]:

# batches of 10 validation images; part annotations not needed here
generator = img_parts_generator('parts_info.txt',
                                data_dir='../cropped/validation/',
                                batch_size=10,
                                load_parts=False,
                                load_image=True)

# In[7]:

deconv_net = DeconvNet(model)

for img in generator:
    # fixed: `deconv_get_deconv` was an undefined name (NameError); the
    # deconvolution method lives on the DeconvNet instance created above
    deconv = deconv_net.get_deconv(img, 'conv2d_139', 0, 'all')

    # postprocess and save image
    break
    #img.save('results/{}_{}_{}.png'.format(layer_name, feature_to_visualize, visualize_mode))
Example #6
0

# Configuration for generating aligned crops of the validation images.
part_file_name = 'parts_info.txt'
data_dir = 'validation/'
batch_size = 1  # NOTE(review): unused — the generator below hard-codes batch_size=20
steps = 4  # NOTE(review): unused in this script
target_dim = None  # keep original image dimensions
cache = False
save_path = '../cropped_aligned'
target_angle = 50  # NOTE(review): unused here; presumably consumed by align_crop — verify

# yields (images, file paths, part annotations) per batch of 20
generator = img_parts_generator(part_file_name,
                                data_dir,
                                batch_size=20,
                                load_image=True,
                                target_dim=target_dim,
                                cache=False,
                                load_paths=True,
                                load_parts=True,
                                bb_only=False)

# part ids belonging to the body vs. the head (1-based annotation indices)
body_index = set([2, 4, 5, 10, 11, 15, 16, 17])
head_index = set([3, 6, 7, 8, 9, 12, 13, 14, 18])

# NOTE(review): bar.next() is never called, so the progress bar never
# advances between start and finish — confirm whether align_crop was
# expected to drive it.
bar = Bar('Cropping aligned image', max=3000)
with ThreadPoolExecutor(max_workers=100) as executor:

    # one task per 20-image batch; align_crop is assumed thread-safe — verify
    for imgs, paths, parts in generator:
        executor.submit(align_crop, imgs, paths, parts)

bar.finish()
Example #7
0
def crop():
    """Crop test/train images to their predicted bounding boxes.

    Loads the 4-output bounding-box regression model, predicts a box for
    each image, rescales the prediction to the original image size,
    enlarges it by 10%, crops, resizes to 299x299, and saves the result
    under `save_path` mirroring the source directory layout.
    """

    part_file_name = 'parts_info.txt'
    #validation_data_dir = 'validation/'
    batch_size = 100
    target_dim = (299, 299)  # network input size
    cache = False  # NOTE(review): unused — the generator call passes cache=False literally
    save_path = '../cropped_pred_scale1.1'

    # bounding-box model: 4 linear outputs (two corner points)
    model = create_model(num_classes=4,
                         weights='best_weights/defrost_all_bb_8.hdf5',
                         activation=None)
    print(model.summary())
    model.compile(optimizer='adam',
                  loss='mean_absolute_error',
                  metrics=['accuracy'])

    # directories to process and their image counts (for the progress bar)
    direcs = ['test/', 'train/']
    nums = [4500, 42320]
    #direcs = ['validation/']
    #nums = [3000]
    for direc, num in zip(direcs, nums):
        gen = img_parts_generator(part_file_name,
                                  direc,
                                  batch_size=batch_size,
                                  load_image=True,
                                  target_dim=target_dim,
                                  cache=False,
                                  load_paths=True,
                                  load_orig_img=True,
                                  bb_only=True)
        bar = Bar('Cropping: ' + direc[:-1], max=num)
        for imgs, orig_imgs, paths, parts in gen:
            preds = model.predict(imgs, batch_size=100, verbose=1)
            for i in range(len(imgs)):
                img = imgs[i]
                orig_img = orig_imgs[i]
                path = paths[i]

                ## Rescaling predicted points to original dimensions
                # predictions are (x1, y1, x2, y2) in network-input coords
                t_l_point = (preds[i][0], preds[i][1])
                b_r_point = (preds[i][2], preds[i][3])
                #print("Orig: ", parts[i])
                #print("Predicted:", t_l_point, b_r_point)
                b_r_point = scale(b_r_point, orig_img.shape)
                t_l_point = scale(t_l_point, orig_img.shape)

                # enlarge the predicted box by 10% (scale=1.1)
                t_l_point, b_r_point = scale_bounding_box(t_l_point,
                                                          b_r_point,
                                                          orig_img.shape,
                                                          scale=1.1)
                # get bounding boxes
                t_l_x = int(t_l_point[0])
                t_l_y = int(t_l_point[1])
                b_r_x = int(b_r_point[0])
                b_r_y = int(b_r_point[1])
                # only crop when the box is non-degenerate
                if (b_r_y > t_l_y and b_r_x > t_l_x):
                    # NOTE(review): `crop` here resolves to THIS zero-argument
                    # function (the def shadows any imported helper named
                    # `crop`), so this call would raise TypeError at runtime.
                    # Confirm the intended helper (e.g. utils.crop) and
                    # disambiguate the name.
                    img = crop(orig_img, t_l_x, t_l_y, b_r_x, b_r_y)
                else:
                    img = orig_img
                try:
                    img = resize(img, (299, 299))
                except ValueError:
                    # dump diagnostics for the rare degenerate-crop case,
                    # then re-raise
                    print(img.shape)
                    print(orig_img.shape)
                    print(t_l_x, b_r_x, t_l_y, b_r_y)
                    print(t_l_point, b_r_point)
                    a = orig_img[t_l_y:b_r_y, :]
                    print(a.shape)
                    raise ValueError("Error")
                save_img_path = get_save_path(path, save_path)

                img = Image.fromarray(img)
                img.save(save_img_path)
                bar.next()
            #break
        bar.finish()