Example #1
def save_bottleneck_features():
    weights_file = 'best_weights/defrost_everything_init_47_freeze_fixed_weights.03-0.60.hdf5'
    model = create_model(num_classes=500,
                         include_top=False,
                         weights=weights_file)
    # num_classes is 500 because the trained weights were saved with that architecture
    print(model.summary())

    for path, num in zip(['train_vsmall', 'validation', 'train', 'test'],
                         [30, 3000, 42320, 4500]):
        print('Saving ' + path + ' bottlenecks')
        generator = img_parts_generator('parts_info.txt',
                                        data_dir=path + '/',
                                        batch_size=batch_size,
                                        load_image=True)
        bottlenecks = None
        count = 0
        bar = Bar('Extracting ' + path, max=num // batch_size)
        for img_data, _ in generator:
            count += 1
            #print('Processing Batch: ', count)
            batch_bottleneck = model.predict(img_data, batch_size=10)
            if bottlenecks is None:
                bottlenecks = batch_bottleneck
            else:
                bottlenecks = np.concatenate((bottlenecks, batch_bottleneck))
            bar.next()
        bar.finish()
        #bottlenecks = model.predict_generator(generator, num//batch_size, verbose=1)
        with open(bottleneck_dir + exp_name + '_' + path + '.npy', 'wb') as save_file:
            np.save(save_file, np.array(bottlenecks))
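For context, a minimal sketch of how the saved arrays might be reloaded to train a small classifier on top of the frozen base. The paths reuse bottleneck_dir and exp_name from above; the label arrays and the head architecture are assumptions (the head assumes include_top=False yields 4-D conv feature maps):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, GlobalAveragePooling2D

train_feats = np.load(bottleneck_dir + exp_name + '_train.npy')
val_feats = np.load(bottleneck_dir + exp_name + '_validation.npy')

# Hypothetical one-hot label arrays; they must follow the same
# (unshuffled) order in which the generator yielded the images.
# train_labels, val_labels = ...

top_model = Sequential()
top_model.add(GlobalAveragePooling2D(input_shape=train_feats.shape[1:]))
top_model.add(Dropout(0.5))
top_model.add(Dense(500, activation='softmax'))
top_model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
# top_model.fit(train_feats, train_labels,
#               validation_data=(val_feats, val_labels), epochs=10)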
Example #2
def evaluate(get_labels=0, bb_only=False):
    # get_labels doubles as a flag and a batch size: when non-zero, one batch
    # of that size is returned as (inputs, labels, predictions) instead of
    # running a full evaluation.
    # load top model architecture
    if bb_only:
        num_classes = 4
    else:
        num_classes = 38
    top_model = create_model(num_classes=num_classes,
                             weights='best_weights/defrost_all_bb_8.hdf5',
                             activation=None)
    top_model.compile(optimizer=optimizers.Adam(),
                      loss='mean_absolute_error',
                      metrics=['accuracy'])

    # load pickled parts info
    unpickled_test = pickle.load(open('cache/bb_validation.p', 'rb'))

    time_start = time.time()
    batch = get_labels if get_labels else 4500
    test_generator = utils.img_parts_generator(part_file_name,
                                               validation_data_dir,
                                               batch_size=batch,
                                               unpickled=unpickled_test,
                                               load_image=True,
                                               bb_only=bb_only)
    if get_labels:
        x, y, y_pred = None, None, None
        j = 0
        for inp, label in test_generator:
            preds = top_model.predict_on_batch(inp)
            if x is None:
                x = inp
                y = label
                y_pred = preds
            else:
                x = np.concatenate((x, inp))
                y = np.concatenate((y, label))
                y_pred = np.concatenate((y_pred, preds))
            j += 1
            if j == 1:
                # only the first batch is needed
                break
        return x, y, y_pred
    else:
        test_eval = []
        j = 0
        for inp, label in test_generator:
            res = top_model.evaluate(inp, label, verbose=0, batch_size=400)
            test_eval.append(res)
        test_eval = np.mean(test_eval, axis=0)
        print('Loss: {:.4f} Acc: {:.4f}'.format(test_eval[0], test_eval[1]))
    time_taken = time.time() - time_start
    print('Time taken: {:.2f}s'.format(time_taken))
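A hedged usage sketch: pulling one labelled batch out of evaluate and computing the per-coordinate mean absolute error of the predicted bounding boxes (the batch size of 32 is illustrative; np is numpy as used above):

# One batch of 32 images with ground truth and predictions; with bb_only,
# each label row holds the four bounding-box coordinates.
x, y, y_pred = evaluate(get_labels=32, bb_only=True)

# Mean absolute error per coordinate, e.g. [x1, y1, x2, y2].
per_coord_mae = np.mean(np.abs(y - y_pred), axis=0)
print('Per-coordinate MAE:', per_coord_mae)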
Example #3
def defrost_all_parts():

    # load top model architecture
    base_weights = 'best_weights/complete_defrost_62.hdf5'
    top_weights = 'best_weights/top_model_parts_50.hdf5'
    model = create_model(num_classes=38,
                         activation=None,
                         weights=base_weights,
                         weights_output_dim=500,
                         top_weights=top_weights)
    model.compile(optimizer=optimizers.Adam(lr=1e-5),
                  loss='mean_absolute_error',
                  metrics=['accuracy'])
    print(model.summary())
    best_val_loss = float('inf')

    f = open('console_dumps/{}.txt'.format(exp_name),'w')

    unpickled_train = pickle.load(open('cache/parts_train.p','rb'))
    unpickled_valid = pickle.load(open('cache/parts_validation.p','rb'))
    #unpickled_test = pickle.load(open('cache/parts_test.p','rb'))

    for i in range(epochs):
        # begin epoch
        time_start = time.time()
        # init  the generators for train and valid
        train_generator = utils.img_parts_generator(part_file_name,
                                                    train_data_dir,
                                                    batch_size=5000,
                                                    load_image=True,
                                                    unpickled=unpickled_train)
        val_generator = utils.img_parts_generator(part_file_name,
                                                  validation_data_dir,
                                                  batch_size=3000,
                                                  load_image=True,
                                                  unpickled=unpickled_valid)
        
        # j tracks batch in epoch
        j = 0
        train_eval = [] # stores results for each epoch
        for inp, label in train_generator:
            sub_epoch_start = time.time()
            hist = model.fit(inp, label, verbose=1, batch_size=batch_size)
            res = [hist.history['loss'][0], hist.history['acc'][0]]
            train_eval.append(res)
            sub_e_time = time.time() - sub_epoch_start
            print("[train] Epoch: {}/{} Batch: {}/{} train_l: {:.4f} train_acc: {:.4f} time: {:.2f}".format(i+1,epochs,j+1,42320/batch_size, res[0], res[1], sub_e_time))
            j += 1
        # mean loss and acc across the epoch's sub-batches
        train_eval = np.mean(train_eval, axis=0)
        
        val_eval = []
        j = 0
        print('Evaluating validation set')
        for inp, label in val_generator:
            res = model.evaluate(inp, label, verbose=1, batch_size=batch_size)
            print("[valid] Epoch: {}/{} Batch: {}/{} val_l: {:.4f} val_acc: {:.4f}".format(i+1,epochs,j+1,3000/batch_size, res[0], res[1]))
            val_eval.append(res)
            j += 1
            if j == 5:
                break
        val_eval = np.mean(val_eval, axis=0)
        if val_eval[0] < best_val_loss:
            print('Saving weights')
            best_val_loss = val_eval[0]
            model.save_weights('models/{}_{}_{:.4f}.hdf5'.format(
                exp_name, i + 1, best_val_loss))
        time_taken = time.time() - time_start
        log = ('Epoch: {}, train_l: {:.4f}, train_a: {:.4f}, val_l: {:.4f}, '
               'val_a: {:.4f}, time: {:.4f}\n').format(
                   i + 1, train_eval[0], train_eval[1],
                   val_eval[0], val_eval[1], time_taken)
        f.write(log)
        print(log)
    f.close()
Example #4
def save_bottleneck_features():
    datagen = ImageDataGenerator(rescale=1. / 255)
    #weights='best_weights/defrost_everything_init_47_freeze_fixed_weights.03-0.60.hdf5')
    weights = 'best_weights/defrost_all_cropped_77.hdf5'
    model = create_model(num_classes=500, include_top=False, weights=weights)
    print(model.summary())

    validation_generator = datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        shuffle=False,
        class_mode=None)

    bottleneck_features_validation = model.predict_generator(
        validation_generator, nb_validation_samples // batch_size, verbose=1)

    np.save(open(bottleneck_dir + exp_name + '_validation.npy', 'wb'),
            bottleneck_features_validation)

    train_generator = datagen.flow_from_directory(train_data_dir,
                                                  target_size=(img_width,
                                                               img_height),
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  class_mode=None)
    print(nb_train_samples // batch_size)
    bottleneck_features_train = model.predict_generator(train_generator,
                                                        nb_train_samples //
                                                        batch_size,
                                                        verbose=1)

    np.save(open(bottleneck_dir + exp_name + '_train.npy', 'wb'),
            bottleneck_features_train)

    test_generator = datagen.flow_from_directory(test_data_dir,
                                                 target_size=(img_width,
                                                              img_height),
                                                 batch_size=batch_size,
                                                 shuffle=False,
                                                 class_mode=None)

    bottleneck_features_test = model.predict_generator(test_generator,
                                                       nb_test_samples //
                                                       batch_size,
                                                       verbose=1)

    np.save(open(bottleneck_dir + exp_name + '_test.npy', 'wb'),
            bottleneck_features_test)
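Because the generators above run with shuffle=False, the saved feature rows follow flow_from_directory's directory order, so matching labels can be rebuilt afterwards. A minimal sketch, assuming the generator objects are still in scope:

import numpy as np
from keras.utils import to_categorical

# generator.classes lists labels in the exact (unshuffled) order the images
# were yielded, so they align row-for-row with the saved bottlenecks.
train_feats = np.load(bottleneck_dir + exp_name + '_train.npy')
train_labels = to_categorical(train_generator.classes[:len(train_feats)],
                              num_classes=500)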
Example #5
def defrost_train():
    nb_classes = 500

    img_width, img_height = 299, 299

    train_data_dir = '../cropped/train'
    validation_data_dir = '../cropped/validation'
    test_data_dir = '../cropped/test'

    nb_train_samples = 42320
    nb_validation_samples = 3000
    nb_test_samples = 4500

    data_aug = False

    if data_aug:
        train_datagen = ImageDataGenerator(rescale=1. / 255,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           horizontal_flip=True)
    else:
        train_datagen = ImageDataGenerator(rescale=1. / 255)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    nb_epoch = 5
    batch_size = 16
    exp_name = 'defrost_all_cropped'

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size)

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size)

    #test_generator = test_datagen.flow_from_directory(
    #    test_data_dir,
    #    target_size=(img_width, img_height),
    #    batch_size=batch_size)

    tensorboard_callback = TensorBoard(log_dir='./logs/' + exp_name + '/',
                                       histogram_freq=0,
                                       write_graph=True,
                                       write_images=False)
    checkpoint_callback = ModelCheckpoint(
        './models/' + exp_name + '_weights.{epoch:02d}-{val_acc:.2f}.hdf5',
        monitor='val_acc',
        verbose=0,
        save_best_only=True,
        save_weights_only=False,
        mode='auto')

    # load top model architecture
    base_weights = 'best_weights/defrost_everything_init_47_freeze_fixed_weights.03-0.60.hdf5'
    top_weights = 'best_weights/top_cropped_69.hdf5'
    defrost_model = create_model(freeze_level=None,
                                 weights_output_dim=500,
                                 top_weights=top_weights,
                                 weights=base_weights)

    defrost_model.compile(
        optimizer=optimizers.Adam(lr=1e-5),  # also tried an lr with six zeros (~1e-6)
        loss='categorical_crossentropy',
        metrics=['accuracy', metrics.top_k_categorical_accuracy])
    print(defrost_model.summary())

    hist_model = defrost_model.fit_generator(
        train_generator,
        nb_train_samples // batch_size,
        epochs=nb_epoch,
        validation_data=validation_generator,
        validation_steps=nb_validation_samples // batch_size,
        verbose=1,
        initial_epoch=0,
        callbacks=[tensorboard_callback, checkpoint_callback])

    ev_validation = defrost_model.evaluate_generator(
        validation_generator, nb_validation_samples // batch_size)
    print(ev_validation)
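The History object returned by fit_generator holds the per-epoch curves; a small sketch for inspecting them, assuming matplotlib is available (the 'acc'/'val_acc' keys match the Keras version used by the checkpoint callback above):

import matplotlib.pyplot as plt

plt.plot(hist_model.history['acc'], label='train acc')
plt.plot(hist_model.history['val_acc'], label='val acc')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()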
Example #6
from inceptionv4 import create_model

model = create_model()
print("Model created")
print(len(model.layers))

for layer in model.layers[::-1]:
    print(type(layer))
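The reverse listing above is presumably used to decide how deep to defrost; a hedged sketch of freezing everything except the last n layers (the value of n is an illustrative choice, not from the source):

n = 20  # illustrative: keep only the last 20 layers trainable
for layer in model.layers[:-n]:
    layer.trainable = False
for layer in model.layers[-n:]:
    layer.trainable = True
# recompile afterwards so the new trainable flags take effect, e.g.:
# model.compile(optimizer='adam', loss='categorical_crossentropy')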
Example #7
from keras.preprocessing.image import ImageDataGenerator
from inceptionv4 import create_model
from keras import metrics

data_directory = '../cropped_pred_scale1.2/'
#data_directory = '../cropped_aligned/'
train_data_dir = 'train'
validation_data_dir = 'validation'
test_data_dir = 'test'

model = create_model(num_classes=500,
                     weights='best_weights/defrost_all_cropped_77.hdf5')
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # matches the categorical generator below
              metrics=['accuracy', metrics.top_k_categorical_accuracy])


validation_datagen = ImageDataGenerator(rescale=1./255)

validation_generator = validation_datagen.flow_from_directory(
    data_directory+validation_data_dir,
    target_size=(299, 299),
    batch_size=100,
    class_mode='categorical')

validation_eval = []
count = 0

res = model.evaluate_generator(validation_generator, 3000//100)
print(res)

#for inp, label in validation_generator:
#    loss,accuracy = model.evaluate(inp, label, verbose=0)
#    validation_eval.append(accuracy)
Example #8
from keras import backend as K
import matplotlib.pyplot as plt

from inceptionv4 import create_model


def normalize(x):
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)


def plot_data(img, x=None, y=None):
    implot = plt.imshow(img)
    if x is not None and y is not None:
        plt.plot(x, y, 'o', markersize=5)
    plt.show()
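A quick usage example for plot_data with purely illustrative values (real calls would pass an image and the pixel coordinates of its annotated parts):

import numpy as np

img = np.zeros((299, 299, 3), dtype='uint8')  # placeholder image
xs, ys = [50, 120, 200], [80, 150, 90]        # illustrative part coordinates
plot_data(img, x=xs, y=ys)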


# In[3]:

weights = 'best_weights/defrost_all_cropped_77.hdf5'
model, inp = create_model(num_classes=500,
                          include_top=False,
                          weights=weights,
                          return_input=True)
# this is the placeholder for the input images

# In[4]:

input_img = inp

# In[5]:

print(model.summary())

# In[6]:

print(model.layers[-1].name)
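The normalize utility above is the usual ingredient of gradient-ascent visualization; a minimal sketch with the old Keras backend API, maximizing the mean activation of the model's last layer (the starting image, step size, and iteration count are illustrative):

from keras import backend as K
import numpy as np

loss = K.mean(model.layers[-1].output)
# gradient of the loss w.r.t. the input image, L2-normalized
grads = normalize(K.gradients(loss, input_img)[0])
iterate = K.function([input_img], [loss, grads])

img_data = np.random.random((1, 299, 299, 3)) * 20 + 128.
for _ in range(20):
    loss_value, grads_value = iterate([img_data])
    img_data += grads_value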
Example #9
train_generator = train_datagen.flow_from_directory(train_data_dir,
                                                    target_size=(img_width,
                                                                 img_height),
                                                    batch_size=batch_size)

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size)

test_generator = test_datagen.flow_from_directory(test_data_dir,
                                                  target_size=(img_width,
                                                               img_height),
                                                  batch_size=batch_size)

#get base model of inceptionv4
inceptionv4_base, x, inputs = create_model(num_classes=nb_classes,
                                           include_top=False,
                                           weights='imagenet')
print(inceptionv4_base.summary())
#frozen_inceptionv4 = Sequential()
#frozen_inceptionv4.add(inceptionv4_base)
print('Inceptionv4 Base loaded')

## Freezing all layers of the inceptionv4 base
for layer in inceptionv4_base.layers:
    layer.trainable = False
print('Froze weights')

#frozen_inceptionv4 = Sequential()
#frozen_inceptionv4.add(Dense(1000, input_shape=(1536,), activation='relu'))
#frozen_inceptionv4.add(Dropout(0.5))
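The commented lines hint at a small dense head on top of the frozen base; a hedged sketch of how such a top model might be completed (the 1000-unit layer follows the comment, the 500-way softmax matches nb_classes; the rest is an assumption):

from keras.models import Sequential
from keras.layers import Dense, Dropout

top_model = Sequential()
top_model.add(Dense(1000, input_shape=(1536,), activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(500, activation='softmax'))
top_model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])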
Example #10
def crop_dataset():
    # named so the crop() image helper called below is not shadowed

    part_file_name = 'parts_info.txt'
    #validation_data_dir = 'validation/'
    batch_size = 100
    target_dim = (299, 299)
    cache = False
    save_path = '../cropped_pred_scale1.1'

    model = create_model(num_classes=4,
                         weights='best_weights/defrost_all_bb_8.hdf5',
                         activation=None)
    print(model.summary())
    model.compile(optimizer='adam',
                  loss='mean_absolute_error',
                  metrics=['accuracy'])

    direcs = ['test/', 'train/']
    nums = [4500, 42320]
    #direcs = ['validation/']
    #nums = [3000]
    for direc, num in zip(direcs, nums):
        gen = img_parts_generator(part_file_name,
                                  direc,
                                  batch_size=batch_size,
                                  load_image=True,
                                  target_dim=target_dim,
                                  cache=cache,
                                  load_paths=True,
                                  load_orig_img=True,
                                  bb_only=True)
        bar = Bar('Cropping: ' + direc[:-1], max=num)
        for imgs, orig_imgs, paths, parts in gen:
            preds = model.predict(imgs, batch_size=100, verbose=1)
            for i in range(len(imgs)):
                img = imgs[i]
                orig_img = orig_imgs[i]
                path = paths[i]

                ## Rescaling predicted points to original dimensions
                t_l_point = (preds[i][0], preds[i][1])
                b_r_point = (preds[i][2], preds[i][3])
                #print("Orig: ", parts[i])
                #print("Predicted:", t_l_point, b_r_point)
                b_r_point = scale(b_r_point, orig_img.shape)
                t_l_point = scale(t_l_point, orig_img.shape)

                t_l_point, b_r_point = scale_bounding_box(t_l_point,
                                                          b_r_point,
                                                          orig_img.shape,
                                                          scale=1.1)
                # get bounding boxes
                t_l_x = int(t_l_point[0])
                t_l_y = int(t_l_point[1])
                b_r_x = int(b_r_point[0])
                b_r_y = int(b_r_point[1])
                if (b_r_y > t_l_y and b_r_x > t_l_x):
                    img = crop(orig_img, t_l_x, t_l_y, b_r_x, b_r_y)
                else:
                    img = orig_img
                try:
                    img = resize(img, (299, 299))
                except ValueError:
                    print(img.shape)
                    print(orig_img.shape)
                    print(t_l_x, b_r_x, t_l_y, b_r_y)
                    print(t_l_point, b_r_point)
                    a = orig_img[t_l_y:b_r_y, :]
                    print(a.shape)
                    raise
                save_img_path = get_save_path(path, save_path)

                img = Image.fromarray(img)
                img.save(save_img_path)
                bar.next()
            #break
        bar.finish()
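The scale and scale_bounding_box helpers are not shown in this snippet; a plausible sketch, under the assumption that the model predicts corner points normalized to [0, 1] and that the box is enlarged around its centre:

def scale(point, img_shape):
    # hypothetical: map a normalized (x, y) point to pixel coordinates
    h, w = img_shape[:2]
    return (point[0] * w, point[1] * h)


def scale_bounding_box(t_l, b_r, img_shape, scale=1.1):
    # hypothetical: grow the box around its centre by `scale`,
    # clamped to the image bounds
    h, w = img_shape[:2]
    cx, cy = (t_l[0] + b_r[0]) / 2., (t_l[1] + b_r[1]) / 2.
    half_w = (b_r[0] - t_l[0]) * scale / 2.
    half_h = (b_r[1] - t_l[1]) * scale / 2.
    return ((max(0, cx - half_w), max(0, cy - half_h)),
            (min(w, cx + half_w), min(h, cy + half_h)))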
Example #11
import numpy as np

from keras import backend as K

from inceptionv4 import create_model


def deprocess_image(x):
    # shift, rescale, and clip a float tensor into a valid uint8 image
    x += 0.5
    x *= 255.
    x = np.clip(x, 0, 255).astype('uint8')
    return x


K.set_learning_phase(0)

# The stock Keras deep-dream example builds InceptionV3 with pre-trained
# ImageNet weights:
#model = inception_v3.InceptionV3(weights='imagenet',
#                                 include_top=False)
# Here a custom InceptionV4 is loaded with fine-tuned weights instead.

weights_file = 'best_weights/defrost_all_cropped_77.hdf5'
model, dream = create_model(num_classes=500,
                            return_input=True,
                            include_top=False,
                            weights=weights_file)

#dream = model.input
print('Model loaded.')
print(model.summary())
# Get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers])

# Define the loss.
loss = K.variable(0.)
for layer_name in settings['features']:
    # Add the L2 norm of the features of a layer to the loss.
    assert layer_name in layer_dict, \
        'Layer ' + layer_name + ' not found in model.'
    coeff = settings['features'][layer_name]