def extract_feature_from_uploaded_image(image_path):
    model = ResNet50(weights='imagenet',
                     include_top=False,
                     input_shape=(224, 224, 3))

    img = load_img(image_path, target_size=(224, 224))  # resize to match the model's fixed (224, 224, 3) input
    img_array = img_to_array(img)
    expanded_img_array = np.expand_dims(img_array, axis=0)
    expanded_img_array = np.array(expanded_img_array, dtype='float64')

    preprocessed_img = preprocess_input(expanded_img_array)
    features = model.predict(preprocessed_img)
    flattened_features = features.flatten()
    normalized_features = flattened_features / norm(flattened_features)

    image_path = image_path.split('\\')[-1]

    df.loc[len(df.index)] = [image_path, normalized_features]
    feature_list.append(normalized_features)
    REF_INDEX = len(df.index) - 1

    similar_img_ids, _, __ = annoy_search(
        ref_index=REF_INDEX,
        features=feature_list,
        tree_size=TREE_SIZE,
        recommended_item_size=RECOMMENDED_ITEM_SIZE,
        _metric='euclidean')

    clear_recommendation_data()
    return df.iloc[similar_img_ids[1:]]['img_name'].tolist()
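The annoy_search helper called above is not part of the snippet; the sketch below is one plausible implementation using the Annoy library, with the signature taken from the call site.

# Hypothetical implementation of annoy_search (an assumption, not the original code).
from annoy import AnnoyIndex

def annoy_search(ref_index, features, tree_size, recommended_item_size, _metric='euclidean'):
    dims = len(features[0])
    index = AnnoyIndex(dims, _metric)
    for item_id, vector in enumerate(features):
        index.add_item(item_id, vector)
    index.build(tree_size)
    # Nearest neighbours of the reference item; position 0 is the item itself,
    # which is why the caller slices the result with [1:].
    ids, distances = index.get_nns_by_item(ref_index,
                                           recommended_item_size,
                                           include_distances=True)
    return ids, distances, index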
Example #2
def create_model(img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT, resnet_weights="imagenet"):

    input_1 = Input(shape=(img_height, img_width, 3,))
    input_2 = Input(shape=(img_height, img_width, 3,))
    input_3 = Input(shape=(img_height, img_width, 3,))


    res_net = ResNet50(include_top=False,
                       pooling='avg',
                       weights=resnet_weights,
                       input_shape=(img_height, img_width, 3))
    for layer in res_net.layers:
        layer.trainable = False

    tower_1 = Model(inputs=res_net.input, outputs=res_net.layers[-1].output, name='resnet50_1')(input_1)
    tower_2 = Model(inputs=res_net.input, outputs=res_net.layers[-1].output, name='resnet50_2')(input_2)
    tower_3 = Model(inputs=res_net.input, outputs=res_net.layers[-1].output, name='resnet50_3')(input_3)
    #tower_1.trainable = False
    #tower_2.trainable = False
    #tower_3.trainable = False

    #tower_1 = ResNet50(include_top=False,
    #                   pooling='avg',
    #                   weights=resnet_weights) \
    #          (input_1)
    #tower_1 = MaxPooling2D((1, 9), strides=(1, 1), padding='same')(tower_1)

    #tower_2 = ResNet50(include_top=False,
    #                   pooling='avg',
    #                   weights=resnet_weights) \
    #          (input_2)
    ##tower_2 = MaxPooling2D((1, 9), strides=(1, 1), padding='same')(tower_2)
    #tower_2.trainable = False

    #tower_3 = ResNet50(include_top=False,
    #                   pooling='avg',
    #                   weights=resnet_weights) \
    #          (input_3)
    ##tower_3 = MaxPooling2D((1, 6), strides=(1, 1), padding='same')(tower_3)
    #tower_3.trainable = False

    difference_1 = subtract([tower_2, tower_1])
    difference_2 = subtract([tower_3, tower_1])

    #merged = concatenate([tower_1, tower_2, tower_3], axis=1)
    merged = concatenate([difference_1, difference_2], axis=1)
    merged = Flatten()(merged)
    merged = Dropout(0.2)(merged)

    out = Dense(20, activation='relu')(merged)
    #out1 = Dropout(0.2)(out1)
    #out2 = Dense(20, activation='relu')(out2)
    out = Dense(2, activation='softmax')(out)

    #model = Model(input_shape, out)
    model = Model(inputs=[input_1, input_2, input_3], outputs=[out])

    return model
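A minimal, hypothetical usage sketch for create_model() above; the image size, optimizer, and loss are assumptions, and the reading of the 2-way softmax (comparing the two candidate images against the first input) is inferred from the subtract layers rather than stated in the source.

# Hypothetical usage (assumptions: 224x224 inputs, Adam, one-hot 2-class labels).
model = create_model(img_width=224, img_height=224)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
# Training would take three image batches: [reference_imgs, imgs_a, imgs_b], plus one-hot labels.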
Example #3
def test_save_load_model():
    model = ResNet50(weights=None)
    data = save_model_to_bytes(model)
    loaded = load_model_from_bytes(data)
    assert loaded.get_config() == model.get_config()

    w_orig = model.get_weights()
    w_loaded = loaded.get_weights()
    assert len(w_orig) == len(w_loaded)
    for orig_w, loaded_w in zip(w_orig, w_loaded):
        assert (orig_w == loaded_w).all()
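The save_model_to_bytes / load_model_from_bytes helpers exercised by this test are not shown in the snippet; the sketch below is one way they could be implemented, assuming HDF5 serialization through a temporary file.

# Hypothetical helpers (assumptions, not the original implementations).
import os
import tempfile
from tensorflow import keras

def save_model_to_bytes(model):
    # Serialize the full model (architecture + weights) to an HDF5 byte string.
    with tempfile.NamedTemporaryFile(suffix='.h5', delete=False) as tmp:
        path = tmp.name
    try:
        model.save(path)
        with open(path, 'rb') as f:
            return f.read()
    finally:
        os.remove(path)

def load_model_from_bytes(data):
    # Rebuild the model from the HDF5 byte string.
    with tempfile.NamedTemporaryFile(suffix='.h5', delete=False) as tmp:
        path = tmp.name
        tmp.write(data)
    try:
        return keras.models.load_model(path)
    finally:
        os.remove(path)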
    def _resnet_50(input_shape):
        image = Input(shape=input_shape, name='image')
        _layer_names = [
            'conv3_block4_out',
            'conv4_block6_out',
            'conv5_block3_out'
        ]

        base_model = ResNet50(include_top=False, input_tensor=image)
        feature_maps = [base_model.get_layer(layer_name).output for layer_name in _layer_names]
        return base_model, feature_maps
Example #5
def evaluate(encoder, decoder, optimizer, step_counter, image):
    attention_plot = np.zeros((max_length, attention_features_shape))

    hidden = decoder.reset_state(batch_size=1)
    size = [224, 224]

    def load_image(image_path):
        img = tf.read_file(PATH + image_path)
        img = tf.image.decode_jpeg(img, channels=3)
        img = tf.image.resize(img, size)
        img = tf.keras.applications.resnet50.preprocess_input(img)
        return img, image_path

    from tensorflow.keras.applications.resnet50 import ResNet50

    image_model = ResNet50(weights='resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                           include_top=False)  # create the ResNet network

    new_input = image_model.input
    hidden_layer = image_model.layers[-2].output
    image_features_extract_model = tf.keras.Model(new_input, hidden_layer)

    temp_input = tf.expand_dims(load_image(image)[0], 0)
    img_tensor_val = image_features_extract_model(temp_input)
    img_tensor_val = tf.reshape(img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3]))

    features = encoder(img_tensor_val)

    #    print(step_counter.numpy())
    dec_input = tf.expand_dims([word_index['<start>']], 0)
    result = []

    for i in range(max_length):
        predictions, hidden, attention_weights = decoder(dec_input, features, hidden)

        attention_plot[i] = tf.reshape(attention_weights, (-1,)).numpy()

        print(predictions.get_shape())

        predicted_id = tf.multinomial(predictions, num_samples=1)[0][0].numpy()
        result.append(index_word[predicted_id])

        print(predicted_id)

        if index_word[predicted_id] == '<end>':
            return result, attention_plot

        dec_input = tf.expand_dims([predicted_id], 0)

    attention_plot = attention_plot[:len(result), :]
    return result, attention_plot
def ResNet(weights="imagenet"):
    """
    ResNet50-based spectrogram classification model.
    :param weights: None or "imagenet" pretraining
    :return: (model, input_shape, channels)
    """
    channels = 3
    input_shape = spectrogram_dim + (3, )
    model = keras.models.Sequential([
        ResNet50(input_shape=input_shape, include_top=False, weights=weights),
        layers.GlobalMaxPool2D(input_shape=(8, 9, 2048)),
        layers.Dense(1024, activation="relu"),
        layers.Dense(len(birdcodes.bird_code), activation="sigmoid"),
    ])
    return model, input_shape, channels
Example #7
def prepare_resnet_learner(
    data_loaders: Tuple[PrefetchDataset, PrefetchDataset, PrefetchDataset],
    steps_per_epoch: int = 100,
    vote_batches: int = 10,
    learning_rate: float = 0.001,
) -> KerasLearner:
    # RESNET model
    rows = 28
    cols = 28
    channels = 1
    new_channels = 3
    padding = 2
    n_classes = 10

    input_img = tf.keras.Input(shape=(rows, cols, channels), name="Input")
    x = tf.keras.layers.ZeroPadding2D(padding=padding)(input_img)
    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.RepeatVector(new_channels)(
        x)  # mnist only has one channel so duplicate inputs
    x = tf.keras.layers.Reshape((rows + padding * 2, cols + padding * 2,
                                 new_channels))(x)  # note: this reshape interleaves values rather than copying each pixel across channels; tiling along the channel axis would do that

    resnet = ResNet50(include_top=False, input_tensor=x)

    x = resnet.output
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = Dropout(0.7)(x)
    x = tf.keras.layers.Dense(n_classes, activation='softmax')(x)

    model = tf.keras.Model(inputs=input_img, outputs=x)

    model.compile(optimizer=tf.keras.optimizers.Adam(lr=learning_rate),
                  loss='sparse_categorical_crossentropy',
                  metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

    learner = KerasLearner(
        model=model,
        train_loader=data_loaders[0],
        vote_loader=data_loaders[1],
        test_loader=data_loaders[2],
        criterion="sparse_categorical_accuracy",
        minimise_criterion=False,
        model_fit_kwargs={"steps_per_epoch": steps_per_epoch},
        model_evaluate_kwargs={"steps": vote_batches},
    )
    return learner
Example #8
def get_model(self, name=None, training_variant=None):
    result = ResNet50(weights='imagenet')
    if name is not None:
        result._name = name
    return result
Example #9
def get_raw_model(self, name=None) -> Model:
    result = ResNet50()
    if name is not None:
        result._name = name
    return result
Example #10
datagen = ImageDataGenerator(  # note: the opening of this call is truncated in the source; 'datagen' is a hypothetical name
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=10,  # randomly rotate images in the range (degrees, 0 to 180)
        zoom_range = 0.1, # Randomly zoom image
        width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=False,  # randomly flip images
        vertical_flip=False)  # randomly flip images



model = Sequential()
#vgg-16 , 80% accuracy with 100 epochs
# model.add(VGG16(input_shape=(224,224,3),pooling='avg',classes=1000,weights=vgg16_weights_path))
#resnet-50 , 87% accuracy with 100 epochs
model.add(ResNet50(include_top=False,input_tensor=None,input_shape=(224,224,3),pooling='avg',classes=2,weights=resnet_weights_path))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(1, activation='sigmoid'))

model.layers[0].trainable = False
model.summary()



model.compile(optimizer=Adam(lr=0.001),loss='binary_crossentropy',metrics=['accuracy'])
Example #11

train_images, train_labels = preprocess_data(train_images, train_labels)
print(train_images.shape, train_labels.shape)
validation_images, validation_labels = preprocess_data(validation_images,
                                                       validation_labels)

resnet_model = keras.models.Sequential()
resnet_model.add(
    keras.layers.Lambda(lambda image: tf.image.resize(image, (192, 192))))
resnet_model.add(ResNet50(include_top=False, pooling='max',
                          weights='imagenet'))
resnet_model.add(keras.layers.Flatten())
resnet_model.add(keras.layers.Dense(256, activation='relu'))
resnet_model.add(keras.layers.BatchNormalization())
resnet_model.add(keras.layers.Dropout(0.3))
resnet_model.add(keras.layers.Dense(128, activation='relu'))
resnet_model.add(keras.layers.BatchNormalization())
resnet_model.add(keras.layers.Dropout(0.5))
resnet_model.add(keras.layers.Dense(3, activation='softmax'))

# with 10 epochs and dropouts 0.3 0.5: [0.8269070982933044, 0.7431111335754395]
# with 15 epochs and 0.3 0.7: [0.7292381525039673, 0.7775555849075317]
# 20 epochs 0.3 0.7: [0.7863541841506958, 0.7955555319786072] and 0.99 on train
# 20 epochs 0.7 0.9 2e-5: [0.6383113861083984, 0.8028888702392578]
opt = keras.optimizers.Adam(learning_rate=2e-5)
resnet_model.compile(optimizer=opt,
def construct_model(pretrainedNN):

    model = Sequential()
    if (pretrainedNN == 'VGG16'):
        model.add(
            VGG16(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'VGG19'):
        model.add(
            VGG19(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'ResNet101'):
        model.add(
            ResNet101(weights=None, include_top=False,
                      input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'ResNet152'):
        model.add(
            ResNet152(weights=None, include_top=False,
                      input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'ResNet50V2'):
        model.add(
            ResNet50V2(weights=None,
                       include_top=False,
                       input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'ResNet101V2'):
        model.add(
            ResNet101V2(weights=None,
                        include_top=False,
                        input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'ResNet152V2'):
        model.add(
            ResNet152V2(weights=None,
                        include_top=False,
                        input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'MobileNet'):
        model.add(
            MobileNet(weights=None, include_top=False,
                      input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'MobileNetV2'):
        model.add(
            MobileNetV2(weights=None,
                        include_top=False,
                        input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'DenseNet121'):
        model.add(
            DenseNet121(weights=None,
                        include_top=False,
                        input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'DenseNet169'):
        model.add(
            DenseNet169(weights=None,
                        include_top=False,
                        input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'DenseNet201'):
        model.add(
            DenseNet201(weights=None,
                        include_top=False,
                        input_shape=(32, 32, 3)))
    else:
        model.add(
            ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3)))

    model.add(Flatten())

    model.add(Dense(77, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    return model
def create_model():

    n_units = 100

    # Initialize a ResNet50_ImageNet Model
    resnet_input = Input(shape=(IMAGE_WIDTH, IMAGE_HEIGHT, 3))
    resnet_model = ResNet50(weights='imagenet',
                            include_top=False,
                            input_tensor=resnet_input)

    # New Layers over ResNet50
    net = resnet_model.output
    #net = Flatten(name='flatten')(net)
    net = GlobalAveragePooling2D(name='gap')(net)
    #net = Dropout(0.5)(net)
    net = Dense(n_units, activation='relu', name='t_emb_1')(net)
    net = Lambda(lambda x: K.l2_normalize(x, axis=1),
                 name='t_emb_1_l2norm')(net)

    # model creation
    base_model = Model(resnet_model.input, net, name="base_model")

    # triplet framework, shared weights
    input_shape = (IMAGE_WIDTH, IMAGE_HEIGHT, 3)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')

    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)

    # The Lambda layer produces its output using the given function; here it is the Euclidean distance.
    positive_dist = Lambda(euclidean_distance,
                           name='pos_dist')([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance,
                           name='neg_dist')([net_anchor, net_negative])
    tertiary_dist = Lambda(euclidean_distance,
                           name='ter_dist')([net_positive, net_negative])

    # This Lambda layer simply stacks the outputs so all three distances are available to the objective
    stacked_dists = Lambda(lambda vects: K.stack(vects, axis=1),
                           name='stacked_dists')(
                               [positive_dist, negative_dist, tertiary_dist])

    model = Model([input_anchor, input_positive, input_negative],
                  stacked_dists,
                  name='triple_siamese')

    # Setting up optimizer designed for variable learning rate

    # Variable Learning Rate per Layers
    lr_mult_dict = {}
    last_layer = ''
    for layer in resnet_model.layers:
        # comment this out to refine earlier layers
        # layer.trainable = False
        # print layer.name
        lr_mult_dict[layer.name] = 1
        # last_layer = layer.name
    lr_mult_dict['t_emb_1'] = 100

    #base_lr = 0.0001
    #momentum = 0.9
    #v_optimizer = LR_SGD(lr=base_lr, momentum=momentum, decay=0.0, nesterov=False, multipliers = lr_mult_dict)

    #model.compile(optimizer=v_optimizer, loss=triplet_loss, metrics=[accuracy])

    return model
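The euclidean_distance function used by the Lambda layers above is not included in the snippet; below is the common implementation it presumably resembles (an assumption, not the original code).

# Hypothetical euclidean_distance helper: row-wise L2 distance between two
# batches of embeddings, kept 2-D so the 'stacked_dists' Lambda layer can
# stack the three distances along axis=1.
from tensorflow.keras import backend as K

def euclidean_distance(vects):
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True),
                            K.epsilon()))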
print("X_train.shape", X_train.shape)
print("X_valid.shape", X_val.shape)
print("y_train.shape", y_train.shape)
print("y_valid.shape", y_val.shape)

# One hot encoding the labels
y_train = keras.utils.to_categorical(y_train, NUM_CATEGORIES)
y_val = keras.utils.to_categorical(y_val, NUM_CATEGORIES)

print(y_train.shape)
print(y_val.shape)

# Making the model
model = Sequential()
model.add(
    ResNet50(weights='imagenet', include_top=False, input_shape=(64, 64, 3)))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(1024, activation='sigmoid'))
model.add(Dense(y_train.shape[1], activation='softmax'))

lr = 0.001
epochs = 30
opt = Adam(lr=lr, decay=lr / (epochs * 0.5))

# Cost and optimization method to use
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

# Augmenting the data and training the model
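The snippet is cut off here; the sketch below shows one way the announced augmentation and training step might look. The augmentation settings, batch size, and use of model.fit on a generator are all assumptions.

# Hypothetical continuation (not part of the original snippet).
from keras.preprocessing.image import ImageDataGenerator

aug = ImageDataGenerator(rotation_range=10,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         zoom_range=0.1)
model.fit(aug.flow(X_train, y_train, batch_size=32),
          validation_data=(X_val, y_val),
          steps_per_epoch=len(X_train) // 32,
          epochs=epochs)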
Example #15
    def get_predictions(self, in_img, network_name, top=3):
        # network model selection
        model = None
        size = (224, 224)
        int_model = False
        if network_name == 'VGG19':
            if self.MODEL_VGG19 is None:
                from tensorflow.keras.applications.vgg19 import VGG19
                self.MODEL_VGG19 = VGG19(weights='imagenet')
            model = self.MODEL_VGG19
            int_model = True
            from tensorflow.keras.applications.vgg19 import decode_predictions

        elif network_name == 'DenseNet201':
            if self.MODEL_DENSENET201 is None:
                from tensorflow.keras.applications.densenet import DenseNet201
                self.MODEL_DENSENET201 = DenseNet201(weights='imagenet')
            model = self.MODEL_DENSENET201
            from tensorflow.keras.applications.densenet import decode_predictions

        elif network_name == 'MobileNetV2':
            if self.MODEL_MOBILENET is None:
                from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2  # (224, 224)
                self.MODEL_MOBILENET = MobileNetV2(weights='imagenet')
            model = self.MODEL_MOBILENET
            from tensorflow.keras.applications.mobilenet_v2 import decode_predictions

        elif network_name == 'InceptionV3':
            if self.MODEL_INCEPTIONV3 is None:
                from tensorflow.keras.applications.inception_v3 import InceptionV3  # (299,299)
                self.MODEL_INCEPTIONV3 = InceptionV3(weights='imagenet')
            model = self.MODEL_INCEPTIONV3
            size = (299, 299)
            from tensorflow.keras.applications.inception_v3 import decode_predictions

        elif network_name == 'ResNet50':
            if self.MODEL_RESNET50 is None:
                from tensorflow.keras.applications.resnet import ResNet50  # (224, 224)
                self.MODEL_RESNET50 = ResNet50(weights='imagenet')
            model = self.MODEL_RESNET50
            int_model = True
            from tensorflow.keras.applications.resnet import decode_predictions

        else:
            print("no valid model selected")
            return

        # if input is a numpy image, convert it to a cv image
        if (isinstance(in_img, numpy.ndarray)):
            in_img = _to_cv_image(in_img)

        img = cv2.resize(in_img, dsize=size, interpolation=cv2.INTER_CUBIC)
        img = _cv_to_array(img)
        if (int_model):
            img = img * 255
        img = numpy.expand_dims(img, axis=0)
        # predict model and return top predictions
        features = model.predict(img)
        predictions = []
        for p in decode_predictions(features, top=top)[0]:
            predictions.append([p[0], p[1], float(p[2])])

        # get the indexes of the top predictions
        class_codes = numpy.flip(numpy.argsort(features[0]))[:top]

        return (predictions, class_codes)  # get the predicted class name
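The _to_cv_image and _cv_to_array helpers called above are not shown; a plausible sketch follows, assuming RGB-to-BGR conversion and [0, 1] scaling (which would explain the img * 255 step for the "int" models).

# Hypothetical helpers (assumptions, not the original implementations).
import cv2
import numpy

def _to_cv_image(img_array):
    # RGB numpy array -> OpenCV BGR uint8 image.
    return cv2.cvtColor(numpy.uint8(img_array), cv2.COLOR_RGB2BGR)

def _cv_to_array(cv_img):
    # OpenCV BGR image -> RGB float array scaled to [0, 1].
    return cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB).astype('float32') / 255.0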
Example #16
# Include the epoch in the file name (uses `str.format`)
checkpoint_path = "model_checkpoints/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

# Create a callback that saves the model's weights every 5 epochs
cp_callback = ModelCheckpoint(
    filepath=checkpoint_path,
    verbose=1,
    save_weights_only=True,
    period=5)  # note: newer tf.keras versions use save_freq instead of period

num_classes = 2
resnet_weights_path = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'

model = Sequential()
model.add(ResNet50(include_top=False, pooling='avg', weights=resnet_weights_path))
model.add(Dense(num_classes, activation='softmax'))

# Say not to train first layer (ResNet) model. It is already trained
model.layers[0].trainable = False

# Compile model
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])

# Save the weights using the `checkpoint_path` format
model.save_weights(checkpoint_path.format(epoch=0))

model.fit_generator(
        train_generator,
        steps_per_epoch=26,
        epochs=15,
Example #17
def load_model(args):

    print("Initializing model")
    print("Batch size: ", args.batch_size)
    print("Input size: {} x {}".format(args.input_size, args.input_size))

    if not args.resnet:
        print("Using hourglass")
        print("Num stacks: ", args.num_stacks)
        print("Heatmap size: {} x {}".format(args.heatmap_size,
                                             args.heatmap_size))
        print("Use diamond coords for output: {}".format(args.diamond))
        print("Scale for vp: {}".format(args.scale))

        print("Heatmap feautures: ", args.features)
        print("Channels: ", args.channels)
        print("Mobilenet version: ", args.mobilenet)

    else:
        print("Using ResNet50")

    print("Experiment number: ", args.experiment)

    loss_str = args.loss

    if args.loss == 'mse':
        loss = keras.losses.mse
    elif args.loss == 'mae':
        loss = keras.losses.mae
    elif args.loss == 'rmse':
        loss = keras.metrics.RootMeanSquaredError()
    elif args.loss == 'sl1':
        loss = keras.losses.huber
    else:
        loss, loss_str = get_loss(args.loss)

    if args.mobilenet:
        module = 'mobilenet'
        module_str = 'm'
    else:
        module = 'bottleneck'
        module_str = 'b'

    if args.crop_delta == 0 and args.perspective_sigma == 0.0:
        print("Not using augmentation")

        aug_str = 'noaug'
    else:
        print("Using augmentation")
        print("Perspectve sigma: {}".format(args.perspective_sigma))
        print("Crop delta: {}".format(args.crop_delta))
        aug_str = 'aug_{}ps_{}cd'.format(args.perspective_sigma,
                                         args.crop_delta)

    if not args.resnet:
        model_name = 'reg_diamond' if args.diamond else 'reg_orig'
        snapshot_dir_name = 'VP1VP2_{}_{}_{}_{}_{}in_{}out_{}s_{}f_{}n_{}b_{}c_{}'.\
            format(model_name, loss_str, aug_str, module_str, args.input_size, args.heatmap_size, args.scale, args.features, args.num_stacks, args.batch_size, args.channels, args.experiment)

        backbone = create_hourglass_network(args.features,
                                            args.num_stacks,
                                            inres=args.input_size,
                                            outres=args.heatmap_size,
                                            bottleneck=module,
                                            num_channels=args.channels)

        outputs = []
        for i in range(args.num_stacks):
            backbone_out = backbone.outputs[i]
            x = keras.layers.GlobalAveragePooling2D(
                name='mlp_pool_{}'.format(i))(backbone_out)
            x = keras.layers.Dense(args.features // 2,
                                   activation='relu',
                                   name='mlp_1_{}'.format(i))(x)
            x = keras.layers.Dense(args.features // 4,
                                   activation='relu',
                                   name='mlp_2_{}'.format(i))(x)
            x = keras.layers.Dense(4, name='mlp_out_{}'.format(i))(x)
            outputs.append(x)

        model = keras.models.Model(inputs=backbone.input, outputs=outputs)

    else:
        if args.num_stacks != 1:
            raise Exception("Cannot use ResNet with multiple outputs!")
        model_name = 'resnet_diamond' if args.diamond else 'resnet_orig'


        snapshot_dir_name = 'VP1VP2_{}_{}_{}_{}in_{}s_{}b_{}'.\
            format(model_name, loss_str, aug_str, args.input_size,  args.scale, args.batch_size, args.experiment)

        model = keras.models.Sequential()
        backbone = ResNet50(input_shape=(args.input_size, args.input_size, 3),
                            include_top=False,
                            pooling='avg')
        backbone.summary()  # summary() prints directly; wrapping it in print() just prints None
        model.add(backbone)
        model.add(keras.layers.Dense(128, activation='relu', name='mlp_1'))
        model.add(keras.layers.Dense(64, activation='relu', name='mlp_2'))
        model.add(keras.layers.Dense(4, name='mlp_out'))

    print("Dir name: ", snapshot_dir_name)
    snapshot_dir_path = os.path.join('snapshots', snapshot_dir_name)

    if args.resume:
        if args.experiment_resume is None:
            args.experiment_resume = args.experiment

        if not args.resnet:
            model_name = 'reg_diamond' if args.diamond else 'reg_orig'
            resume_snapshot_dir_name = 'VP1VP2_{}_{}_{}_{}_{}in_{}out_{}s_{}f_{}n_{}b_{}c_{}'. \
                format(model_name, loss_str, aug_str, module_str, args.input_size, args.heatmap_size, args.scale, args.features,
                       args.num_stacks, args.batch_size, args.channels, args.experiment_resume)
        else:
            if args.num_stacks != 1:
                raise Exception("Cannot use ResNet with multiple outputs!")
            model_name = 'resnet_diamond' if args.diamond else 'resnet_orig'
            resume_snapshot_dir_name = 'VP1VP2_{}_{}_{}_{}in_{}s_{}b_{}'. \
                format(model_name, loss_str, aug_str, args.input_size, args.scale, args.batch_size, args.experiment_resume)

        resume_snapshot_dir_path = os.path.join('snapshots',
                                                resume_snapshot_dir_name)

        resume_model_path = os.path.join(resume_snapshot_dir_path,
                                         'model.{:03d}.h5'.format(args.resume))
        print("Loading model", resume_model_path)
        model.load_weights(resume_model_path)

    return model, loss, snapshot_dir_name, snapshot_dir_path