Example #1
import models

def initialize_model(model_num):
    # input_size and num_classes are expected to be module-level globals;
    # an unrecognized model_num falls through and returns None.
    if model_num == 1:
        return models.model1(input_size, num_classes)
    elif model_num == 2:
        return models.model2(input_size, num_classes)
    elif model_num == 3:
        return models.model3(input_size, num_classes)
    elif model_num == 4:
        return models.model4(input_size, num_classes)
    elif model_num == 5:
        return models.model5(input_size, num_classes)
    elif model_num == 6:
        return models.model6(input_size, num_classes)
    elif model_num == 7:
        return models.model7(input_size, num_classes)
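The if/elif chain above is a number-to-constructor dispatch; a dict of constructors is a more compact equivalent. This is only a sketch, assuming the same models module and that an unknown model_num should fail loudly rather than return None:

import models

# Hypothetical alternative: look the constructor up in a dict instead of chaining elif.
MODEL_BUILDERS = {
    1: models.model1,
    2: models.model2,
    3: models.model3,
    4: models.model4,
    5: models.model5,
    6: models.model6,
    7: models.model7,
}

def initialize_model(model_num, input_size, num_classes):
    try:
        return MODEL_BUILDERS[model_num](input_size, num_classes)
    except KeyError:
        raise ValueError("Unknown model_num: {}".format(model_num))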
Example #2
import models

def initialize_model(model_num, vocab_size, embed_size):
    if model_num == 1:
        return models.model1(vocab_size, embed_size)
    elif model_num == 2:
        return models.model2(vocab_size, embed_size)
    elif model_num == 3:
        return models.model3(vocab_size, embed_size)
    elif model_num == 4:
        return models.model4(vocab_size, embed_size)
    elif model_num == 5:
        return models.model5(vocab_size, embed_size)
    elif model_num == 6:
        return models.model6(vocab_size, embed_size)
    elif model_num == 7:
        return models.model7(vocab_size, embed_size)
Example #3
import models

def initialize_model(model_num):
    if model_num == 1:
        return models.model1()
    elif model_num == 2:
        return models.model2()
    elif model_num == 3:
        return models.model3()
    elif model_num == 4:
        return models.model4()
    elif model_num == 5:
        return models.model5()
    elif model_num == 6:
        return models.model6()
    elif model_num == 7:
        return models.model7()
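Because the constructors follow a model<N> naming pattern, the same dispatch can also be written with getattr. A sketch under that assumption, not taken from the original project:

import models

def initialize_model(model_num):
    # Resolve models.model1 ... models.model7 by name; getattr raises
    # AttributeError for a number with no matching constructor.
    builder = getattr(models, "model{}".format(model_num))
    return builder()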
Example #4
import tensorflow as tf

# model3 and find_trainable_variables are assumed to be imported from the
# project's own modules; they are not defined in this snippet.
def build_model(args, scope):
    nh = args.max_clause
    nw = args.max_var
    nc = 2
    nact = nc * nw
    ob_shape = (None, nh, nw, nc * args.nstack)
    X = tf.placeholder(tf.float32, ob_shape)
    Y = tf.placeholder(tf.float32, (None, nact))
    Z = tf.placeholder(tf.float32, (None,))  # note: (None) would be plain None, not a 1-D shape

    p, v = model3(X, nact, scope)
    params = find_trainable_variables(scope)
    with tf.name_scope("loss"):
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=p))
        value_loss = tf.losses.mean_squared_error(labels=Z, predictions=v)
        lossL2 = tf.add_n([tf.nn.l2_loss(vv) for vv in params])
        loss = cross_entropy + value_loss + args.l2_coeff * lossL2

    return X, Y, Z, p, v, params, loss
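A sketch of how the tensors returned by build_model might be wired into a TF1 training step. The hyperparameter values, the Adam optimizer, and the random batch below are illustrative assumptions, not part of the original project:

import numpy as np
import tensorflow as tf

# Hypothetical stand-in for the real argument object; only the fields that
# build_model reads are provided.
class Args:
    max_clause = 20
    max_var = 10
    nstack = 1
    l2_coeff = 1e-4

args = Args()
X, Y, Z, p, v, params, loss = build_model(args, scope="model3")
train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Random batch matching the placeholder shapes, just to exercise one step.
    batch_obs = np.random.rand(8, args.max_clause, args.max_var, 2 * args.nstack).astype(np.float32)
    batch_pi = np.random.rand(8, 2 * args.max_var).astype(np.float32)
    batch_val = np.random.rand(8).astype(np.float32)
    step_loss, _ = sess.run([loss, train_op],
                            feed_dict={X: batch_obs, Y: batch_pi, Z: batch_val})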
Example #5
import torch

import datasets
import models

# Parameters
input_size = 784
num_classes = 10

# Model you wish to evaluate
#TODO: Change to the model you wish to evaluate!
file_path = r'./saved models/Model 4 - Split image-16, lr=0.001, wd=0.0001, bs=64.pkl'
model_name = file_path.split('saved models/')[1]
model_name = model_name.split('.pkl')[0]

# The map_location lambda keeps all tensors on the CPU even if the checkpoint was saved from a GPU run.
state = torch.load(file_path, lambda storage, loc: storage)
model = models.model3(input_size, num_classes)
model.load_state_dict(state['state_dict'])

if torch.cuda.is_available():
    print('GPU detected - Enabling Cuda!')
    model = model.cuda()
else:
    print('No GPU detected!')

# Dataset
test_dataset = datasets.test_dataset()

# Dataset Loader (Input Pipeline)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=32,
                                          shuffle=False)
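A typical accuracy pass over test_loader could follow here. This is a sketch, assuming the loader yields (image, label) tensor batches and that the model returns class scores; neither is spelled out in the snippet above.

correct = 0
total = 0
model.eval()
with torch.no_grad():
    for images, labels in test_loader:
        # Flatten in case the model expects input_size-long vectors (an assumption).
        images = images.view(images.size(0), -1)
        if torch.cuda.is_available():
            images, labels = images.cuda(), labels.cuda()
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Test accuracy: {:.2f}%'.format(100.0 * correct / total))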
Example #6

        # Copy this layer's output channels into the running feature_maps buffer
        # (the enclosing function definition and loop are not shown in this snippet).
        feature_maps[:, :, start_index:start_index+nchannel] = output[0][0, :, :, :]

        start_index = start_index + nchannel

    return feature_maps


# Module-level imports needed by the block below; model3 and load_image are
# assumed to be defined or imported elsewhere in this file.
import os
from sys import argv

import joblib
import pandas as pd


if __name__ == '__main__':
    _, _file = argv
    features_dir = './feature_maps/'
    Data = pd.read_csv(_file, delimiter=',', header=0)
   
    inputImages = Data.ImageFile.tolist()
   
    weight_path = 'weights-improvement__016-0.022715.hdf5'
    model = model3(weights_path=weight_path)

    attrs = ['ColorHarmony', 'Content', 'DoF',
             'Light', 'Object', 'VividColor', 'score']
    
    for attr in attrs:
        if not os.path.isdir(attr):
            os.makedirs(attr)

    n = len(inputImages)
    weights = joblib.load('weights.pkl')

    for i, image_path in enumerate(inputImages):
        img = load_image(image_path)

        filename = image_path.split('/')[-1]