Example #1
import json

# load_all_models, get_model, Simulation and _dir_path are assumed to be
# defined elsewhere in this module.


def load_all(overwrite=False):
    """Load every known simulation from disk and register it."""
    load_all_models()

    filein = "{0}/known_simulations.json".format(_dir_path)
    with open(filein) as json_file:
        data = json.load(json_file)
        for name in data.keys():
            print(name)
            # Swap the stored model name for the actual model object before
            # constructing the Simulation.
            data[name]['model'] = get_model(data[name]['model'])
            sim = Simulation(**data[name])
            sim.add2known(overwrite=overwrite)
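# A hypothetical sketch of the JSON layout load_all() assumes: a mapping from
# simulation name to the keyword arguments of Simulation, where 'model' starts
# out as a name that get_model() resolves. The field names other than 'model'
# are illustrative, not taken from the source.
#
# {
#     "run_a": {"model": "heat_equation", "timestep": 0.01},
#     "run_b": {"model": "wave_equation", "timestep": 0.005}
# }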
Example #2
from ImgConst import *  # assumed to provide IMG_WIDTH, IMG_HEIGHT, IMG_COLOR and LOSS_2

import numpy as np

from LoadData import prepare_data, img_vec
# load_model is used below but was never imported; it is assumed to live in
# Model alongside save_model.
from Model import get_model, train_model, save_model, load_model
from SimImgIdx import get_sorted_similarity_idx
from ImgFeatures import save_img_feature, load_img_features
# Train the autoencoder, persist it, then reload the model and the stored
# image features. (The original wrapped this in a `try:` with no matching
# `except`, so the block is unwrapped here.)
x_train = prepare_data()
autoencoder = get_model()
autoencoder = train_model(x_train, autoencoder)
model_path = save_model(autoencoder)
autoencoder_save = load_model(model_path)
feature_path = save_img_feature(autoencoder_save, x_train)
feature_val = load_img_features(feature_path)

# Preprocess a single test image exactly as the training data was prepared.
img_test_file = "f0006_09.png"
img_test = img_vec(img_test_file)
img_test = img_test.astype('float32') / 255.
img_test = np.reshape(img_test, (len(img_test), IMG_WIDTH, IMG_HEIGHT, IMG_COLOR))

# Rank the stored images by similarity to the test image; the first entry is
# the closest match.
similarity_sorted = get_sorted_similarity_idx(autoencoder_save, img_test,
                                              encoded_images=feature_val, loss=LOSS_2)
similar_idx = similarity_sorted[0]

print(similar_idx)
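# A hedged visualization sketch: show the query image next to its best match.
# It assumes the arrays reshape to IMG_WIDTH x IMG_HEIGHT (i.e. single-channel
# images) and that matplotlib is available.
import matplotlib.pyplot as plt

f = plt.figure(figsize=(20, 4))
f.add_subplot(1, 2, 1)
plt.imshow(img_test[0].reshape(IMG_WIDTH, IMG_HEIGHT))           # query image
f.add_subplot(1, 2, 2)
plt.imshow(x_train[similar_idx].reshape(IMG_WIDTH, IMG_HEIGHT))  # best match
plt.show()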
Example #3

# tokenize all content
import sys
import numpy as np
from keras.preprocessing.text import Tokenizer

# get_encoded_padded_content, get_decoder_inputs, get_model, the datasets and
# the sequence-length constants are assumed to come from the surrounding file.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(training_dataset['content'])
tokenizer.fit_on_texts(test_dataset['content'])
num_encoder_tokens = len(tokenizer.word_index) + 1

# fetch encoder input data and decoder input data
encoder_input_data = get_encoded_padded_content(tokenizer, training_dataset['content'], max_encoder_seq_length)
decoder_input_data, decoder_target_data = get_decoder_inputs(training_dataset["labels"].values, num_decoder_tokens)


model, encoder_model, decoder_model = get_model(num_encoder_tokens, num_decoder_tokens, tokenizer, sys.argv[1],
                                                encoder_input_data, decoder_input_data, decoder_target_data)


encoder_input_test_data = get_encoded_padded_content(tokenizer, test_dataset['content'], max_encoder_seq_length)
decoder_input_test_data, decoder_target_test_data = get_decoder_inputs(test_dataset["labels"].values,
                                                                       num_decoder_tokens)

model_output = []


def decode_sequence(seq_input):
    # Encode the input as state vectors.
    states_value = encoder_model.predict(seq_input)

    # Generate empty target sequence of length 1.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
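    # decode_sequence breaks off here in the source. What follows is a hedged
    # sketch of the standard Keras seq2seq greedy-decoding loop such code
    # usually continues with; the start/stop token indices and the length cap
    # are assumptions, not taken from the source.
    target_seq[0, 0, 0] = 1.  # assumed start-of-sequence token at index 0

    decoded_tokens = []
    while True:
        output_tokens, h, c = decoder_model.predict([target_seq] + states_value)

        # Greedily pick the most likely next token.
        sampled_token_index = int(np.argmax(output_tokens[0, -1, :]))
        decoded_tokens.append(sampled_token_index)

        # Stop on the assumed end token or once the output grows too long.
        if sampled_token_index == 0 or len(decoded_tokens) >= 100:
            break

        # Feed the sampled token back in and carry the decoder state forward.
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.
        states_value = [h, c]

    return decoded_tokens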
Example #4
import numpy as np
import torch
import matplotlib.pyplot as plt

from FGSM import FGSM
from BIM import BIM
from visualize import visualise
# get_model is assumed to come from a local module alongside FGSM and BIM.

device = torch.device('cpu')

model = get_model(device)  # loads a pretrained vgg11 model
model.eval()

def imshow(img, wnid, title=None):
    """Undo the ImageNet normalisation and display the tensor as an image."""
    img = img.cpu().detach().numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])

    img = img * std + mean
    img = np.clip(img, 0, 1)  # keep de-normalised values displayable
    plt.imshow(img)
    if title is not None:
        plt.title(title)
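# A hypothetical usage sketch. The constructor and call signatures of the
# local FGSM class, and the img/label/wnid values, are assumptions rather
# than the source's actual API, so the calls are left commented out.
# attack = FGSM(model, eps=0.03)
# adv_img = attack(img, label)
# imshow(adv_img[0], wnid, title='FGSM adversarial example')
# visualise(img, adv_img)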
Example #5

from Model import get_model
from keras import callbacks
from keras.optimizers import Adam
from keras.losses import sparse_categorical_crossentropy
from data_generator import data_generator

model_systole = get_model(n_ch=32)
adam = Adam(lr=0.0001)
model_systole.compile(optimizer=adam, loss=sparse_categorical_crossentropy)
model_systole.summary()

#  --- Define data and training ----#
path_train = 'Training_arrays/systole/'
path_val = 'Validation_arrays/systole/'
pathModel = 'SystoleModel/'
pathLog = 'SystoleLog/'

batchsize = 4

n_epochs, n_iter_train, n_iter_val = 400, int(491 / batchsize), int(194 / batchsize)
data_train = data_generator(path_train,
                            batchsize=batchsize,
                            mode='Systole',
                            shuffle=True,
                            train=True)
data_val = data_generator(path_val,
                          batchsize=batchsize,
                          mode='Systole',
                          shuffle=False,
                          train=False)
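# The callbacks import above is otherwise unused, so here is a hedged training
# sketch wiring checkpointing, CSV logging and the two generators together.
# The filenames and save_best_only are assumptions, not the source's settings.
checkpoint = callbacks.ModelCheckpoint(pathModel + 'weights.{epoch:03d}.hdf5',
                                       save_best_only=True)
csv_logger = callbacks.CSVLogger(pathLog + 'training.log')

model_systole.fit_generator(data_train,
                            steps_per_epoch=n_iter_train,
                            epochs=n_epochs,
                            validation_data=data_val,
                            validation_steps=n_iter_val,
                            callbacks=[checkpoint, csv_logger])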