Example No. 1
def main():
    (train_images, train_labels), (test_images, test_labels) = cnn.init()
    train_images, test_images = cnn.preprocess_normalise(
        train_images, test_images)
    sharpened = cnn.preprocess_contrast(train_images)
    img = train_images[2]
    plt.imshow(img, cmap="gray")
    plt.show()
    plt.imshow(sharpened[2], cmap="gray")
    plt.show()
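    # Note: '+=' on NumPy arrays adds the sharpened images to the originals
    # element-wise (a pixel-level blend); it does not append them as extra samples.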
    train_images += sharpened
    model = cnn.create_model(1)
    model.summary()
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    train_images, test_images = cnn.reshape(train_images, test_images)
    history = model.fit(train_images,
                        train_labels,
                        epochs=3,
                        validation_data=(test_images, test_labels))
    cnn.plot_history(history)
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    print(test_acc)
Example No. 2
    def _build(self):
        config = basic_config.Config()
        session_config = tf.ConfigProto(allow_soft_placement=True,
                                        log_device_placement=False)
        session_config.gpu_options.allow_growth = True
        g = tf.Graph()
        with g.as_default():
            with tf.device(None):
                self.sess = tf.Session(config=session_config)
                self.input_ids = tf.placeholder(
                    dtype=tf.int64,
                    shape=[config.batch_size, config.max_seq_length],
                    name='input_ids')
                self.e1_mas = tf.placeholder(
                    dtype=tf.int64,
                    shape=[config.batch_size, config.max_seq_length],
                    name='e1_mas')
                self.e2_mas = tf.placeholder(
                    dtype=tf.int64,
                    shape=[config.batch_size, config.max_seq_length],
                    name='e2_mas')
                self.labels, self.probs = cnn.create_model(
                    input_ids=self.input_ids,
                    e1_mas=self.e1_mas,
                    e2_mas=self.e2_mas)
                self.sess.run(tf.global_variables_initializer())
Example No. 3
def train(code, start, end):
    input_data = cnn_data_generater.generate_input_data(code, start, end)
    output_data = cnn_data_generater.generate_output_data(code, start, end)

    model = cnn.create_model()
    cnn.fit(model, input_data, output_data)
    cnn.save_model(model, code)
Example No. 4
def main():
    model = create_model()
    model.load_weights(f"models/{MODEL_WEIGHTS}")

    # Create argument parser to allow CLI commands
    parser = argparse.ArgumentParser(description="A cat-and-dog classifier")
    parser.add_argument(
        '-i',
        '--input',
        help="The folder directory in which the image is located on")
    parser.add_argument('-s',
                        '--show',
                        action="store_true",
                        help="Display the current image file")

    args = parser.parse_args()

    # If no argument is provided
    if len(sys.argv) <= 1:
        parser.print_help()
        sys.exit(1)

    # Performs the default operation
    if args.input:
        try:
            classify(model, args.input)

            # Plot the image
            if args.show:
                show_image(args.input)

        except FileNotFoundError:
            print("Error: No such folder. Try again")
            sys.exit(1)
Example No. 5
def load(event):
    global model
    model = cnn.create_model()
    try:
        model.load_weights("models/cnn")
        label_message["text"] = "Model loaded successfully"
    except Exception:
        label_message["text"] = "Model loading failed, no saved models found"
Example No. 6
def main(weights_dir, n):
    os.makedirs(weights_dir, exist_ok=True)

    (x_train, y_train), _ = cifar10.load_data()
    model = cnn.create_model(x_train.shape, 10)

    for i in range(n):
        reset_weights(model)
        model.save_weights(os.path.join(weights_dir, '{:03d}'.format(i)))
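reset_weights is not shown in this snippet. A minimal sketch of one way such a helper could re-initialize a tf.keras model in place, assuming TF 2.x and layers that expose kernel/bias initializers (illustrative only, not the original author's implementation):

def reset_weights(model):
    # Re-create each layer's kernel and bias from its own initializer so that
    # repeated calls produce independent random initializations.
    for layer in model.layers:
        for var_name, init_name in (("kernel", "kernel_initializer"),
                                    ("bias", "bias_initializer")):
            if hasattr(layer, var_name) and hasattr(layer, init_name):
                var = getattr(layer, var_name)
                initializer = getattr(layer, init_name)
                if var is not None and initializer is not None:
                    var.assign(initializer(var.shape, var.dtype))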
Example No. 7
def train_and_score(network):
    model = create_model(network['layers_and_filters'],
                         network['kernel_size'],
                         network['activation'], (IMG_SIZE, IMG_SIZE, levels),
                         network['dropout_rate'],
                         network['optimizer'],
                         network['learning_rate'],
                         output_size=train_Y.shape[1])
    fit_one_at_time(model, train_X, train_Y, epochs=network['epochs'])
    return score_one_at_time(model, test_X, test_Y)
Example No. 8
def train_and_score_in_memory(network):
    model = create_model(network['layers_and_filters'],
                         network['kernel_size'],
                         network['activation'], (IMG_SIZE, IMG_SIZE, levels),
                         network['dropout_rate'],
                         network['optimizer'],
                         network['learning_rate'],
                         output_size=train_Y.shape[1])
    result = model.fit(x=train_X, y=train_Y, epochs=network['epochs'])
    preds = model.predict(test_X)
    std_dev = np.std(preds)
    preds[preds >= 0.5] = 1
    preds[preds < 0.5] = 0
    _, _, f1 = scores(preds, test_Y)
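    # Final score: mean F1 plus twice the standard deviation of the raw
    # predictions (the std is computed before thresholding).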
    return np.mean(f1) + 2 * std_dev
Example No. 9
def train(event):
    global model
    label_message["text"] = "Training in progress..."
    label_message.update()
    cnn.load_data()
    (x_train, y_train), (x_test, y_test) = cnn.get_data()
    model = cnn.create_model()
    model.fit(x_train,
              y_train,
              batch_size=100,
              validation_data=(x_test, y_test))
    label_message["text"] = "Done! Now testing..."
    label_message.update()
    test_loss, test_acc = model.evaluate(x_test, y_test)
    label_message["text"] = "Test accuracy: {}%".format(
        round(test_acc * 100, 2))
Example No. 10
    def __init__(self, args):

        config_path = join(THIS_DIR_PATH, args[0])
        with open(config_path) as f:
            self.config = json.load(f)
        self.img_height = self.config['network']['input_height']
        self.network = model.create_model(self.config)
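        # Load the saved state dict onto the GPU when available; otherwise map it to CPU.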
        if torch.cuda.is_available():
            self.network.cuda()
            self.dtype = torch.cuda.FloatTensor
            self.network.load_state_dict(
                torch.load(join(THIS_DIR_PATH,
                                self.config['model_save_path'])))
        else:
            self.dtype = torch.FloatTensor
            self.network.load_state_dict(
                torch.load(join(THIS_DIR_PATH, self.config['model_save_path']),
                           map_location='cpu'))
        self.network.eval()
Example No. 11
from configuration import MAX_MEMORY, EPOCHS, MODEL_NAME
from cnn import create_model
from datetime import datetime

memory = []
moves = 3
learningRate = 0.9
epsilon = 1.0
epsilon_min = 0.01
epsilon_decay = 0.995
model = create_model()

for i in range(EPOCHS):
	game_over = False
	input_img, errors = getImage()
	errors = False
	reward = 0
	while not game_over:
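		# Epsilon-greedy action selection: explore with probability epsilon, otherwise act on the model's prediction.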
		if np.random.rand() <= epsilon:
			action = np.random.randint(0, moves, size=1)[0]
		else:
			output = model.predict(input_img)
			action = np.argmax(output[0])
Example No. 12
else:
    input_shape = (img_width, img_height, 3)

# Data generators: rescale pixel values and stream images from the train/validation directories
training_data_gen = ImageDataGenerator(rescale=1. / 255)
training_data_img = training_data_gen.flow_from_directory(
    train_data_dir, batch_size=batch_size, target_size=(img_width, img_height))

validation_data_gen = ImageDataGenerator(rescale=1. / 255)
validation_data_img = validation_data_gen.flow_from_directory(
    validation_data_dir,
    batch_size=batch_size,
    target_size=(img_width, img_height))

# Build, compile and train the model
model = cnn.create_model(input_shape, len(training_data_img.class_indices))

model.compile(optimizer=keras.optimizers.adam(),
              loss=keras.losses.categorical_crossentropy,
              metrics=['accuracy'])

model.summary()

model.fit_generator(training_data_img,
                    steps_per_epoch=training_data_img.samples / batch_size,
                    validation_data=validation_data_img,
                    validation_steps=validation_data_img.samples / batch_size,
                    epochs=epochs,
                    verbose=1)

predictions = model.predict_generator(generator=validation_data_img,
Example No. 13
def train(model_type='parallel', label_set='full', drop_unk=False,
          word_vecs=None, setup_only=False):
    print "Loading data..."
    df = sentences_df(SENTENCES_CSV, labels=label_set, drop_unk=drop_unk)
    X, y, word2idx, l_enc = load_dataset(df, pad=True)
    print "X shape:", X.shape
    y_orig = y
    y_binary = to_categorical(y)
    labels = np.unique(y_orig)
    nb_labels = labels.shape[0]
    if drop_unk:
        label_set_str = label_set + ' (-unk)'
    else:
        label_set_str = label_set
    print "Number of labels: %i [%s]" % (nb_labels, label_set_str)
    if nb_labels > 2:
        y = y_binary
    maxlen = X.shape[1]
    vocab_size = len(word2idx) + 1 # 0 masking
    if pretrained_embeddings is True:
        word_vectors = load_bin_vec(word_vecs, word2idx)
        add_unknown_words(word_vectors, word2idx)
        embedding_weights = np.zeros((vocab_size+1, emb_dim))
        for word, index in word2idx.items():
            embedding_weights[index,:] = word_vectors[word]
    else:
        embedding_weights = None
    print "Data loaded."

    if setup_only:
        cnn = create_model(vocab_size, nb_labels, emb_dim, maxlen,
                           embedding_weights, filter_hs, nb_filters,
                           dropout_p, trainable_embeddings,
                           pretrained_embeddings, model_type=model_type)
        return {'X': X,
                'y': y,
                'word2idx': word2idx,
                'l_enc': l_enc,
                'y_binary': y_binary,
                'labels': labels,
                'nb_labels': nb_labels,
                'maxlen': maxlen,
                'emb_dim': emb_dim,
                'vocab_size': vocab_size,
                'embedding_weights': embedding_weights,
                'cnn': cnn}

    params = [('filter_hs',filter_hs), ('nb_filters',nb_filters),
              ('dropout_p',dropout_p),
              ('trainable_embeddings',trainable_embeddings),
              ('pretrained_embeddings',pretrained_embeddings),
              ('batch_size',batch_size), ('nb_epoch',nb_epoch),
              ('lr',lr), ('beta_1',beta_1), ('beta_2',beta_2),
              ('epsilon',epsilon)]
    print "\nModel type: %s" % model_type
    for (name, value) in params:
        print name + ':', value

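    # 10-fold stratified cross-validation: a fresh model is built and evaluated for each fold.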
    skf = StratifiedKFold(y_orig, n_folds=10, shuffle=True, random_state=0)
    cv_scores = []
    for i, (train, test) in enumerate(skf):
        start_time = time.time()
        cnn = None
        cnn = create_model(vocab_size,
                           nb_labels,
                           emb_dim,
                           maxlen,
                           embedding_weights,
                           filter_hs,
                           nb_filters,
                           dropout_p,
                           trainable_embeddings,
                           pretrained_embeddings,
                           model_type=model_type)
        if i == 0:
            print_summary(cnn.model.layers)

        acc = train_and_test_model(cnn, X[train], y[train], X[test], y[test],
                                   batch_size, nb_epoch,
                                   lr, beta_1, beta_2, epsilon)
        cv_scores.append(acc)
        train_time = time.time() - start_time
        print('\nLabel frequencies in y[test]')
        print_label_frequencies((y_orig[test], l_enc))
        y_pred = cnn.model.predict(X[test])
        y_pred = probas_to_classes(y_pred)
        c = Counter(y_pred)
        total = float(len(y_pred))
        print('\nLabel frequencies in predict(y[test])')
        for label, count in c.most_common():
            print l_enc.inverse_transform(label), count, count / total
        print "fold %i/10 - time: %.2f s - acc: %.4f on %i samples" % \
            (i+1, train_time, acc, len(test))
    print "Avg cv accuracy: %.4f" % np.mean(cv_scores)
Example No. 14
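    # Note: the file paths appear swapped relative to the variable names; since both
    # corpora are concatenated below for Word2Vec training, the result is unaffected.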
    text_train = get_cleaned_text('../data/test_text')
    text_test = get_cleaned_text('../data/training_text')
    w2v_model = Word2Vec(text_train + text_test,
                         size=vector_size,
                         window=5,
                         min_count=3,
                         workers=4,
                         iter=5)
    return w2v_model


if __name__ == '__main__':
    train_variants = pd.read_csv('../data/training_variants')
    test_variants = pd.read_csv('../data/test_variants')

    n_text = len(train_variants)
    #w2v_model  = create_w2v()
    #w2v_model = Word2Vec.load('w2v_tt200')
    w2v_model = Word2Vec.load('w2v_tt50')

    fvec = get_feature_vector2('../data/training_text', train_variants,
                               n_text + 1)

    wvec_size = w2v_model.layer1_size

    #nn_model = cnn.test(fvec2)
    nn_model = cnn.create_model(fvec[:, :, :], train_variants, epochs=10)

    fvec_test = get_feature_vector2('../data/test_text', test_variants)
    cnn.predict_cnn(nn_model, fvec_test)
Example No. 15
import pickle as pkl
import config
import cnn

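# Load the six pickled splits in a fixed order; the tuple unpacking below relies on that order.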
data = []
for i, pickle_file in enumerate(
    ["X_train", "Y_train", "X_validation", "Y_validation", "X_test",
     "Y_test"]):
    with open("data/" + pickle_file + ".pkl", "rb") as pklf:
        data.append(pkl.load(pklf))

X_train, Y_train, X_validation, Y_validation, X_test, Y_test = data
print(X_train.shape)
print("Creating model...")
model = cnn.create_model(config.n_classes, config.image_size,
                         config.learning_rate)
model.summary()
print("Training the model...")
model.fit(
    X_train,
    Y_train,
    epochs=config.epochs,
    batch_size=config.batch_size,
    shuffle=True,
    validation_data=(X_validation, Y_validation),
)
print("Model trained!")
print("Saving the weights...")
model.save_weights('audio_event_classifier.h5')
print("Weights saved!")
Example No. 16
def main():
    config_path = sys.argv[1]

    with open(config_path) as f:
        config = json.load(f)


    # TODO: replace the next line by initializing the dataset objects for
    # training and validation directly, i.e. create two HwDataset objects.
    train_dataset, test_dataset = datasets.get_training_and_validation_datasets(
        config['image_root_directory'])
    train_dataset = HwDataset(
        config['image_root_directory']
    )  # This doesn't work yet as is -- it probably needs to be passed something else
    test_dataset = HwDataset(config)

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=8,
                                  shuffle=True,
                                  num_workers=0,
                                  collate_fn=hw_dataset.collate)

    test_dataloader = DataLoader(test_dataset,
                                 batch_size=8,
                                 shuffle=False,
                                 num_workers=0,
                                 collate_fn=hw_dataset.collate)

    hw = model.create_model(config)

    if torch.cuda.is_available():
        hw.cuda()
        dtype = torch.cuda.FloatTensor
        print("Using GPU")
    else:
        dtype = torch.FloatTensor
        print("No GPU detected")

    optimizer = torch.optim.Adam(hw.parameters(),
                                 lr=config['network']['learning_rate'])
    criterion = torch.nn.CrossEntropyLoss()
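    # Despite its name, lowest_loss tracks the best test accuracy seen so far (higher is better).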
    lowest_loss = 0.0
    for epoch in range(1000):
        sum_loss = 0.0
        steps = 0.0
        hw.train()
        for i, x in enumerate(train_dataloader):
            line_imgs = Variable(x['line_imgs'].type(dtype),
                                 requires_grad=False)
            gts = Variable(torch.from_numpy(np.array(x['gt'])))

            preds = hw(line_imgs).cpu()

            loss = criterion(preds, gts)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
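            # Note: sum_loss accumulates the count of correct predictions here;
            # it feeds the training-accuracy print below.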
            prs = preds.data.numpy()
            prs = np.argmax(prs, axis=1)
            out = [
                1 for num in range(len(prs))
                if prs[num] == gts.data.numpy()[num]
            ]
            sum_loss += sum(out)

        print("Training accuracy: %.2f" % (sum_loss * 100 /
                                           (len(train_dataloader) * 8)) + "%")
        sum_loss = 0.0
        steps = 0.0
        hw.eval()
        for x in test_dataloader:
            line_imgs = Variable(x['line_imgs'].type(dtype),
                                 requires_grad=False,
                                 volatile=True)
            gts = Variable(torch.from_numpy(np.array(x['gt'])))

            preds = hw(line_imgs).cpu()

            #output_batch = preds.permute(1,0,2)
            out = preds.data.cpu().numpy()

            for i, gt_line in enumerate(x['gt']):
                logits = out[i, ...]
                max = np.argmax(logits)
                sum_loss += 1 if max == gt_line else 0
                steps += 1

        print("Test accuracy: %.2f" % (sum_loss * 100 / steps) + '%')

        if lowest_loss < sum_loss / steps:
            lowest_loss = sum_loss / steps
            print("Saving Best")
            dirname = os.path.dirname(config['model_save_path'])
            if len(dirname) > 0 and not os.path.exists(dirname):
                os.makedirs(dirname)

            torch.save(hw.state_dict(),
                       os.path.join(config['model_save_path']))
Example No. 17
def train(model_type='parallel',
          label_set='full',
          drop_unk=False,
          word_vecs=None,
          setup_only=False):
    print "Loading data..."
    df = sentences_df(SENTENCES_CSV, labels=label_set, drop_unk=drop_unk)
    X, y, word2idx, l_enc = load_dataset(df, pad=True)
    print "X shape:", X.shape
    y_orig = y
    y_binary = to_categorical(y)
    labels = np.unique(y_orig)
    nb_labels = labels.shape[0]
    if drop_unk:
        label_set_str = label_set + ' (-unk)'
    else:
        label_set_str = label_set
    print "Number of labels: %i [%s]" % (nb_labels, label_set_str)
    if nb_labels > 2:
        y = y_binary
    maxlen = X.shape[1]
    vocab_size = len(word2idx) + 1  # 0 masking
    if pretrained_embeddings is True:
        word_vectors = load_bin_vec(word_vecs, word2idx)
        add_unknown_words(word_vectors, word2idx)
        embedding_weights = np.zeros((vocab_size + 1, emb_dim))
        for word, index in word2idx.items():
            embedding_weights[index, :] = word_vectors[word]
    else:
        embedding_weights = None
    print "Data loaded."

    if setup_only:
        cnn = create_model(vocab_size,
                           nb_labels,
                           emb_dim,
                           maxlen,
                           embedding_weights,
                           filter_hs,
                           nb_filters,
                           dropout_p,
                           trainable_embeddings,
                           pretrained_embeddings,
                           model_type=model_type)
        return {
            'X': X,
            'y': y,
            'word2idx': word2idx,
            'l_enc': l_enc,
            'y_binary': y_binary,
            'labels': labels,
            'nb_labels': nb_labels,
            'maxlen': maxlen,
            'emb_dim': emb_dim,
            'vocab_size': vocab_size,
            'embedding_weights': embedding_weights,
            'cnn': cnn
        }

    params = [('filter_hs', filter_hs), ('nb_filters', nb_filters),
              ('dropout_p', dropout_p),
              ('trainable_embeddings', trainable_embeddings),
              ('pretrained_embeddings', pretrained_embeddings),
              ('batch_size', batch_size), ('nb_epoch', nb_epoch), ('lr', lr),
              ('beta_1', beta_1), ('beta_2', beta_2), ('epsilon', epsilon)]
    print "\nModel type: %s" % model_type
    for (name, value) in params:
        print name + ':', value

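    # 10-fold stratified cross-validation: a fresh model is built and evaluated for each fold.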
    skf = StratifiedKFold(y_orig, n_folds=10, shuffle=True, random_state=0)
    cv_scores = []
    for i, (train, test) in enumerate(skf):
        start_time = time.time()
        cnn = None
        cnn = create_model(vocab_size,
                           nb_labels,
                           emb_dim,
                           maxlen,
                           embedding_weights,
                           filter_hs,
                           nb_filters,
                           dropout_p,
                           trainable_embeddings,
                           pretrained_embeddings,
                           model_type=model_type)
        if i == 0:
            print_summary(cnn.model.layers)

        acc = train_and_test_model(cnn, X[train], y[train], X[test], y[test],
                                   batch_size, nb_epoch, lr, beta_1, beta_2,
                                   epsilon)
        cv_scores.append(acc)
        train_time = time.time() - start_time
        print('\nLabel frequencies in y[test]')
        print_label_frequencies((y_orig[test], l_enc))
        y_pred = cnn.model.predict(X[test])
        y_pred = probas_to_classes(y_pred)
        c = Counter(y_pred)
        total = float(len(y_pred))
        print('\nLabel frequencies in predict(y[test])')
        for label, count in c.most_common():
            print l_enc.inverse_transform(label), count, count / total
        print "fold %i/10 - time: %.2f s - acc: %.4f on %i samples" % \
            (i+1, train_time, acc, len(test))
    print "Avg cv accuracy: %.4f" % np.mean(cv_scores)
Example No. 18
frame_probs.grid(row=0, column=2)

labels_prob = []
for i in range(10):
    labels_prob.append(
        tk.Label(frame_probs,
                 text="{}: 0.00%".format(i),
                 anchor=tk.W,
                 justify=tk.LEFT))
    labels_prob[-1].grid(row=i, sticky="W")

root.mainloop()
exit(0)

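# Note: everything below exit(0) is unreachable when this script runs.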
cnn.load_data()
(x_train, y_train), (x_test, y_test) = cnn.get_data()

model = cnn.create_model()
model.summary()
model.fit(x_train, y_train, batch_size=100, validation_data=(x_test, y_test))
test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test accuracy:', test_acc)

model.save_weights("models/cnn")

model = cnn.create_model()
model.load_weights("models/cnn")
test_loss, test_acc = model.evaluate(x_test, y_test)

print('Test accuracy:', test_acc)
Example No. 19
def main(debug):
    n_times = 10
    weights_dir = '_weights'
    models_dir = '_models'
    plots_dir = '_plots'
    results_filename = 'results.pkl'

    x_train, y_train, x_test, y_test, num_classes = prepare_data(debug)
    batches = [32, 128, 512, 20000, len(x_train)]
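    # Keep the total number of weight updates fixed (100 epochs at batch size 32);
    # the epoch count for each batch size is derived from this below.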
    WEIGHT_UPDATES = int(len(x_train) / 32 * 100)

    os.makedirs(models_dir, exist_ok=True)
    os.makedirs(plots_dir, exist_ok=True)

    model = cnn.create_model(x_train.shape, num_classes)
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(),
                  metrics=['accuracy'])
    model.summary()

    results = {}
    # recover results
    if os.path.isfile(results_filename):
        results = pickle.load(open(results_filename, 'rb'))

    for weights_filename in sorted(glob.glob(weights_dir + '/*')):
        w_num = int(weights_filename[-3:])
        if w_num not in results:
            results[w_num] = {}

        for batch_size in batches:
            if batch_size not in results[w_num]:
                results[w_num][batch_size] = {}

            for exp_num in range(n_times):
                name = '{}_{}_{}'.format(weights_filename[-3:],
                                         batch_size, exp_num)

                if exp_num not in results[w_num][batch_size]:
                    print('Start {}'.format(name))
                else:
                    print('Skip: {} - already calculated'.format(name))
                    continue

                model.load_weights(weights_filename)
                updates_per_epochs = len(x_train) // batch_size
                epochs = WEIGHT_UPDATES // updates_per_epochs

                history = model.fit(x_train, y_train,
                                    batch_size=batch_size,
                                    epochs=epochs,
                                    validation_data=(x_test, y_test),
                                    shuffle=True)

                y_scores = model.predict_proba(x_test)

                results[w_num][batch_size][exp_num] = save_results(
                    model, name, history, y_test, y_scores,
                    models_dir, plots_dir,
                    debug)

                pickle.dump(results, open(results_filename, 'wb'))
Example No. 20
from cnn import create_model
import tensorflow as tf
import numpy as np

W, D = 19, 500


def create_fake_images(value=0, N=10, w=W):
    return tf.constant(value, shape=(N, w, w, w, 1), dtype=tf.dtypes.float32)


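# Fake labels: a scalar target column (filled with r) followed by a D-way
# probability vector that is one-hot on class 0.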
def create_fake_labels(r=1, d=D, N=10):
    prob = np.zeros([N, d], dtype="float32")
    prob[:, 0] = 1
    return np.hstack(
        [np.full(fill_value=r, shape=(N, 1), dtype="float32"), prob])


if __name__ == "__main__":
    model = create_model(W, D)
    preds = model.predict(create_fake_images())
    # print(preds[0].shape, preds[1].shape)
    model.evaluate(create_fake_images(N=5), create_fake_labels(N=5), verbose=1)
    # print(len(create_fake_labels()))