Example #1
def evaluate(epoch_id, model, train, test, batch_size=4, min_res_size=16):
	gen = batch_generator(batch_size, min_res_size, train, test, False)
	batch_n = next(gen)

	evalMetrics = EvaluationMetrics(batch_n)

	model.zero_grad()

	#s = perf_counter()
	for n in range(batch_n):
		X, y, _, bboxes = next(gen)

		with torch.no_grad():
			prediction = model(X)
		
		# evaluate this batch
		#print(y, prediction)
		evalMetrics.evaluateBatch(y, prediction)

		del X, y, prediction, bboxes

		gc.collect()
		torch.cuda.empty_cache()
		loading(n+1, batch_n)


	#print("total time = ", perf_counter() - s)
	evalMetrics.getEvaluation(epoch_id)

	return evalMetrics.pixel_acc, evalMetrics.iou, evalMetrics.dice_coeff
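Example #1 (and #15 below) assumes a batch_generator whose first next() yields the number of batches and whose later yields are batch tuples. The dataset module itself is not shown, so the following is only a minimal sketch of that protocol, with hypothetical shapes and random data standing in for the real loader:

# Minimal sketch of the protocol Example #1 expects: the first yield is the
# batch count, later yields are (X, y, name, bboxes) tuples. Shapes and data
# are hypothetical placeholders for the real dataset module.
import torch

def batch_generator(batch_size, min_res_size, train, test, shuffle):
    # train/test/shuffle are accepted for signature parity; unused in this sketch
    n_batches = 8  # hypothetical; the real code derives this from the dataset
    yield n_batches
    for _ in range(n_batches):
        X = torch.rand(batch_size, 3, min_res_size, min_res_size)  # input images
        y = torch.rand(batch_size, 1, min_res_size, min_res_size)  # target masks
        names = ["sample"] * batch_size                            # sample ids
        bboxes = torch.zeros(batch_size, 4)                        # bounding boxes
        yield X, y, names, bboxes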
Example #2
def test(val_model, session):
    its = 0
    val_summaries = []
    val_loss = 0
    true_labels = []
    predicted_labels = []
    for X_gene_seqs, X_regulator_expression, Y_gene_expression in dataset.batch_generator(
            dataset.getTest(), val_model.batch_size):

        feed_dict = {
            val_model.X_gene_seqs: X_gene_seqs,
            val_model.X_regulator_expression: X_regulator_expression / 10,
            val_model.Y_gene_expression: discretize(Y_gene_expression)
        }
        vl, val_summary, pred_lab = session.run(
            [
                val_model.expression_loss, val_model.summary,
                val_model.predicted_labels
            ],
            feed_dict=feed_dict)
        val_loss += vl

        val_summaries.append(val_summary)
        disc = discretize(Y_gene_expression)
        for k in range(len(pred_lab)):
            predicted_labels.append(pred_lab[k])
            true_labels.append(disc[k])
        its += 1

    return val_loss / its, val_summaries, confusion_matrix(
        np.array(true_labels), np.array(predicted_labels))
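Examples #2, #5, #6, #7, and #10 all iterate over a batch_generator(split, batch_size) that yields (X_gene_seqs, X_regulator_expression, Y_gene_expression) triples. The dataset module is not included, so this is only a plausible sketch of that interface, with an assumed layout for a dataset split:

# Hedged sketch of the triple-yielding interface the TF examples consume;
# the (seqs, regulators, expression) layout of a split is an assumption.
import numpy as np

def batch_generator(split, batch_size):
    X_seqs, X_reg, Y_expr = split  # assumed layout of e.g. dataset.getTest()
    for start in range(0, len(Y_expr), batch_size):
        end = start + batch_size
        yield X_seqs[start:end], X_reg[start:end], Y_expr[start:end]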
Example #3
def train(name, labels, unlabels, aug, resume):

    numpy.set_printoptions(
        precision=2,
        linewidth=140)

    # paths
    log_path = "logs/{}.json".format(name)
    out_path = "snapshots/" + name + ".{epoch:06d}.h5"
    echo('log path', log_path)
    echo('out path', out_path)

    lib.log.info(log_path, {'_commandline': {
        'name': name, 'labels': labels, 'unlabels': unlabels, 'aug': aug, 'resume': resume}})

    # init
    echo('train', (name, resume))
    session = tf.Session('')
    K.set_session(session)
    K.set_learning_phase(1)

    # dataset
    echo('dataset loading...')
    batch_size = 100
    gen_train, gen_test = dataset.batch_generator(labels, unlabels, batch_size=batch_size, aug=aug)

    # model building
    echo('model building...')
    model = lib.model.build()
    model.summary()
    if resume:
        echo('Resume Learning from {}'.format(resume))
        model.load_weights(resume, by_name=True)

    # x_tensor = model.layers[2].get_input_at(0)
    # z_tensor = model.layers[2].get_output_at(0)
    # embedding = K.function([x_tensor], [z_tensor])
    # z_tensor = model.layers[5].get_input_at(0)
    # y_tensor = model.layers[5].get_output_at(0)
    # predict = K.function([z_tensor], [y_tensor])

    # training
    echo('start learning...')
    callbacks = [
        lib.log.JsonLog(log_path),
        # keras.callbacks.ModelCheckpoint(out_path, monitor='val_loss', save_weights_only=True)
    ]
    model.fit_generator(
        gen_train,
        epochs=50,
        steps_per_epoch=(59900 // batch_size),
        validation_data=gen_test,
        validation_steps=(1000 // batch_size),
        callbacks=callbacks)
Example #4
def train(name, resume, batch_size, epochs, verbose):

    # paths
    log_path = "logs/{}.json".format(name)
    out_path = "snapshots/" + name + ".{epoch:06d}.h5"
    echo('log path', log_path)
    echo('out path', out_path)

    # init
    echo('train', locals())
    logging.info(log_path, {'train': locals()})
    session = tf.Session('')
    K.set_session(session)
    K.set_learning_phase(1)

    # dataset
    echo('dataset loading...')
    seq_train, seq_valid = dataset.batch_generator(batch_size)

    # model building
    echo('model building...')
    model = Model.build()
    model.summary()
    if resume:
        echo('Resume Learning from {}'.format(resume))
        model.load_weights(resume, by_name=True)

    # training
    echo('start learning...')
    callbacks = [
        logging.JsonLog(log_path),
        keras.callbacks.ModelCheckpoint(
            out_path,
            monitor='val_loss',
            save_weights_only=True,
            save_best_only=True,
        )
    ]
    model.fit_generator(
        seq_train,
        validation_data=seq_valid,
        shuffle=True,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        workers=1,
        use_multiprocessing=True,
    )
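Examples #4, #13, #14, and #16 unpack dataset.batch_generator(batch_size) into a (train, valid) pair handed straight to fit_generator with use_multiprocessing=True, which suggests keras.utils.Sequence objects rather than plain generators. A minimal sketch under that assumption, with random placeholder data:

# Minimal sketch assuming batch_generator returns two keras.utils.Sequence
# objects (standalone Keras, matching the K.set_session usage above);
# the data here is a random placeholder for the real dataset.
import numpy as np
import keras

class _Batches(keras.utils.Sequence):
    def __init__(self, X, y, batch_size):
        self.X, self.y, self.batch_size = X, y, batch_size

    def __len__(self):
        return len(self.X) // self.batch_size

    def __getitem__(self, i):
        s = slice(i * self.batch_size, (i + 1) * self.batch_size)
        return self.X[s], self.y[s]

def batch_generator(batch_size):
    X = np.random.rand(1000, 32, 32, 3)   # placeholder inputs
    y = np.random.rand(1000, 10)          # placeholder targets
    split = 900
    return (_Batches(X[:split], y[:split], batch_size),
            _Batches(X[split:], y[split:], batch_size))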
Example #5
def test(model, session):
    its = 0
    test_summaries = []
    test_loss = 0
    for X_gene_seqs, X_regulator_expression, Y_gene_expression in dataset.batch_generator(
            dataset.getTest(), model.batch_size):
        feed_dict = {
            model.X_gene_seqs: X_gene_seqs,
            model.X_regulator_expression: X_regulator_expression,
            model.Y_gene_expression: Y_gene_expression
        }
        tl, test_summary = session.run([model.expression_loss, model.summary],
                                       feed_dict=feed_dict)
        test_loss += tl
        test_summaries.append(test_summary)
        its += 1

    return test_loss / its, test_summaries
Example #6
def validate(val_model, session):
    its = 0
    val_summaries = []
    val_loss = 0
    for X_gene_seqs, X_regulator_expression, Y_gene_expression in dataset.batch_generator(
            dataset.getVal(), val_model.batch_size):
        feed_dict = {
            val_model.X_gene_seqs: X_gene_seqs,
            val_model.X_regulator_expression: X_regulator_expression,
            val_model.Y_gene_expression: Y_gene_expression
        }
        vl, val_summary = session.run(
            [val_model.expression_loss, val_model.summary],
            feed_dict=feed_dict)
        val_loss += vl
        val_summaries.append(val_summary)
        its += 1

    return val_loss / its, val_summaries
Example #7
def train_epoch(train_model,
                val_model,
                session,
                global_iters,
                epoch,
                val_every=4000):
    summary_writer = tf.train.SummaryWriter(FLAGS.log_dir)  # renamed tf.summary.FileWriter in TF >= 1.0
    i = 0
    print("training")
    for X_gene_seqs, X_regulator_expression, Y_gene_expression in dataset.batch_generator(
            dataset.getTrain(), train_model.batch_size):
        feed_dict = {
            train_model.X_gene_seqs: X_gene_seqs,
            train_model.X_regulator_expression: X_regulator_expression,
            train_model.Y_gene_expression: Y_gene_expression
        }
        expression_loss, train_summary, _, seq, pred = session.run(
            [
                train_model.expression_loss, train_model.summary,
                train_model.train_op, train_model.seq_features,
                train_model.expression_prediction
            ],
            feed_dict=feed_dict)
        summary_writer.add_summary(train_summary, global_iters)

        if i % 50 == 49:
            print "expression loss: %.4f" % expression_loss
            print "approximately %.4f percent remaining in epoch %d" % (
                (100 * (1 - i * 32 / float(633906))), epoch)
            print pred
        if i % val_every == val_every - 1:
            val_loss, val_summaries = validate(val_model, session)
            for summary in val_summaries:
                summary_writer.add_summary(summary, global_iters)
            summary_writer.flush()
            print "validation loss was: %.4f" % val_loss
        i += 1

        global_iters += 1

    return global_iters
Example #8
def model_train(model):
    callback = get_callbacks("predict_labels_validacc", model)
    # custom validation callback
    my_callback = ValidCallback()
    callbacks = [my_callback] + callback
    print('!!!!!', model.metrics_names)

    tfrecord_file = FLAGS.train_data
    batch_size = FLAGS.batch_size
    n_classes = FLAGS.class_number
    shuffle_buffer = 5000
    train_gen = batch_generator(tfrecord_file, batch_size, shuffle_buffer)
    steps_per_epoch = int((5822652 / batch_size) / 30)
    model.fit_generator(
        train_gen,
        #  steps_per_epoch=30,
        #  steps_per_epoch=steps_per_epoch,
        steps_per_epoch=FLAGS.steps_per_epoch,
        epochs=FLAGS.epochs,
        callbacks=callbacks,
        verbose=1)
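Example #8 feeds fit_generator from a TFRecord file via batch_generator(tfrecord_file, batch_size, shuffle_buffer). The real implementation is not shown; one plausible TF1-era sketch uses tf.data with a hypothetical feature spec (the 'image'/'label' keys, shapes, and class count below are assumptions):

# Hypothetical TF1-style sketch of a TFRecord-backed generator for Keras;
# the feature keys and shapes are assumptions, not the real schema.
import tensorflow as tf

def batch_generator(tfrecord_file, batch_size, shuffle_buffer):
    def _parse(serialized):
        feats = tf.parse_single_example(serialized, {
            'image': tf.FixedLenFeature([28 * 28], tf.float32),  # assumed key/shape
            'label': tf.FixedLenFeature([], tf.int64),           # assumed key
        })
        return feats['image'], tf.one_hot(feats['label'], 10)  # 10 = assumed FLAGS.class_number

    ds = (tf.data.TFRecordDataset(tfrecord_file)
          .map(_parse)
          .shuffle(shuffle_buffer)
          .repeat()
          .batch(batch_size))
    next_batch = ds.make_one_shot_iterator().get_next()
    with tf.Session() as sess:
        while True:
            yield sess.run(next_batch)  # (images, one-hot labels) numpy batch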
Example #9
def test(snapshot, batch_size):

    # init
    echo('test', locals())
    session = tf.Session('')
    K.set_session(session)
    K.set_learning_phase(0)

    # model loading
    echo('model loading...')
    model = Model.build()
    model.load_weights(snapshot)

    # testing data
    echo('testing dataset loading...')
    seq_test = dataset.batch_generator(batch_size, test=True)

    # testing
    results = model.evaluate_generator(seq_test)
    for metrics, value in zip(model.metrics_names, results):
        print(f"{metrics}: {value}")
Example #10
def train_epoch(train_model, val_model, session, global_iters, val_every=4000):
    summary_writer = tf.train.SummaryWriter(FLAGS.log_dir)  # renamed tf.summary.FileWriter in TF >= 1.0
    i = 0
    print("training")
    for X_gene_seqs, X_regulator_expression, Y_gene_expression in dataset.batch_generator(
            dataset.getTrain(), train_model.batch_size):

        feed_dict = {
            train_model.X_gene_seqs: X_gene_seqs,
            train_model.X_regulator_expression: X_regulator_expression / 10,
            train_model.Y_gene_expression: discretize(Y_gene_expression)
        }
        expression_loss, train_summary, _ = session.run(
            [
                train_model.expression_loss, train_model.summary,
                train_model.train_op
            ],
            feed_dict=feed_dict)
        summary_writer.add_summary(train_summary, global_iters)

        if i % 200 == 199:
            print(discretize(Y_gene_expression))
            print("expression loss: %.4f" % expression_loss)
            print("approximately %.4f percent remaining" % (
                100 * (1 - i * 32 / float(633906))))

        if i % val_every == val_every - 1:
            val_loss, val_summaries, val_confusion = validate(
                val_model, session)
            for summary in val_summaries:
                summary_writer.add_summary(summary, global_iters)
            summary_writer.flush()
            print "====VALIDATING===="
            print "validation loss was: %.4f" % val_loss
            print "confusion matrix", val_confusion
            print "====VALIDATING===="
        i += 1

        global_iters += 1

    return global_iters
Example #11
def test(name, snapshot):

    config = Config(name)

    if snapshot is None:
        snapshots = sorted(glob(f"snapshots/{name}.*.h5"), reverse=True)
        if len(snapshots) == 0:
            echo(f"[Error] No snapshots found for name={name}", fg='red')
            return
        snapshot = snapshots[0]
        del snapshots

    # running parameters
    run_params = locals()
    del run_params['config']
    run_params.update(dict(config))

    # init
    echo('test', run_params)
    session = tf.Session('')
    K.set_session(session)
    K.set_learning_phase(0)

    # model loading
    echo(f'model loading from {snapshot}...')
    model = build(config)
    model.load_weights(snapshot)

    # testing data
    echo('test dataset loading...')
    seq_test = dataset.batch_generator(config('batch_size'), test=True)

    # testing
    echo('testing...')
    results = model.evaluate_generator(seq_test)
    for metrics, value in zip(model.metrics_names, results):
        print(f"{metrics}: {value}")
Example #12
def train(name, critics, clip, batch_size, epochs, out, verbose):

    # paths
    log_path = "logs/{}.json".format(name)
    out_path = "snapshots/" + name + ".{epoch:06d}.h5"
    echo('log path', log_path)
    echo('out path', out_path)

    # init
    echo('train', locals())
    logging.info(log_path, {'train': locals()})
    session = tf.Session('')
    K.set_session(session)
    K.set_learning_phase(1)

    # dataset
    echo('dataset loading...')
    data = dataset.batch_generator(batch_size)

    # model building
    echo('model building...')
    models = Model.build()
    echo('- Generator:')
    models[0].summary()
    echo('- Critic:')
    models[1].summary()

    # training
    echo('start learning...')
    Model.train(models,
                data,
                epochs,
                batch_size,
                critics,
                clip,
                out=out,
                log=lambda data: logging.info(log_path, data))
Example #13
def train(name, resume, verbose):

    config = Config(name)

    # paths
    log_path = f"logs/{name}.json"
    out_path = f"snapshots/{name}.{{epoch:06d}}.h5"
    echo('log path', log_path)
    echo('out path', out_path)

    # running parameters
    run_params = locals()
    del run_params['config']
    run_params.update(dict(config))

    # init
    echo('train', run_params)
    log = logging.Logger(log_path)
    log({'train': run_params})
    session = tf.Session('')
    K.set_session(session)
    K.set_learning_phase(1)

    if len(list(glob(f"snapshots/{name}.*.h5"))) > 0:
        echo(
            f"Error: Some snapshots for name ({name}) already exists. Use another name",
            fg='red')
        return

    # dataset
    echo('dataset loading...')
    seq_train, seq_valid = dataset.batch_generator(config('batch_size'))

    # model building
    echo('model building...')
    model = build(config)
    model.summary()
    if resume:
        echo('Resume Learning from {}'.format(resume))
        model.load_weights(resume, by_name=True)

    # training
    echo('start learning...')
    callbacks = [
        logging.JsonLog(log_path),
        keras.callbacks.ModelCheckpoint(
            out_path,
            monitor='val_loss',
            save_weights_only=True,
            save_best_only=True,
        ),
    ]
    model.fit_generator(
        seq_train,
        validation_data=seq_valid,
        shuffle=True,
        epochs=config('epochs'),
        verbose=verbose,
        callbacks=callbacks,
        workers=1,
        use_multiprocessing=True,
    )
Example #14
    log = logging.Logger(log_path)
    log({'train': run_params})
    session = tf.Session('')
    K.set_session(session)
    K.set_learning_phase(1)

    if len(list(glob(f"snapshots/{name}.*.h5"))) > 0:
        echo(f"Error: Some snapshots for name ({name}) already exists. Use another name", fg='red')
        return

    # dataset
    echo('dataset loading...')
{%- if cookiecutter.fit_generator == "no" %}
    X, y = dataset.load()
{%- else %}
    seq_train, seq_valid = dataset.batch_generator(config('batch_size'))
{%- endif %}

    # model building
    echo('model building...')
    model = build(config)
    model.summary()
    if resume:
        echo('Resume Learning from {}'.format(resume))
        model.load_weights(resume, by_name=True)

    # training
    echo('start learning...')
    callbacks = [
        logging.JsonLog(log_path),
        keras.callbacks.ModelCheckpoint(out_path,
Example #15
	dev = "cpu"  
device = torch.device(dev)

m = IOGnet()
m.to(device)


min_res = 16
optimizer = torch.optim.Adam(m.parameters(), lr=lr)
loss_fce = F.binary_cross_entropy

epoch_losses = []
mean_epoch_losses = [] 
accuracies = []

g_train = batch_generator(batch_size, min_res, args.train, args.test, False, False)
batch_n = next(g_train)

for n in range(21):
	epoch_losses = []
	# First train model on dataset
	for i in range(batch_n):
		X, y, _ = next(g_train)

		# Predict result
		y_pred = m(X)

		loss = loss_fce(y_pred, y)

		# Reset gradients before backprop so they don't accumulate across steps
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()
Example #16
def train(name, resume, batch_size, epochs, verbose):

    # paths
    log_path = None if name is None else f"logs/{name}.json"
    out_path = None if name is None else f"snapshots/{name}.{{epoch:06d}}.h5"
    echo('log path', log_path)
    echo('out path', out_path)
    result_dir = "result/{}".format(name)
    echo('result images', result_dir)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    # running parameters
    run_params = locals()

    # init
    echo('train', run_params)
    log = logging.Logger(log_path)
    log({'train': run_params})
    session = tf.Session('')
    K.set_session(session)
    K.set_learning_phase(1)

    if name is None:
        echo("Warning: name is None. Models will never be saved.", fg='red')

    # dataset
    echo('dataset loading...')
    seq_train, seq_valid = dataset.batch_generator(batch_size)

    # model building
    echo('model building...')
    enc, dec, auto, gen, dis, gan_fake, gan_real, clf = Model.build()

    echo('Encoder', fg='yellow')
    enc.summary()
    echo('Decoder', fg='yellow')
    dec.summary()
    echo('Generator', fg='yellow')
    gen.summary()
    echo('Discriminator', fg='yellow')
    dis.summary()
    echo('GANs', fg='yellow')
    gan_fake.summary()
    gan_real.summary()
    echo('Classifier', fg='yellow')
    clf.summary()

    # training
    echo('start learning...')

    eps = 0.001
    TRUE = numpy.ones((batch_size, )) - eps
    FALSE = numpy.zeros((batch_size, )) + eps

    def make_noise():
        """() -> U"""
        x = numpy.random.randn(batch_size, Model.U_DIM)
        return x

    def interpolating():
        U = make_noise()
        Z = gen.predict_on_batch(U)
        u = Z[0]
        v = Z[1] + 0.001
        X = numpy.array(
            [a * u + (1 - a) * v for a in numpy.linspace(0, 3, batch_size)])
        return X

    for epoch in range(epochs):

        INTERVAL = 20
        auto_loss = 0
        dis_fake_loss = 0
        dis_real_loss = 0
        gen_loss = 0
        enc_loss = 0
        clf_loss = 0
        clf_acc = 0
        last_metrics = ()

        # Training
        for i, (X, y) in enumerate(seq_train):

            xsize = len(X)

            # Training autoencoder
            auto.trainable = True
            enc.trainable = True
            dec.trainable = True
            auto_loss += auto.train_on_batch(X, X)

            # Training GAN (dis for real)
            clf.trainable = True
            dis.trainable = True
            enc.trainable = False
            gen.trainable = False
            z_real = enc.predict_on_batch(X)
            dis_real_loss += dis.train_on_batch(z_real, TRUE[:xsize])

            # Training GAN (dis for fake)
            clf.trainable = True
            dis.trainable = True
            enc.trainable = False
            gen.trainable = False
            u = make_noise()
            z_fake = gen.predict_on_batch(u)
            dis_fake_loss += dis.train_on_batch(z_fake, FALSE)

            # Training GAN (gen)
            clf.trainable = False
            dis.trainable = False
            gen.trainable = True
            u = make_noise()
            gen_loss += gan_fake.train_on_batch(u, TRUE)

            # Training GAN (enc)
            dis.trainable = False
            clf.trainable = False
            enc.trainable = True
            enc_loss += gan_real.train_on_batch(X, TRUE[:xsize])

            # Training Classifier (clf for real data)
            clf.trainable = True
            dis.trainable = True
            enc.trainable = False
            gen.trainable = False
            z_real = enc.predict_on_batch(X)
            _loss, _acc = clf.train_on_batch(z_real, y)
            clf_loss += _loss
            clf_acc += _acc

            if i % INTERVAL == INTERVAL - 1:
                if i > INTERVAL:
                    click.echo('\r', nl=False)
                auto_loss /= INTERVAL
                dis_fake_loss /= INTERVAL
                dis_real_loss /= INTERVAL
                gen_loss /= INTERVAL
                enc_loss /= INTERVAL
                clf_loss /= INTERVAL
                clf_acc /= INTERVAL
                click.echo(
                    f"Epoch:{epoch:4d} "
                    f"Train: "
                    f"auto={auto_loss:.4f} "
                    f"dis_fake={dis_fake_loss:.4f} "
                    f"dis_real={dis_real_loss:.4f} "
                    f"gen={gen_loss:.4f} "
                    f"enc={enc_loss:.4f} "
                    f"clf={clf_loss:.4f},{clf_acc:.4f} ",
                    nl=False)
                last_metrics = (auto_loss, dis_fake_loss, dis_real_loss,
                                gen_loss, enc_loss, clf_loss, clf_acc)
                auto_loss = 0
                dis_fake_loss = 0
                dis_real_loss = 0
                gen_loss = 0
                enc_loss = 0
                clf_loss = 0
                clf_acc = 0
        click.echo('')

        # Logging
        auto_loss, dis_fake_loss, dis_real_loss, \
            gen_loss, enc_loss, clf_loss, clf_acc = last_metrics
        log({
            'epoch': epoch,
            'loss': {
                'auto': auto_loss,
                'dis_fake_loss': dis_fake_loss,
                'dis_real_loss': dis_real_loss,
                'gen_loss': gen_loss,
                'enc_loss': enc_loss,
                'clf_loss': clf_loss,
                'clf_acc': clf_acc,
            }
        })

        # Testing
        X, _ = seq_valid[0]
        X_rec = auto.predict_on_batch(X)
        testing.imgsave(X_rec, f"{result_dir}/rec.{epoch:03d}.png")
        u = make_noise()
        z = gen.predict_on_batch(u)
        X_smp = dec.predict_on_batch(z)
        testing.imgsave(X_smp, f"{result_dir}/smp.{epoch:03d}.png")
        z = interpolating()
        X_int = dec.predict_on_batch(z)
        testing.imgsave(X_int, f"{result_dir}/int.{epoch:03d}.png")
Example #17
        x1 = decoder_block(x2, x1, self.decoder_block3, self.relu)
        x0 = decoder_block(x1, x0, self.decoder_block4, self.relu)

        #
        # START OF FINE NET
        #
        x4 = finenet_block(x4, self.fine_block1)
        x3 = finenet_block(x3, self.fine_block2)
        x2 = finenet_block(x2, self.fine_block3)
        x1 = finenet_block(x1, self.fine_block4)
        x0 = finenet_block(x0, self.fine_block5)

        x0 = self.final_conv1(torch.cat([x4, x3, x2, x1, x0], 1))

        x0 = self.relu(self.final_bn1(x0))
        x0 = self.relu(self.final_conv2(x0))
        x0 = self.sigmoid(self.final_conv3(x0))
        return x0


from dataset import batch_generator
if __name__ == "__main__":
    m = IOGnet()
    g = batch_generator(1, 16, False, False)
    print(next(g))

    X, _, _ = next(g)
    print(X.shape)
    y_pred = m(X)
    print(y_pred.shape)
Example #18
training_model = Model(inputs=input_tensor, outputs=feature_tensors)
training_model.summary()


def save_int_image(epoch_idx, logs):
    if epoch_idx % 100 == 99:
        flattened_image_data = image_layer.get_weights()[0]
        # flattened_image_data = K.eval(flattened_image_tensor)
        image_data = np.reshape(flattened_image_data, (768, 1024, 3))
        save_image(f'./images/result{epoch_idx:04}.jpeg', image_data)


save_model = ModelCheckpoint('weights.{epoch:03d}-{loss:.2e}.hdf5',
                             monitor='loss')

optimizer = Adam(lr=0.001)  # note: the author also experimented with lr=10
training_model.compile(loss='mean_squared_error',
                       optimizer=optimizer,
                       loss_weights=[2.5, *([1] * 5)])

training_model.fit_generator(
    #one example, one feature, the value is 1 since 1s function
    batch_generator(featurization_model, style_values),
    #batch gen gives x and y values
    steps_per_epoch=(num_batches // 32),
    epochs=3000 * 32,
    # callbacks=[LambdaCallback(on_epoch_end=save_int_image)]
    callbacks=[save_model])
