Example No. 1
# assumed imports for this snippet (Keras-style project using scikit-learn metrics)
import math
import numpy as np
from sklearn.metrics import roc_auc_score
from keras.models import load_model


def probability_result():
    # readcsv, test, get_train_batch and args are project-local (not shown here)
    tra_Name, tra_Label, val_Name, val_Label = readcsv(args.tra_path, args.val_path)
    test(val_Name, args.dataset_path)
    print(val_Label.shape)
    model = load_model('/Volumes/MyPassport/duke/test_chk1_model/ede/cross1/weights.20-0.22-0.94-Dukelungedema_.hdf5')
    result = model.predict_generator(get_train_batch(val_Name, val_Label, 8, True, 224, 224, 1, False, args.dataset_path, 3),
                                     steps=math.ceil(val_Label.shape[0] / 8.0),
                                     verbose=1)
    print(result.shape)
    AUC = roc_auc_score(val_Label, result)
    print('************')
    print(AUC)
    print(result)
    # note: np.save writes NumPy .npy data regardless of the .hdf5 extension in the name
    np.save("test_edema.hdf5", result)
Example No. 2
def test():
    # config, data, nlp and scraping are project-local modules, assumed imported above
    logging.debug('Running system tests.')
    try:
        data.test(config.NEO4J_SERVER)
        nlp.test()
        scraping.test(config.SEC_SERVER)
        logging.debug('Tests cleared.')
        print('\n\n\nTests returned positive. All systems ready to go.\n\n\n')
    except Exception:
        # sys.exc_type/sys.exc_value were removed in Python 3; use sys.exc_info()
        exc_type, exc_value = sys.exc_info()[:2]
        logging.error('System tests returned negative.')
        logging.debug(exc_type)
        logging.debug(exc_value)
        logging.error('%s:%s' % (exc_type, exc_value))
        sys.exit('Tests returned negative. Check logs for more details.')
Example No. 3
def main():
    paddle.init(use_gpu=True, trainer_count=1)

    img = paddle.layer.data(name='img',
                            type=paddle.data_type.dense_vector(784))
    label = paddle.layer.data(name='label',
                              type=paddle.data_type.integer_value(10))

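    # LeNet-5-style network; letnet_5 is defined elsewhere in the project (identifier kept as in the source)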
    predict = letnet_5(img)

    cost = paddle.layer.classification_cost(input=predict, label=label)
    if os.path.exists(PARAMETERS_TAR):
        # open the parameter tar archive in binary mode
        with open(PARAMETERS_TAR, 'rb') as f:
            parameters = paddle.parameters.Parameters.from_tar(f)
    else:
        parameters = paddle.parameters.create(cost)
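    # 0.1 / 128 looks like a per-sample learning rate (base LR over a nominal batch size)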
    optimizer = paddle.optimizer.Momentum(
        learning_rate=0.1 / 128,
        momentum=0.9,
        regularization=paddle.optimizer.L2Regularization(rate=0.0001))

    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=optimizer)

    def event_handler(evt):
        if isinstance(evt, paddle.event.EndPass):
            print('Pass id: %d, %s' % (evt.pass_id, evt.metrics))

    trainer.train(reader=paddle.batch(paddle.reader.shuffle(dataset.train(),
                                                            buf_size=8192),
                                      batch_size=64),
                  event_handler=event_handler,
                  num_passes=30)

    with open(PARAMETERS_TAR, 'wb') as f:
        trainer.save_parameter_to_tar(f)

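    # dataset.test() returns a reader creator; calling it yields the sample generator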
    test_reader = dataset.test()()
    test_list = []
    for img in test_reader:
        test_list.append(img)

    labels = paddle.infer(output_layer=predict,
                          parameters=parameters,
                          input=test_list)

    labels = np.argmax(labels, axis=1)
    dataset.save_result2csv('./result.csv', labels)
    print('training end')
Example No. 4
def beam_search(FLAGS):
    if FLAGS.static:
        paddle.enable_static()
    device = paddle.set_device("gpu" if FLAGS.use_gpu else "cpu")

    # yapf: disable
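    # "pixel": batches of single-channel 48x384 images; "label_in": decoder input tokens (inferred from the names)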
    inputs = [
        Input([None, 1, 48, 384], "float32", name="pixel"),
        Input([None, None], "int64", name="label_in")
    ]
    labels = [
        Input([None, None], "int64", name="label_out"),
        Input([None, None], "float32", name="mask")
    ]
    # yapf: enable

    model = paddle.Model(Seq2SeqAttInferModel(encoder_size=FLAGS.encoder_size,
                                              decoder_size=FLAGS.decoder_size,
                                              emb_dim=FLAGS.embedding_dim,
                                              num_classes=FLAGS.num_classes,
                                              beam_size=FLAGS.beam_size),
                         inputs=inputs,
                         labels=labels)

    model.prepare(metrics=SeqBeamAccuracy())
    model.load(FLAGS.init_model)

    test_dataset = data.test()
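    # batch-level preprocessing: resize and normalize images, pad target sequences to a common length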
    test_collate_fn = BatchCompose(
        [data.Resize(), data.Normalize(),
         data.PadTarget()])
    test_sampler = data.BatchSampler(test_dataset,
                                     batch_size=FLAGS.batch_size,
                                     drop_last=False,
                                     shuffle=False)
    test_loader = paddle.io.DataLoader(test_dataset,
                                       batch_sampler=test_sampler,
                                       places=device,
                                       num_workers=0,
                                       return_list=True,
                                       collate_fn=test_collate_fn)

    model.evaluate(eval_data=test_loader,
                   callbacks=[LoggerCallBack(10, 2, FLAGS.batch_size)])
Example No. 5
File: eval.py Project: wzzju/hapi
def main(FLAGS):
    device = set_device("gpu" if FLAGS.use_gpu else "cpu")
    if FLAGS.dynamic:
        fluid.enable_dygraph(device)
    model = Seq2SeqAttModel(encoder_size=FLAGS.encoder_size,
                            decoder_size=FLAGS.decoder_size,
                            emb_dim=FLAGS.embedding_dim,
                            num_classes=FLAGS.num_classes)

    # yapf: disable
    inputs = [
        Input([None, 1, 48, 384], "float32", name="pixel"),
        Input([None, None], "int64", name="label_in")
    ]
    labels = [
        Input([None, None], "int64", name="label_out"),
        Input([None, None], "float32", name="mask")
    ]
    # yapf: enable

    model.prepare(loss_function=WeightCrossEntropy(),
                  metrics=SeqAccuracy(),
                  inputs=inputs,
                  labels=labels,
                  device=device)
    model.load(FLAGS.init_model)

    test_dataset = data.test()
    test_collate_fn = BatchCompose(
        [data.Resize(), data.Normalize(),
         data.PadTarget()])
    test_sampler = data.BatchSampler(test_dataset,
                                     batch_size=FLAGS.batch_size,
                                     drop_last=False,
                                     shuffle=False)
    test_loader = fluid.io.DataLoader(test_dataset,
                                      batch_sampler=test_sampler,
                                      places=device,
                                      num_workers=0,
                                      return_list=True,
                                      collate_fn=test_collate_fn)

    model.evaluate(eval_data=test_loader,
                   callbacks=[LoggerCallBack(10, 2, FLAGS.batch_size)])
Example No. 6
def main(FLAGS):
    if FLAGS.static:
        paddle.enable_static()
    device = paddle.set_device("gpu" if FLAGS.use_gpu else "cpu")

    # yapf: disable
    inputs = [
        Input([None, 1, 48, 384], "float32", name="pixel"),
        Input([None, None], "int64", name="label_in"),
    ]
    labels = [
        Input([None, None], "int64", name="label_out"),
        Input([None, None], "float32", name="mask"),
    ]
    # yapf: enable

    model = paddle.Model(
        Seq2SeqAttModel(
            encoder_size=FLAGS.encoder_size,
            decoder_size=FLAGS.decoder_size,
            emb_dim=FLAGS.embedding_dim,
            num_classes=FLAGS.num_classes),
        inputs,
        labels)

    lr = FLAGS.lr
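    # optionally decay the base LR by 10x at steps 200,000 and 250,000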
    if FLAGS.lr_decay_strategy == "piecewise_decay":
        learning_rate = fluid.layers.piecewise_decay(
            [200000, 250000], [lr, lr * 0.1, lr * 0.01])
    else:
        learning_rate = lr
    grad_clip = fluid.clip.GradientClipByGlobalNorm(FLAGS.gradient_clip)
    optimizer = fluid.optimizer.Adam(
        learning_rate=learning_rate,
        parameter_list=model.parameters(),
        grad_clip=grad_clip)

    model.prepare(optimizer, WeightCrossEntropy(), SeqAccuracy())

    train_dataset = data.train()
    train_collate_fn = BatchCompose(
        [data.Resize(), data.Normalize(), data.PadTarget()])
    train_sampler = data.BatchSampler(
        train_dataset, batch_size=FLAGS.batch_size, shuffle=True)
    train_loader = paddle.io.DataLoader(
        train_dataset,
        batch_sampler=train_sampler,
        places=device,
        num_workers=FLAGS.num_workers,
        return_list=True,
        collate_fn=train_collate_fn)
    test_dataset = data.test()
    test_collate_fn = BatchCompose(
        [data.Resize(), data.Normalize(), data.PadTarget()])
    test_sampler = data.BatchSampler(
        test_dataset,
        batch_size=FLAGS.batch_size,
        drop_last=False,
        shuffle=False)
    test_loader = paddle.io.DataLoader(
        test_dataset,
        batch_sampler=test_sampler,
        places=device,
        num_workers=0,
        return_list=True,
        collate_fn=test_collate_fn)

    model.fit(train_data=train_loader,
              eval_data=test_loader,
              epochs=FLAGS.epoch,
              save_dir=FLAGS.checkpoint_path,
              callbacks=[LoggerCallBack(10, 2, FLAGS.batch_size)])
Example No. 7
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html

import pandas as pd
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
import numpy as np

import data as DATA
DATA.test()

external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']

app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

data = pd.read_csv('complete.csv')
##############################Pie Chart - Percentage of States Affected###################################
state = 28
UT = 8
total_states_UT = state + UT

num_affected_states = len(pd.unique(data['Name of State / UT']))
states_not_affected = total_states_UT - num_affected_states

labels = ['Number of Affected States', 'Number of Unaffected States']
values = [num_affected_states, states_not_affected]
##############################Pie Chart - Percentage of States Affected###################################
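A minimal sketch of a possible continuation (not in the original snippet, which stops at the values): feeding labels and values into the pie figure the banner comments refer to.

# assumed continuation: build the pie chart from the values computed above
fig = go.Figure(data=[go.Pie(labels=labels, values=values)])
fig.update_layout(title='Percentage of States Affected')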
Example No. 8
'''
Run all data cleaning and create example.osm.json
'''
if __name__ == "__main__":

    import mapparser
    import tags
    import users
    import audit
    import data
    mapparser.test()
    tags.test()
    users.test()
    audit.test()
    data.test()
Example No. 9
import numpy as np           # assumed import: np is used below
import tensorflow as tf      # assumed import: tf.keras callbacks are used below
from skimage.io import imread, imshow
from skimage.transform import resize
import matplotlib.pyplot as plt
from tqdm import tqdm

np.random.seed(42)  # the original `np.random.seed = 42` overwrote the function instead of seeding

IMG_WIDTH = 128
IMG_HEIGHT = 128
IMG_CHANNELS = 3

#################################

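# data.train/data.test are project-local loaders, presumably returning arrays at the sizes above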
X_train, Y_train = data.train(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
X_test = data.test(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)

#################################

model = arch.architecture(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
print(model.summary())

################################

# ModelCheckpoint: save the best model (lowest val_loss) to model_for_nuclei.h5
checkpointer = tf.keras.callbacks.ModelCheckpoint('model_for_nuclei.h5',
                                                  verbose=1,
                                                  save_best_only=True)

callbacks = [
    tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),