Example #1
import numpy as np
import torch

import model
# make_loader() is assumed to be defined elsewhere in the project; it returns
# the training and test DataLoaders.


def main():
    train_loader, test_loader = make_loader()

    print("train VAE")
    vae = model.VAE(6, 2)
    vae_optimizer = torch.optim.Adam(vae.parameters(), lr=0.001)
    for i in range(3):
        losses = []
        for data in train_loader:
            vae.zero_grad()
            loss = vae.loss(data['data'])
            loss.backward()
            vae_optimizer.step()
            losses.append(loss.cpu().detach().numpy())
        print("EPOCH: {} loss: {}".format(i, np.average(losses)))
    torch.save(vae.state_dict(), './saved/vae')

    print("train Estimator")
    estimator = model.Estimator()
    estimator_optimizer = torch.optim.Adam(estimator.parameters(), lr=0.001)
    for i in range(3):
        losses = []
        for data in train_loader:
            estimator.zero_grad()
            # Encode with the trained VAE; detach z so the backward pass
            # does not update the frozen VAE weights.
            _, z = vae(data['data'])
            loss = estimator.loss(z.detach(), data['treat'], data['outcome'])
            loss.backward()
            estimator_optimizer.step()
            losses.append(loss.cpu().detach().numpy())
        print("EPOCH: {} loss: {}".format(i, np.average(losses)))
    torch.save(estimator.state_dict(), './saved/estimator')
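The `model` module itself is not shown in any of these examples. Purely for orientation, here is a minimal, hypothetical sketch of the interfaces that the script above and the Flask service in Example #5 appear to assume (a `VAE(input_dim, latent_dim)` whose forward pass returns `(reconstruction, z)`, and an `Estimator` scoring a latent code plus a treatment flag); the layer shapes and losses are placeholders, not the original implementation.

import torch
import torch.nn as nn

class VAE(nn.Module):
    def __init__(self, input_dim, latent_dim):
        super().__init__()
        self.encoder = nn.Linear(input_dim, latent_dim)
        self.decoder = nn.Linear(latent_dim, input_dim)

    def forward(self, x):
        z = self.encoder(x)
        return self.decoder(z), z   # callers unpack as `_, z = vae(x)`

    def loss(self, x):
        recon, _ = self(x)
        return ((recon - x) ** 2).mean()   # reconstruction term as a stand-in

class Estimator(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Linear(2 + 1, 1)   # latent code (2) plus treatment flag (1)

    def forward(self, z, treat):
        return self.net(torch.cat([z, treat], dim=1))

    def loss(self, z, treat, outcome):
        return ((self(z, treat) - outcome) ** 2).mean()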
Example #2
    def __call__(self):
        # imread, psnr and count_param are project helpers; images are
        # assumed to be float arrays in [0, 1].
        img_list = np.sort(np.asarray(glob.glob('%s/*.png' % self.input_path)))
        print(img_list)

        input = tf.placeholder(tf.float32, shape=[None, None, None, 3])

        # Noise-level estimator, followed by a denoiser conditioned on its output.
        EST = model.Estimator(input, 'EST')
        sigma_hat = EST.output

        MODEL = model.Denoiser(input, sigma_hat, 'Denoise1')
        output = MODEL.output

        saverE = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='EST'))
        count_param('EST')

        vars1 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Denoise1')
        vars2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Noise_ENC')
        saverM = tf.train.Saver(var_list=vars1 + vars2)

        count_param('Denoise1')
        count_param('Noise_ENC')
        count_param()

        print('Sigma: ', self.sigma * 255.)

        with tf.Session(config=self.conf) as sess:
            # The estimator and the denoiser are restored from separate checkpoints.
            ckpt_modelE = os.path.join(self.model_path, 'EST')
            print(ckpt_modelE)
            saverE.restore(sess, ckpt_modelE)

            ckpt_model = os.path.join(self.model_path, 'AWGN')
            print(ckpt_model)
            saverM.restore(sess, ckpt_model)

            P = []

            print('Process %d images' % len(img_list))
            for idx, img_path in enumerate(img_list):
                img = imread(img_path)
                img = img[None, :, :, :]   # add a batch dimension

                # Fixed seed: every image gets a reproducible noise realization.
                np.random.seed(0)
                noise_img = img + np.random.standard_normal(img.shape) * self.sigma

                out = sess.run(output, feed_dict={input: noise_img})
                P.append(psnr(img[0] * 255., np.clip(np.round(out[0] * 255.), 0., 255.)))

                if not os.path.exists('%s/Noise%d' % (self.output_path, self.sigma * 255.)):
                    os.makedirs('%s/Noise%d' % (self.output_path, self.sigma * 255.))
                imageio.imsave('%s/Noise%d/%s.png' % (self.output_path, self.sigma * 255., os.path.basename(img_path[:-4])),
                               np.uint8(np.clip(np.round(out[0] * 255.), 0., 255.)))

                if idx % 5 == 0:
                    print('[%d/%d] Processing' % (idx + 1, len(img_list)))

            print('PSNR: %.4f' % np.mean(P))
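The `psnr` helper used above is not defined in the snippet. A minimal sketch of the standard peak-signal-to-noise-ratio it presumably computes, assuming an 8-bit peak of 255 to match the `* 255.` scaling:

import numpy as np

def psnr(ref, target, peak=255.):
    """PSNR in dB between two images with values in [0, peak]."""
    mse = np.mean((np.asarray(ref, dtype=np.float64)
                   - np.asarray(target, dtype=np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 10. * np.log10(peak ** 2 / mse)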
Example #3
def objective(params):
    # Hyperparameter vector layout: params[0] learning rate, params[1] number
    # of filters, params[2:23] three kernel sizes per 'Torso' modality,
    # params[23] overlap, params[24] dense-layer units, params[25] dropout.
    kernel_sizes = dict.fromkeys(modalities_assoc['Torso'])
    for i, (k, _) in enumerate(kernel_sizes.items()):
        offset = (i * 3) + 2
        kernel_sizes[k] = params[offset:offset + 3]

    estimator = model.Estimator(
        num_classes=num_classes,
        num_modalities=num_modalities,
        fingerprint_size=num_features,
        batch_size=batch_size,
        learning_rate=params[0], # 0.1,
        decay=10**-1,
        num_filters=params[1],
        kernel_sizes=kernel_sizes,
        overlap=params[23],
        num_units_dense_layer=params[24],
        dropout=params[25],
        offsets_assoc=offsets_assoc,
        modalities_assoc=modalities_assoc
    )

    if config.VALIDATION == 'meta-segmented-cv':
        print('======================================================')
        print('       META-SEGMENTED CROSS-VALIDATION                ')
        print('======================================================')

        from metacvpartition import metacvpartition

        _X = X
        _y = y[:, 0]

        mxval = metacvpartition(
            _y
            , config.xval_nfolds
            , config.xval_metasegmentlength
            , debug=False)

        y_pred = cross_val_predict(
            estimator,
            _X, _y,
            cv=mxval.splitsGenerator(),
            n_jobs=config.N_JOBS_cv,
            verbose=6)

        print(y_pred.shape)

        conf_mat = confusion_matrix(_y, y_pred)
        print(conf_mat)

        f = utils.save(conf_mat, 'confusion_matrix', estimator._io._num_persistor)
        print('confusion matrix saved to %s' % f)

        f = utils.save(y_pred, 'y_pred', estimator._io._num_persistor)
        print('y_pred saved to %s' % f)

        f = utils.save(_y, 'y', estimator._io._num_persistor)
        print('y saved to %s' % f)

        f = utils.save(list(mxval.splitsGenerator()), 'splits', estimator._io._num_persistor)
        print('Generated splits saved to %s' % f)

        # compute a bunch of scores -----------
        fscore_micro = f1_score(_y, y_pred, average='micro')
        print('[objective] f1_score_micro = %s%%' % (fscore_micro * 100,))
        fscore = scoring.Fscore(_y, y_pred, list(mxval.splitsGenerator()))
        avg = fscore.avg_fscore()
        print('[objective] f1_score_avg = %s%%' % (avg * 100,))
        prre = fscore.prre_fscore()
        print('[objective] f1_score_prre = %s%%' % (prre * 100,))
        tpfp = fscore.tpfp_fscore()
        print('[objective] f1_score_tpfp = %s%%' % (tpfp * 100,))

        return -avg
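`objective` returns the negative average F-score, the usual convention for black-box minimizers. As an illustration only, here is how it could be driven with scikit-optimize; the optimizer choice and every bound below are made-up placeholders, not values from the original project.

from skopt import gp_minimize

# Hypothetical search space matching the 26-element layout noted above
# (21 kernel-size entries = three per 'Torso' modality).
space = (
    [(1e-4, 1e-1, 'log-uniform')]   # params[0]  learning rate
    + [(8, 128)]                    # params[1]  number of filters
    + [(3, 15)] * 21                # params[2:23] kernel sizes
    + [(0.0, 0.9)]                  # params[23] overlap
    + [(64, 1024)]                  # params[24] dense-layer units
    + [(0.0, 0.8)]                  # params[25] dropout
)

result = gp_minimize(objective, space, n_calls=50, random_state=0)
print('best avg f1: %.4f' % -result.fun)
print('best params:', result.x)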
Example #4
def trainSingleModel():
    """ Build the predictive model using the best hyperparameters
        found previously.
    """

    print('==================================================')
    print('Training a unique model with the instantiation:')
    print(config.cmd_args)
    print('Experiment version %s.%s.%s-%s-%s'
          % (config.VERSION
             , config.REVISION
             , config.MINOR_REVISION
             , config.POSITION
             , config.USER))
    print('==================================================')

    # Build data reader and get training data
    dr = dataset.DataReader()
    X = dr.train
    y = dr.labels

    # Build pipeline
    p = pipeline.Pipeline(X, y)

    X = p.X
    y = p.y
    offsets_assoc = p.offsets_assoc
    modalities_assoc = p.modalities_assoc

    # Three kernel sizes per 'Torso' modality, read directly from config
    # (no leading hyperparameters here, hence no +2 offset as in objective()).
    kernel_sizes = dict.fromkeys(modalities_assoc['Torso'])
    for i, (k, _) in enumerate(kernel_sizes.items()):
        offset = i * 3
        kernel_sizes[k] = config.kernel_sizes_list[offset:offset + 3]

    """
# -----------------------------
    X_train, X_test, y_train, y_test =\
        train_test_split(
            X, y,
            test_size=0.30,
            random_state=1)
# -----------------------------
    """

    # Build estimator
    estimator = model.Estimator(
        num_classes=config.num_classes,
        num_modalities=dataset.DataReader.num_modalities,
        fingerprint_size=p.num_features,
        batch_size=config.batch_size,
        learning_rate=config.learning_rate, # 0.1,
        decay=10**-1,
        num_filters=config.num_filters,
        kernel_sizes=kernel_sizes,
        overlap=config.overlap,
        num_units_dense_layer=config.num_units_dense_layer,
        dropout=config.dropout,
        offsets_assoc=offsets_assoc,
        modalities_assoc=modalities_assoc
    )

    # Apply __setstate__ to the estimator's own state up front, mirroring what
    # unpickling will later do inside the cross-validation worker processes.
    estimator.__setstate__(estimator.__dict__)

    if config.VALIDATION == 'meta-segmented-cv':
        print('======================================================')
        print('       META-SEGMENTED CROSS-VALIDATION                ')
        print('======================================================')

        from metacvpartition import metacvpartition

        _X = X
        _y = y[:, 0]

        mxval = metacvpartition(
            _y
            , config.xval_nfolds
            , config.xval_metasegmentlength
            , debug=False)

        start = timeit.default_timer()  # -----
        y_pred = cross_val_predict(
            estimator,
            _X, _y,
            cv=mxval.splitsGenerator(),
            n_jobs=config.N_JOBS_cv,
            verbose=6)
        stop = timeit.default_timer()  # -----
        print('training time')
        print(stop - start)

        print(y_pred.shape)
        conf_mat = confusion_matrix(_y, y_pred)
        print(conf_mat)
        f = utils.save(conf_mat, 'confusion_matrix', estimator._io._num_persistor)
        print('confusion matrix saved to %s' % f)
        f = utils.save(y_pred, 'y_pred', estimator._io._num_persistor)
        print('y_pred saved to %s' % f)
        f = utils.save(_y, 'y', estimator._io._num_persistor)
        print('y saved to %s' % f)
        f = utils.save(list(mxval.splitsGenerator()), 'splits', estimator._io._num_persistor)
        print('Generated splits saved to %s' % f)

        # compute a bunch of scores -----------
        fscore_micro = f1_score(_y, y_pred, average='micro')
        print('[objective] f1_score_micro = %s%%' % (fscore_micro * 100,))
        fscore = scoring.Fscore(_y, y_pred, list(mxval.splitsGenerator()), debug=True)
        avg = fscore.avg_fscore()
        print('[objective] f1_score_avg = %s%%' % (avg * 100,))
        prre = fscore.prre_fscore()
        print('[objective] f1_score_prre = %s%%' % (prre * 100,))
        tpfp = fscore.tpfp_fscore()
        print('[objective] f1_score_tpfp = %s%%' % (tpfp * 100,))

        print('OK')
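`cross_val_predict` accepts any iterable of `(train_idx, test_idx)` pairs as `cv`, which is what `metacvpartition.splitsGenerator()` supplies. The real class is not shown; purely as a hypothetical sketch, a meta-segmented split could look like this, keeping contiguous blocks of samples together so temporally adjacent samples never straddle a train/test boundary:

import numpy as np

def meta_segment_splits(y, n_folds, segment_length, seed=0):
    """Hypothetical stand-in for metacvpartition: yield (train_idx, test_idx)
    pairs where contiguous segments of `segment_length` samples are assigned
    to folds as indivisible blocks."""
    n = len(y)
    segments = np.arange(n) // segment_length        # segment id per sample
    n_segments = segments.max() + 1
    order = np.random.RandomState(seed).permutation(n_segments)
    fold_of_segment = np.empty(n_segments, dtype=int)
    fold_of_segment[order] = np.arange(n_segments) % n_folds   # balanced folds
    folds = fold_of_segment[segments]                # fold id per sample
    for f in range(n_folds):
        yield np.where(folds != f)[0], np.where(folds == f)[0]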
Example #5
from flask import Flask, request, jsonify
from flask_cors import CORS
import torch
import model
import json

app = Flask(__name__)
CORS(app)

vae = model.VAE(6, 2)
vae.load_state_dict(torch.load('./saved/vae'))
vae.eval()  # inference mode: disables dropout/batch-norm updates
estimator = model.Estimator()
estimator.load_state_dict(torch.load('./saved/estimator'))
estimator.eval()

@app.route("/ping", methods=['GET'])
def ping():
    return 'ok'

@app.route("/estimate", methods=['POST'])
def estimate():
    data = request.data.decode('utf-8')
    data = json.loads(data) # {"data": [age, HbA1c, BMI, WBC, CRP, Lac]}
    _, z = vae(torch.Tensor([data['data']]))
    outcome_untreat = estimator(z, torch.Tensor([[0]]).float()).cpu().detach().numpy()[0][0].item()
    outcome_treat = estimator(z, torch.Tensor([[1]]).float()).cpu().detach().numpy()[0][0].item()
    return jsonify({"untreated": outcome_untreat, "treated": outcome_treat})

if __name__ == '__main__':
    app.run(debug=False, host='0.0.0.0', port=3001)
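A quick client-side check of the service; the feature values below are made-up placeholders in the order the `/estimate` route documents:

import requests

# [age, HbA1c, BMI, WBC, CRP, Lac] -- illustrative values only
payload = {"data": [64, 7.2, 28.5, 9.1, 12.0, 1.8]}

resp = requests.post('http://localhost:3001/estimate', json=payload)
print(resp.json())   # {"treated": ..., "untreated": ...}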