def test(self):
        """Test Function."""
        print("Testing the results")

        self.inputs = data_loader.load_data(self._dataset_name,
                                            self._size_before_crop, False,
                                            self._do_flipping)

        self.model_setup()
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        with tf.Session() as sess:
            sess.run(init)

            chkpt_fname = tf.train.latest_checkpoint(self._checkpoint_dir)
            saver.restore(sess, chkpt_fname)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            self._num_imgs_to_save = cyclegan_datasets.DATASET_TO_SIZES[
                self._dataset_name]
            self.save_images(sess, 0)

            coord.request_stop()
            coord.join(threads)
Example #2
def main():

    # Load config
    config = get_config_from_json('config/model.config')

    # Load data
    [X_train, Y_train, X_CV, Y_CV, X_test, Y_test] = load_data(0.18)

    # Generate dataset
    test_dataset = MyDataset(X_test, Y_test)

    # Create Data Loaders
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=config.model.batch_size,
                             shuffle=False)

    # Build model
    model = CNNModel()
    model = model.double()
    model.eval()

    if os.path.isfile(config.model.path):
        model.load_state_dict(torch.load(config.model.path))
        print('Loaded checkpoint..')
    else:
        print('checkpoint not found..')

    evaluate(model, test_loader)
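
The evaluate helper called at the end of this example is not shown here. A minimal sketch of what such a helper might look like, assuming a classifier whose forward pass returns per-class scores and a loader yielding (inputs, labels) batches:

# Hypothetical evaluate() helper; the real implementation is not part of
# this example, so this is only an assumed classification-style sketch.
import torch


def evaluate(model, test_loader):
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():  # no gradients needed during evaluation
        for inputs, labels in test_loader:
            outputs = model(inputs)
            preds = outputs.argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    print('Test accuracy: {:.3f}'.format(correct / total))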
Example #3
def mainClassification():

    # Load config
    config = get_config_from_json('config/modelClassification.config')

    # Load data
    [X_train, Y_train, X_CV, Y_CV, X_test, Y_test] = load_data(0.18)

    # Generate dataset
    train_dataset = MyDataset(X_train, Y_train)
    val_dataset = MyDataset(X_CV, Y_CV)

    # Create Data Loaders
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=config.model.batch_size,
                              shuffle=True)
    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=config.model.batch_size,
                            shuffle=False)

    # Build model
    model = CNNModel()
    model = model.double()
    print(model)

    # Train model
    train(model, config, train_loader, val_loader)
Example #4
def load_data(filepath, cfg, feature_list, start_feature=5, mask_value=0):
    X_train_data, y_train_data = data_loader.load_data(
        datafile=filepath + 'normalized_training.csv',
        flare_label=cfg.flare_label,
        series_len=cfg.seq_len,
        start_feature=start_feature,
        n_features=cfg.n_features,
        mask_value=mask_value,
        feature_list=feature_list)
    X_train_data = np.reshape(X_train_data,
                              (len(X_train_data), cfg.n_features))
    X_train_data = X_train_data.astype(np.float32)
    y_train_data = data_loader.label_transform(y_train_data)

    X_valid_data, y_valid_data = data_loader.load_data(
        datafile=filepath + 'normalized_validation.csv',
        flare_label=cfg.flare_label,
        series_len=cfg.seq_len,
        start_feature=start_feature,
        n_features=cfg.n_features,
        mask_value=mask_value,
        feature_list=feature_list)
    X_valid_data = np.reshape(X_valid_data,
                              (len(X_valid_data), cfg.n_features))
    X_valid_data = X_valid_data.astype(np.float32)
    y_valid_data = data_loader.label_transform(y_valid_data)

    X_test_data, y_test_data = data_loader.load_data(
        datafile=filepath + 'normalized_testing.csv',
        flare_label=cfg.flare_label,
        series_len=cfg.seq_len,
        start_feature=start_feature,
        n_features=cfg.n_features,
        mask_value=mask_value,
        feature_list=feature_list)
    X_test_data = np.reshape(X_test_data, (len(X_test_data), cfg.n_features))
    X_test_data = X_test_data.astype(np.float32)
    y_test_data = data_loader.label_transform(y_test_data)

    return X_train_data, X_valid_data, X_test_data, y_train_data, y_valid_data, y_test_data
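
For context, a minimal call sketch for this function. The cfg values, the data directory, and the feature list below are hypothetical stand-ins, not values taken from the original project:

# Hypothetical usage sketch; SimpleNamespace stands in for whatever config
# object supplies flare_label, seq_len, and n_features in the real code.
from types import SimpleNamespace

cfg = SimpleNamespace(flare_label='M', seq_len=10, n_features=14)
feature_list = list(range(5, 5 + cfg.n_features))  # assumed column indices

(X_train, X_valid, X_test,
 y_train, y_valid, y_test) = load_data('data/', cfg, feature_list)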
Example #5
def mainSiamese():
    # Load config
    config = get_config_from_json('config/modelSiamese.config')

    # Load data
    [X_train, Y_train, X_CV, Y_CV, X_test, Y_test] = load_data(0.18)

    # Generate dataset
    test_dataset = MySiameseDataset(X_test, Y_test)

    # Create Data Loaders
    test_loader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=True)

    # Build model
    model = SiameseModel()
    model = model.double()
    print(model)
    model.eval()

    if os.path.isfile(config.model.path):
        model.load_state_dict(torch.load(config.model.path))
        print('Loaded checkpoint..')
    else:
        print('checkpoint not found..')

    dataiter = iter(test_loader)
    x0, _, _ = next(dataiter)

    plt.tight_layout()

    plt.subplot(4, 3, 2)
    plt.axis('off')
    plt.imshow(x0[0][0], 'gray')
    plt.title('Original Image', fontdict={'fontsize': 10})

    for i in range(9):
        _, x1, label = next(dataiter)
        output0, output1 = model(x0, x1)

        output0 = output0.type(torch.DoubleTensor)
        output1 = output1.type(torch.DoubleTensor)

        euclidean_distance = F.pairwise_distance(output0, output1)

        plt.subplot(4, 3, i + 4)
        plt.axis('off')
        plt.imshow(x1[0][0], 'gray')
        plt.title(str(round(euclidean_distance.item(), 2)),
                  fontdict={'fontsize': 10})

    plt.show()
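
The MySiameseDataset class used above is not shown in this example. A minimal sketch of a dataset yielding (img0, img1, label) triples compatible with the loop above; the pairing logic is an assumption, not the original class:

# Hypothetical pair dataset; only the (img0, img1, label) interface is
# taken from the loop above, the pairing strategy here is assumed.
import random

import torch
from torch.utils.data import Dataset


class PairDataset(Dataset):
    def __init__(self, X, Y):
        self.X = torch.as_tensor(X).double()
        self.Y = torch.as_tensor(Y)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        j = random.randrange(len(self.X))
        label = torch.tensor(float(self.Y[idx] == self.Y[j]))  # 1.0 if same class
        return self.X[idx], self.X[j], label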
Example #6

    def train(self):
        """Training Function."""
        # Load Dataset from the dataset folder
        self.inputs = data_loader.load_data(self._dataset_name,
                                            self._size_before_crop, True,
                                            self._do_flipping)

        # Build the network
        self.model_setup()

        # Loss function calculations
        self.compute_losses()

        # Initializing the global variables
        init = (tf.global_variables_initializer(),
                tf.local_variables_initializer())
        saver = tf.train.Saver()

        max_images = cyclegan_datasets.DATASET_TO_SIZES[self._dataset_name]

        with tf.Session() as sess:
            sess.run(init)

            # Restore the model to run the model from last checkpoint
            if self._to_restore:
                chkpt_fname = tf.train.latest_checkpoint(self._checkpoint_dir)
                saver.restore(sess, chkpt_fname)

            writer = tf.summary.FileWriter(self._output_dir)

            if not os.path.exists(self._output_dir):
                os.makedirs(self._output_dir)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            # Training Loop
            for epoch in range(sess.run(self.global_step), self._max_step):
                print("In the epoch ", epoch)
                saver.save(sess,
                           os.path.join(self._output_dir, "cyclegan"),
                           global_step=epoch)

                # Dealing with the learning rate as per the epoch number
                if epoch < 100:
                    curr_lr = self._base_lr
                else:
                    curr_lr = self._base_lr - \
                        self._base_lr * (epoch - 100) / 100

                self.save_images(sess, epoch)

                for i in range(0, max_images):
                    print("Processing batch {}/{}".format(i, max_images))

                    inputs = sess.run(self.inputs)

                    # Optimizing the G_A network
                    _, fake_B_temp, summary_str = sess.run(
                        [
                            self.g_A_trainer, self.fake_images_b,
                            self.g_A_loss_summ
                        ],
                        feed_dict={
                            self.input_a: inputs['images_i'],
                            self.input_b: inputs['images_j'],
                            self.learning_rate: curr_lr
                        })
                    writer.add_summary(summary_str, epoch * max_images + i)

                    fake_B_temp1 = self.fake_image_pool(
                        self.num_fake_inputs, fake_B_temp, self.fake_images_B)

                    # Optimizing the D_B network
                    _, summary_str = sess.run(
                        [self.d_B_trainer, self.d_B_loss_summ],
                        feed_dict={
                            self.input_a: inputs['images_i'],
                            self.input_b: inputs['images_j'],
                            self.learning_rate: curr_lr,
                            self.fake_pool_B: fake_B_temp1
                        })
                    writer.add_summary(summary_str, epoch * max_images + i)

                    # Optimizing the G_B network
                    _, fake_A_temp, summary_str = sess.run(
                        [
                            self.g_B_trainer, self.fake_images_a,
                            self.g_B_loss_summ
                        ],
                        feed_dict={
                            self.input_a: inputs['images_i'],
                            self.input_b: inputs['images_j'],
                            self.learning_rate: curr_lr
                        })
                    writer.add_summary(summary_str, epoch * max_images + i)

                    fake_A_temp1 = self.fake_image_pool(
                        self.num_fake_inputs, fake_A_temp, self.fake_images_A)

                    # Optimizing the D_A network
                    _, summary_str = sess.run(
                        [self.d_A_trainer, self.d_A_loss_summ],
                        feed_dict={
                            self.input_a: inputs['images_i'],
                            self.input_b: inputs['images_j'],
                            self.learning_rate: curr_lr,
                            self.fake_pool_A: fake_A_temp1
                        })
                    writer.add_summary(summary_str, epoch * max_images + i)

                    writer.flush()
                    self.num_fake_inputs += 1

                sess.run(tf.assign(self.global_step, epoch + 1))

            coord.request_stop()
            coord.join(threads)
            writer.add_graph(sess.graph)
Example #7
# Enabling cross-origin resource sharing (might need to be removed later):
# cors = CORS(app)

# Parameters for filtering template spreadsheets:
# fiterParams = api.model( "Diagram data filter application",{
#     'pmid' : fields.String(description="Pubmed ID of a requested publication", required=False, default=False),
#     'efo' : fields.String(description="EFO id of the term", required=False, default=False),
#     'pvalue' : fields.String(description="Upper boundary of the p-value (eg. 1e-8)", required=False, default=False),
# })

# Loading data - loaded once, filtered after:
parent_mapping_file = Configuration.parent_mapping_file
association_file = Configuration.association_file
ancestry_file = Configuration.ancestry_file
gwas_data = load_data(parent_mapping_file,association_file,ancestry_file)

@api.route('/v1/filter')
@api.expect(fiterParams, validate=True)
class diagarmFilter(Resource):

    @api.doc('Filter diagram data')
    def post(self):
        global gwas_data

        # Parsing and validating input parameters
        parameters = eu.validate_paramters(fiterParams.parse_args())
        print(parameters)

        # Get filtered dataset:
        filteredData = filter(gwas_data, parameters)
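
Note that fiterParams, referenced by @api.expect and parse_args(), is only defined in the commented-out block above, so that definition (or an equivalent parser) has to be active for this route to work. A minimal sketch using reqparse, which is an assumption rather than the original definition:

# Hypothetical stand-in for the commented-out fiterParams definition above;
# a reqparse.RequestParser also supports the parse_args() call used in post().
from flask_restplus import reqparse  # or flask_restx, depending on the project

fiterParams = reqparse.RequestParser()
fiterParams.add_argument('pmid', type=str, required=False,
                         help='Pubmed ID of a requested publication')
fiterParams.add_argument('efo', type=str, required=False,
                         help='EFO id of the term')
fiterParams.add_argument('pvalue', type=str, required=False,
                         help='Upper boundary of the p-value (eg. 1e-8)')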
Example #8

from data_loader.data_loader import load_data
from utils.load_model_optimizer_lr import load_model_op_lr
from utils.get_result import get_result
from utils.utils import get_config_from_json

config_file = 'config1.json'
config = get_config_from_json(config_file)

for k, v in config.items():
    print(k, ': ', v)

# 1. Load the data
print('loading data.')
if config['only_train']:
    train_loader = load_data(config)
else:
    train_loader, valid_loader, test_loader = load_data(config)
print('loaded data.')

# 2. Load the model, optimizer, loss function, and learning-rate scheduler
sentiment_model, optimizer, criterion, scheduler = load_model_op_lr(config, len(train_loader))


def validation(sentiment_model):
    y_pred = []
    y_true = []
    total_eval_loss = 0

    sentiment_model.eval()  # dropout is inactive in eval mode
    for i, (inp, tar) in enumerate(valid_loader):
Example #9
        for idx in range(len(words_row)):
            if labels_row[idx] == "B" and idx + 1 < len(
                    words_row) and labels_row[idx + 1] == "I":
                keywords_tmp.append(words_row[idx] + " " + words_row[idx + 1])
            elif labels_row[idx] == "I":
                if idx > 0 and labels_row[idx - 1] == "B":
                    continue
                else:
                    keywords_tmp.append(words_row[idx])
        keywords.append(keywords_tmp)
    return keywords


if __name__ == "__main__":
    dataset = load_data(".\\kpwr-1.1\\*\\result.csv")
    features = create_features_list(dataset)
    dataset["features"] = features

    train, test = train_test_split(dataset)
    CRF.train(train['features'], train['label_base'])
    preds = CRF.test(test['features'])

    keywords_true = test['base_keywords_in_text']
    keywords_pred = get_keywords_from_labels(test['base_words_list'], preds)

    prec_h, rec_h, f1_h = evaluator.hard_evaluation(keywords_true,
                                                    keywords_pred)
    prec_s, rec_s, f1_s = evaluator.soft_evaluation(keywords_true,
                                                    keywords_pred)