Example #1
0
def main():
    """Assemble the training pipeline from config options and run it.

    Builds datasets, model, optimizer, scheduler, criterion and metrics
    from the parsed options, optionally resumes from a checkpoint, then
    hands everything to the configured control flow.
    """
    opts = get_options()

    # Dataset creation updates the options object once per split.
    opts = initialize(opts)
    opts = create_dataset(opts, train=True)
    opts = create_dataset(opts, train=False)

    net = get_model(opts)
    optim = get_optimizer(opts, net)
    sched = get_scheduler(opts, optim)
    # Criterions are like `torch.nn.CrossEntropyLoss()`
    loss_fn = get_criterion(opts, net)
    metric_set = get_metrics(opts)

    # Cast to the configured dtype, then move to GPU when requested.
    net = convert_dtype(opts.dtype, net)
    loss_fn = convert_dtype(opts.dtype, loss_fn)
    if opts.use_cuda:
        net.cuda()
        loss_fn.cuda()

    # Restores model/optimizer/scheduler state when a checkpoint exists.
    opts = checkpoint.maybe_resume(opts, net, optim, sched)

    flow = get_controlflow(opts)
    flow(model=net, optimizer=optim, criterion=loss_fn,
         metrics=metric_set, scheduler=sched, options=opts)
def train(args, glove, data, param_file_path):
    """Build, optionally restore, train and save a tflearn model.

    `args` is a docopt-style option dict; `glove` is an optional
    (embedding_matrix, ...) tuple; `data` holds train/val splits;
    `param_file_path` is where parameters are loaded from / saved to.
    Returns the (possibly trained) `tflearn.DNN` instance.
    """
    # Without pretrained vectors, size the embedding from the CLI options.
    if glove is None:
        embedding_size = (utils.NUM_UNIQUE_TOKENS, int(args['--embedding-dims']))
    else:
        embedding_size = glove[0].shape

    print("Loading model definition for %s..." % args['--model'])
    net = models.get_model(
        args['--model'],
        embedding_size=embedding_size,
        train_embedding=args['--train-embedding'],
        hidden_dims=int(args['--hidden-dims']),
        learning_rate=float(args['--learning-rate']),
    )
    model = tflearn.DNN(net, clip_gradients=5., tensorboard_verbose=0)

    restoring = args['--evaluate-only'] or args['--continue-training']
    if restoring:
        print("Loading saved parameters from %s" % param_file_path)
        model.load(param_file_path)
    elif glove is not None:
        print("Initializing word embedding...")
        # The embedding layer holds a single weight matrix, hence index 0.
        embedding_weights = tflearn.get_layer_variables_by_name('EmbeddingLayer')[0]
        model.set_weights(embedding_weights, glove[0])

    if not args['--evaluate-only']:
        print("Training...")
        model.fit(
            data.trainX, data.trainY,
            n_epoch=int(args['--epochs']),
            validation_set=(data.valX, data.valY),
            show_metric=True,
            batch_size=128,
            run_id=os.path.splitext(param_file_path)[0],
        )

        print("Saving parameters to %s" % param_file_path)
        model.save(param_file_path)

    return model
def load_model(save_dir):
    """Reconstruct a model saved under `save_dir`.

    Reads the pickled props, builds the model from them, and restores
    weights when the weights file exists. Returns (model, props).
    """
    props_path = os.path.join(save_dir, "props.json")
    props = pickle_load(props_path)
    model = get_model(props)
    # `weights_file` is a module-level name; the weights are optional.
    weights_path = os.path.join(save_dir, weights_file)
    if os.path.exists(weights_path):
        model.load_weights(weights_path)
    return model, props
def fit_model(structure, data_matrix, old_root=None, gibbs_steps=200):
    """Fit `structure` to `data_matrix` via Gibbs sampling.

    When `old_root` is omitted, a trivial scalar Gaussian root is sampled
    as the starting point. Returns the fitted root node.
    """
    if old_root is None:
        latent = data_matrix.sample_latent_values(
            np.zeros((data_matrix.m, data_matrix.n)), 1.)
        old_root = GaussianNode(latent, 'scalar', 1.)

    root = initialization.initialize(
        data_matrix, old_root, old_root.structure(), structure,
        num_iter=gibbs_steps)

    model = models.get_model(
        structure, fixed_noise_variance=data_matrix.fixed_variance())
    models.align(root, model)

    # Full Gibbs sweep, then one maximization sweep to finish.
    dumb_samplers.sweep(data_matrix, root, num_iter=gibbs_steps)
    dumb_samplers.sweep(data_matrix, root, maximize=True, num_iter=1)
    return root
def get_uploads_from_temp(ids):
    """Return the uploaded files whose primary keys are in `ids`.

    Each entry is a dict with keys "file" (the stored file wrapped in
    `File`), "date" (upload date) and "name" (original filename).
    """
    # Imported lazily to avoid a circular import at module load time
    # (presumably — TODO confirm against the package layout).
    from models import get_model

    files = get_model().objects.filter(pk__in=ids)

    return [
        {"file": File(fl.file), "date": fl.upload_date, "name": fl.filename}
        for fl in files
    ]
def train():
    """Train a model on the SST dataset and save it to disk.

    Fix: the original printed `"Loading model definition for %s..." % model`
    and called `models.get_model(model)` before `model` was ever assigned,
    raising NameError; both broken lines are removed. The `get_model` call
    also rebound `net` after it had already been consumed by `tflearn.DNN`,
    so it was dead code as well.
    """
    embedding = generate_embedding()
    data = utils.load_sst('sst_data.pkl')
    net = generate_net(embedding)
    model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=0)

    print("Training...")
    model.fit(data.trainX, data.trainY,
              validation_set=(data.valX, data.valY),
              show_metric=True, batch_size=128)

    print("Saving Model...")
    # NOTE(review): interpolating the DNN object produces its repr as the
    # filename stem; a proper model-name string would be cleaner — confirm
    # the intended naming scheme.
    model_path = '%s.tflearn' % model
    model.save(model_path)
    print("Saved model to %s" % model_path)
Example #7
0
    def search(self, request):
        """
        Searches in the fields of the given related model and returns the
        result as a simple string to be used by the jQuery Autocomplete plugin
        """
        query = request.GET.get('q', None)
        app_label = request.GET.get('app_label', None)
        model_name = request.GET.get('model_name', None)
        search_fields = request.GET.get('search_fields', None)

        if search_fields and app_label and model_name and query:
            def construct_search(field_name):
                # use different lookup methods depending on the notation
                if field_name.startswith('^'):
                    return "%s__istartswith" % field_name[1:]
                elif field_name.startswith('='):
                    return "%s__iexact" % field_name[1:]
                elif field_name.startswith('@'):
                    return "%s__search" % field_name[1:]
                else:
                    return "%s__icontains" % field_name

            model = models.get_model(app_label, model_name)
            qs = model._default_manager.all()
            # Intersect the queryset with an OR of all fields for each token.
            for bit in query.split():
                or_queries = [models.Q(**{construct_search(
                    smart_str(field_name)): smart_str(bit)})
                        for field_name in search_fields.split(',')]
                other_qs = QuerySet(model)
                other_qs.dup_select_related(qs)
                other_qs = other_qs.filter(reduce(operator.or_, or_queries))
                qs = qs & other_qs
            # Fix: build the response once, after all tokens are processed.
            # Previously this ran on every loop iteration and left `data`
            # unbound (NameError) when the query produced no tokens.
            data = ''.join([u'%s|%s\n' % (f.__unicode__(), f.pk) for f in qs])

            return HttpResponse(data)
        return HttpResponseNotFound()
Example #8
0
def calc_func(noisy_dir_path):
    """Enhance every wav listed in `noisy_dir_path`/wav.scp with the trained
    generator and write the results as Kaldi fbank features (or, in debug
    mode, as .mat dumps for the first utterance only).

    Relies on module-level names `args`, `method` and `opts` — presumably
    parsed CLI options and a config dict; TODO confirm at module scope.
    """
    with torch.no_grad():
        debug_model = args.debug_model
        _method = method
        model_opts = json.load(
            open(os.path.join("configs/%s.json" % args.model_config), 'r'))
        gen_model = model_opts['gen_model_name']
        calc_target = get_target(args.target_type)

        device = torch.device("cuda")
        print_with_time("Loading model...")
        # `get_model` returns the generator class; instantiate it and move to GPU.
        Generator, _ = get_model(gen_model, None)
        model = Generator(model_opts['gen_model_opts']).to(device)

        checkpoint = torch.load("Checkpoints/%s/checkpoint_%09d.pth" %
                                (_method, args.global_step))
        model.load_state_dict(checkpoint["generator"])
        # model.load_state_dict(checkpoint["enhancer"])
        model.eval()
        # Mel filterbank matrix: 512-point FFT, 16 kHz, 40 bands.
        melbank = get_fft_mel_mat(512, 16000, 40)

        _method = "_".join([_method, str(args.global_step)])
        if debug_model:
            os.system('mkdir -p debug/%s' % _method)
        print_with_time("Start to enhance wav file in %s with method %s\n" %
                        (noisy_dir_path, _method))
        udir_path = "%s_%s" % (noisy_dir_path, _method)
        if not os.path.exists(udir_path):
            os.mkdir(udir_path)
        wav_scp = read_path_list(os.path.join(noisy_dir_path, "wav.scp"))
        if not debug_model:
            # Kaldi ark/scp output pair; `offset` tracks the byte position of
            # each utterance's matrix inside the ark file.
            ark_file = open(os.path.join(udir_path, "feats.ark"), 'wb')
            scp_file = open(os.path.join(udir_path, "feats.scp"), 'w')
            key_len = wav_scp[0].find(' ')
            kaldi_holder = KaldiFeatHolder(key_len, 3000, 40)
            offset = key_len + 1
        enhanced_number = 0
        for it, (one_wav) in enumerate(wav_scp):
            # Each scp line is "<utterance-id> <wav-path>".
            wav_id, wav_path = one_wav.split(' ')
            sr, noisy_speech = wavfile.read(wav_path)
            # Downmix multi-channel audio to mono.
            if len(noisy_speech.shape) > 1:
                noisy_speech = np.mean(noisy_speech, 1)

            # "_early50" is the clean / early-reverberation reference signal.
            early50_path = wav_path.replace('.wav', '_early50.wav')
            sr, early50 = wavfile.read(early50_path)
            if len(early50.shape) > 1:
                early50 = np.mean(early50, 1)
            # as the training dataset, use "power_norm" to normalize the waveform to match the input of model.
            # c = np.sqrt(np.mean(np.square(noisy_speech)))
            c = calc_rescale_c(noisy_speech, args.rescale_method)
            noisy_speech = noisy_speech / c
            early50 = early50 / c

            noisy_fbank, noisy_mag = log_fbank(noisy_speech, False, True, True,
                                               None)
            early50_fbank, early50_mag = log_fbank(early50, False, True, True,
                                                   None)
            noise_fbank, noise_mag = log_fbank(noisy_speech - early50, False,
                                               True, True, None)
            # Model input is either the mel fbank or the squared magnitude
            # spectrum, shaped (1, frames, bins) after the transpose.
            if args.feature_domain == "mel":
                feat = torch.Tensor(noisy_fbank.T).unsqueeze(0).to(device)
                label = torch.Tensor(early50_fbank.T).unsqueeze(0).to(device)
                noise = torch.Tensor(noise_fbank.T).unsqueeze(0).to(device)
            else:
                feat = torch.Tensor(
                    np.square(noisy_mag).T).unsqueeze(0).to(device)
                label = torch.Tensor(
                    np.square(early50_mag).T).unsqueeze(0).to(device)
                noise = torch.Tensor(
                    np.square(noise_mag).T).unsqueeze(0).to(device)

            if args.target_type.lower() == "mapping_mag":
                predict = model.forward(feat.sqrt())
            else:
                predict = model.forward(torch.log(feat + opts['eps']))

            results = calc_target(feat, label, noise, predict, opts)
            enhanced = results["enhanced"]
            predict = results["predict"]
            target = results["target"]

            if args.feature_domain == "mel":
                enhanced_pow = 0
                enhanced_fbank = enhanced[0, :, :].cpu().numpy()
            else:
                # Project the enhanced power spectrum onto the mel filterbank.
                enhanced_pow = enhanced[0, :, :].cpu().numpy()
                enhanced_fbank = np.matmul(enhanced_pow, melbank.T)

            # Undo the 1/c waveform rescaling (power domain => c**2) before log.
            log_enhanced_fbank = np.log(enhanced_fbank * (c**2.) + opts['eps'])

            if debug_model:
                sio.savemat(
                    "debug/%s/%s_%s" %
                    (_method, wav_id, wav_path.split('/')[-5]), {
                        'noisy_mag':
                        noisy_mag,
                        'noisy_fbank':
                        noisy_fbank,
                        'enhanced_mag':
                        np.sqrt(enhanced_pow).T,
                        'enhanced_fbank':
                        enhanced_fbank.T,
                        'early50_mag':
                        early50_mag,
                        'early50_fbank':
                        early50_fbank,
                        'predict':
                        predict[0, :, :].cpu().numpy().T,
                        'target':
                        target[0, :, :].cpu().numpy().T,
                        'log_enhanced_fbank':
                        log_enhanced_fbank.T,
                        'log_early50_fbank':
                        np.log(early50_fbank * (c**2.) + opts['eps']),
                        'c':
                        c
                    })
                # Debug mode dumps only the first utterance, then returns.
                if it >= 0:
                    return
            else:
                kaldi_holder.set_key(wav_id)
                kaldi_holder.set_value(log_enhanced_fbank)
                kaldi_holder.write_to(ark_file)
                scp_file.write("%s %s/feats.ark:%d\n" %
                               (wav_id, udir_path, offset))
                offset += kaldi_holder.get_real_len()

            enhanced_number += 1
            # Progress report every 40 utterances.
            if enhanced_number % 40 == 0:
                print_with_time(
                    "Enhanced %5d(%6.2f%%) utterance" %
                    (enhanced_number, 100. * enhanced_number / len(wav_scp)))
        print_with_time("Enhanced %d utterance" % enhanced_number)
        ark_file.close()
        scp_file.close()
        post_process(noisy_dir_path, udir_path)
        print_with_time("Done %s." % _method)
Example #9
0
def main(args):
    """Answer hard-coded QA examples with an eagerly-restored model, dump its
    input ids / embeddings / logits to .npy files, then run the same inputs
    through the TFLite model at `args.model_file` and compare the logits.
    """
    config = get_config_from_args(args, mode='infer')
    # CLI values override the stored config when provided.
    max_seq_length = args.max_seq_length or config.max_seq_length
    config.max_seq_length = max_seq_length
    max_answer_span = args.max_answer_span or config.max_answer_span
    config.max_answer_span = max_answer_span

    model_file = args.model_file
    questions = [
        "What is the AFC short for?",
        # "What day was the game played on?",
    ]
    contexts = [
        "The American Football Conference (AFC) champion Denver Broncos defeated the National "
        "Football Conference (NFC) champion Carolina Panthers 24–10 to earn their third Super Bowl title.",
        # "The game was played on February 7, 2016, at Levi's Stadium in the San Francisco Bay Area " \
        # "at Santa Clara, California.",
    ]

    logger.info("running in eager mode...")
    tf.enable_eager_execution()
    checkpoint_path = tf.train.latest_checkpoint(config.checkpoint_dir)

    logger.info("restoring model weights...")

    # Variables created inside this scope are restored from the checkpoint.
    with tf.contrib.eager.restore_variables_on_create(checkpoint_path):

        model = get_model(config)
        logger.info("warming up model...")
        model.warm_up(config)

    context_spans, inputs = model.get_inputs(questions, contexts, config)
    inputs_tensor = [
        tf.convert_to_tensor(i, dtype=tf.int32) for i in inputs.values()
    ]
    logger.info("begin inferring...")
    start_predictions, end_predictions, norm_scores = model.infer(
        inputs_tensor, max_answer_span=config.max_answer_span, export=True)

    prediction_answers = decode_answer(contexts, context_spans,
                                       start_predictions, end_predictions)
    for q, c, a, ns in zip(questions, contexts, prediction_answers,
                           norm_scores):
        logger.info('q={}\na={}\n\tcontext={}\n\n'.format(
            q, (a, round(float(ns), 4)), c))

    print(model.embeddings.shape)
    print(model.logits.shape)
    input_ids = inputs_tensor[0]
    print(input_ids.shape)

    # Persist intermediate tensors next to the TFLite model for offline checks.
    input_ids_file = os.path.join(os.path.dirname(model_file), 'input_ids')
    input_embeddings_file = os.path.join(os.path.dirname(model_file),
                                         'input_embeddings')
    output_logits_file = os.path.join(os.path.dirname(model_file),
                                      'output_logits')
    np.save(input_ids_file, input_ids)
    np.save(input_embeddings_file, model.embeddings)
    np.save(output_logits_file, model.logits)

    # Load TFLite model and allocate tensors.
    interpreter = tf.lite.Interpreter(model_path=model_file)
    interpreter.allocate_tensors()

    # Get input and output tensors.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # Test model on random input data.
    print(input_details)
    print(output_details)
    print(model.logits)
    interpreter.set_tensor(input_details[0]['index'], input_ids)
    interpreter.set_tensor(input_details[1]['index'], model.embeddings)

    interpreter.invoke()

    # The function `get_tensor()` returns a copy of the tensor data.
    # Use `tensor()` in order to get a pointer to the tensor.
    output_data = interpreter.get_tensor(output_details[0]['index'])
    print(output_data.shape)
    print(output_data)
    # Sanity check: TFLite logits should match the eager-mode logits.
    print(np.allclose(output_data, model.logits, rtol=1e-4))
Example #10
0
def five_fold_training(training_dataset, comp_feature_list, tar_feature_list, comp_hidden_lst, tar_num_of_last_neurons, fc1, fc2, learn_rate, batch_size, model_nm, dropout, experiment_name, n_epoch, fold_num=None, external_comp_feat_fl=None):
    """Train and evaluate a compound-target affinity model with k-fold CV.

    For each fold the model is trained from scratch; per-epoch validation and
    test losses are computed, the epoch with the lowest test MSE is tracked,
    and the best scores/predictions are written under
    `{project_file_path}/result_files/{experiment_name}/`.
    When `fold_num` is given, only that single fold is run.
    Relies on module-level `project_file_path` and several project helpers.
    """
    arguments = [str(argm) for argm in [comp_hidden_lst[0], comp_hidden_lst[1], tar_num_of_last_neurons, fc1, fc2, learn_rate, batch_size, model_nm, dropout, n_epoch, fold_num]]

    str_arguments = "-".join(arguments)
    print("Arguments:", str_arguments)

    # Fixed seeds for reproducibility.
    torch.manual_seed(123)
    np.random.seed(123)

    use_gpu = torch.cuda.is_available()

    device = "cpu"

    if use_gpu:
        print("GPU is available on this device!")
        device = "cuda"
    else:
        print("CPU is available on this device!")

    #get_cnn_test_val_folds_train_data_loader("Davis_Filtered", ["ecfp4"], ["sequencematrix500"], external_comp_feat_fl="aacrtest_ecfp4_normalized.tsv")
    loader_fold_dict, test_loader, external_data_loader = get_cnn_test_val_folds_train_data_loader(training_dataset,
                                                                                                   comp_feature_list,
                                                                                                   tar_feature_list,
                                                                                                   batch_size,
                                                                                                   external_comp_feat_fl)
    num_of_folds = len(loader_fold_dict)
    validation_fold_epoch_results, test_fold_epoch_results = [], []

    if not os.path.exists("{}/result_files/{}".format(project_file_path, experiment_name)):
        subprocess.call("mkdir {}".format("{}/result_files/{}".format(project_file_path, experiment_name)),
                        shell=True)

    best_test_result_fl = open(
        "{}/result_files/{}/test_performance_results-{}.txt".format(project_file_path, experiment_name,
                                                                             "-".join(arguments)), "w")

    best_test_prediction_fl = open(
        "{}/result_files/{}/test_predictions-{}.txt".format(project_file_path, experiment_name,
                                                                      "-".join(arguments)), "w")

    folds = range(num_of_folds) if not fold_num else range(fold_num, fold_num + 1)
    print(list(folds))
    for fold in folds:
        # Sentinel "best so far" scores; any real MSE will beat these.
        best_val_fold_mse_score, best_test_fold_mse_score = 10000, 10000
        best_val_test_performance_dict, best_test_test_performance_dict = dict(), dict()
        best_val_test_performance_dict["MSE"], best_test_test_performance_dict["MSE"] = 100000000.0, 100000000.0

        str_best_val_test_predictions = ""
        str_best_test_test_predictions = ""

        test_fold_epoch_results.append([])
        validation_fold_epoch_results.append([])
        train_loader, valid_loader = loader_fold_dict[fold]

        print("FOLD : {}".format(fold + 1))

        # Fresh model/optimizer/criterion for every fold.
        model = get_model(model_nm, tar_feature_list, 1024, tar_num_of_last_neurons, comp_hidden_lst[0],
                          comp_hidden_lst[1], fc1, fc2, dropout).to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=learn_rate)
        criterion = torch.nn.MSELoss()
        optimizer.zero_grad()

        for epoch in range(n_epoch):
            print("Epoch :{}".format(epoch))
            total_training_loss, total_validation_loss, total_test_loss = 0.0, 0.0, 0.0
            total_training_count, total_validation_count, total_test_count = 0, 0, 0
            # validation_predictions, validation_labels, test_predictions, test_labels = [], [], [], []
            # test_all_comp_ids, test_all_tar_ids =  [], []
            batch_number = 0
            model.train()
            print("Training:", model.training)
            for i, data in enumerate(train_loader):
                batch_number += 1
                # clear gradient DO NOT forget you fool!
                optimizer.zero_grad()

                comp_feature_vectors, target_feature_vectors, labels, compound_ids, target_ids = data
                comp_feature_vectors, target_feature_vectors, labels = Variable(comp_feature_vectors).to(
                    device), Variable(
                    target_feature_vectors).to(device), Variable(labels).to(device)

                total_training_count += comp_feature_vectors.shape[0]
                y_pred = model(comp_feature_vectors, target_feature_vectors).to(device)
                loss = criterion(y_pred.squeeze(), labels)
                total_training_loss += float(loss.item())
                loss.backward()
                optimizer.step()
            print("Epoch {} training loss:".format(epoch), total_training_loss)

            # Evaluate on validation and test sets with gradients disabled.
            model.eval()
            with torch.no_grad():  # torch.set_grad_enabled(False):
                print("Training:", model.training)
                total_validation_loss, total_validation_count, validation_labels, validation_predictions, all_val_comp_ids, all_val_tar_ids = compute_test_loss(
                    model, criterion, valid_loader, device)
                print("Epoch {} validation loss:".format(epoch), total_validation_loss)

                total_test_loss, total_test_count, test_labels, test_predictions, all_test_comp_ids, all_test_tar_ids = compute_test_loss(
                    model, criterion, test_loader, device)

                print("==============================================================================")
                validation_scores_dict = get_scores(validation_labels, validation_predictions, "Validation",
                                                    total_training_loss, total_validation_loss, epoch,
                                                    validation_fold_epoch_results, False, fold)

                print("------------------------------------------------------------------------------")
                test_scores_dict = get_scores(test_labels, test_predictions, "Test", total_training_loss,
                                              total_test_loss, epoch, test_fold_epoch_results, False, fold)

                # Track the epoch with the lowest test MSE for this fold.
                if test_scores_dict["MSE"] < best_test_fold_mse_score:
                    best_test_test_performance_dict, best_test_test_predictions, best_test_fold_mse_score, str_best_test_test_predictions = save_best_model_predictions(
                        "Test", epoch, test_scores_dict, best_test_fold_mse_score, test_scores_dict["MSE"], model,
                        project_file_path, training_dataset, str_arguments,
                        all_test_comp_ids, all_test_tar_ids, test_labels, test_predictions, fold)

            # After the last epoch, persist this fold's best scores/predictions.
            if epoch == n_epoch - 1:
                best_test_prediction_fl.write("FOLD : {}\n".format(fold + 1))
                best_test_result_fl.write("FOLD : {}\n".format(fold + 1))
                score_list = get_list_of_scores()
                for scr in score_list:
                    best_test_result_fl.write("Test {}:\t{}\n".format(scr, best_test_test_performance_dict[scr]))

                best_test_prediction_fl.write("FOLD : {}\n".format(fold + 1))
                best_test_prediction_fl.write(best_test_test_predictions)

    best_test_prediction_fl.close()
    best_test_result_fl.close()
Example #11
0
def main():
    """Train a model over a distributional word space, logging metrics to CSV
    every 1000 batches and checkpointing weights every 5000 batches.

    Relies on module-level constants (SPACE_FILENAME, CORPUS_FOLDER,
    BATCH_SIZE, LEARNING_RATE, DEBUG) and project helpers.
    """
    parser = argparse.ArgumentParser('description')
    parser.add_argument('--logfolder', '-l', help='Log folder.')
    parser.add_argument('--csvfolder', '-c', help='Output CSV folder for graphs.')
    parser.add_argument('--output', '-o', help='Folder for saving output models.')
    parser.add_argument('--model', '-m', help='Selects a particular model.')
    parser.add_argument('--maxbatches', '-B', default=0, type=int, help='Maximum number of batches to process (in thousands).')
    parser.add_argument('--batchsize', '-b', type=int, default=BATCH_SIZE, help='Batch size')
    parser.add_argument('--dimensions', '-d', type=int, default=0, help='Number of dimensions from the space to use. If 0 (default), use all.')
    parser.add_argument('--learningrate', '-r', type=float, default=LEARNING_RATE, help='Learning rate')
    args = parser.parse_args()

    logger.debug("Reading distributional space '%s'" % SPACE_FILENAME)
    space = load_numpy(SPACE_FILENAME, insertblank=True)
    if args.dimensions:
        space.matrix = space.matrix[:,:args.dimensions]
    if True:
        # Standardize all rows except row 0, which is kept as the zero
        # ("blank") vector.
        m = space.matrix
        norm_mean = m[1:].mean(axis=0)
        norm_std = (m[1:].std(axis=0) * 10)
        m = (m - norm_mean) / norm_std
        m[0] = 0
        space.matrix = m
    #space = space.normalize()
    logger.debug("Finished reading space")
    logger.debug("Space contains %d words with %d dimensions each." % space.matrix.shape)

    cbr = CorpusBatchReader(CORPUS_FOLDER, space, batch_size=args.batchsize)
    data_iterator = DataIterator(cbr, epochs=1, maxbatches=args.maxbatches * 1000)

    HIDDEN = space.matrix.shape[1]

    logger.debug("Compiling compute graph")
    R = data_iterator.test[0].shape[1]
    model = models.get_model(args.model, space, R, HIDDEN, args.learningrate)

    # Static run metadata, merged into every CSV row.
    modelinfo = {
        'model': args.model,
        'learningrate': args.learningrate,
        'hidden': HIDDEN,
        'space': SPACE_FILENAME,
        'dimensions': space.matrix.shape[1],
    }

    filename = _generate_filename(modelinfo)
    csvlog = CSVLogger(os.path.join(args.csvfolder, filename + ".csv"))

    logger.debug("Compilation finished")
    if DEBUG:
        logger.debug("Theano compute graph:\n" + debugprint(model._train.maker.fgraph.outputs[0], file='str'))

    logger.debug("Starting training")
    start_time = datetime.now()
    for X, Y in data_iterator:
        trainscore = model.train_on_batch(X, Y)

        # Every 1000 batches: evaluate, log a progress line, append a CSV row.
        if data_iterator.batch % 1000 == 0:
            valscore = model.evaluate(*data_iterator.val, verbose=False)
            testscore = model.evaluate(*data_iterator.test, verbose=False)
            progress = data_iterator.progress()
            elapsed = (datetime.now() - start_time)
            rank = intrinsic_eval(model, space, data_iterator.test[0], data_iterator.test[1])
            #rank = 0.0
            eta = _compute_eta(start_time, progress)
            batchinfo = dict(
                epoch=data_iterator.epoch,
                kbatch=data_iterator.batch/1000,
                trainscore=trainscore,
                valscore=valscore,
                testscore=testscore,
                intrinsic=rank,
                progress=100 * progress,
                elapsed=elapsed.total_seconds(),
                eta=eta
            )
            info = _dictmerge(batchinfo, modelinfo)
            logger.debug("%(epoch)3d ep %(kbatch)8d Kba %(intrinsic)6.4f / %(valscore)8.5f / %(testscore)8.5f [%(progress)5.1f%% eta %(eta)s]" % info)
            # `eta` is for the console line only; keep it out of the CSV.
            del info['eta']
            csvlog.append(info)

        # Every 5000 batches: checkpoint the model weights.
        if data_iterator.batch % 5000 == 0:
            checkpoint_filename = os.path.join(args.output, "%s__batch%08d.hd5" % (filename, data_iterator.batch))
            logger.debug("Checkpointing model to %s" % checkpoint_filename)
            model.save_weights(checkpoint_filename, overwrite=True)
 def build_model(self, model_name, arch_params):
     """Instantiate the network named `model_name` and store it on `self.model`.

     Output size and input channel count come from `self.metadata`;
     `arch_params` is forwarded to the model factory untouched.
     """
     # NOTE(review): the original computed an unused local
     # `grid_dim = int(np.sqrt(self.metadata["output_features"]))`; removed.
     self.model = get_model(
         model_name, out_features=self.metadata["output_features"], in_channels=self.metadata["num_channels"], arch_params=arch_params
     )
    return args


# Script entry point: extract per-video features with a pretrained model.
if __name__ == '__main__':
    args = parse_args()

    # Per-model preprocessing config (mean/std/resize/crop).
    config = config[args.model]

    if not args.no_cuda and torch.cuda.is_available():
        args.device = "cuda"
    else:
        args.device = "cpu"
    device = torch.device(args.device)

    model = get_model(args.model, pretrained=True)
    model.to(device)
    model.eval()

    # 3D conv models need the 3D extractor; everything else is frame-based.
    models_3d = ['c3d']
    FeatureExtractor = FeatureExtractor3D if args.model in models_3d else FeatureExtractor2D
    extractor = FeatureExtractor(
        stride=args.stride,
        mean=config.mean,
        std=config.std,
        resize_to=config.resize_to,
        crop_to=config.crop_to,
        model=model,
        batch_size=args.batch_size)

    videos = os.listdir(args.video_dpath)
    # NOTE(review): from here on the script switches from `args` to an
    # undefined `opts`, and rebinds `model` — this looks like two scripts
    # fused together during scraping; verify against the original sources.
    start, end = -1,-1
    if opts.start is not None and opts.end is not None:
        start, end = opts.start,opts.end
        print("start: %d, end: %d" % (start, end))
    if opts.stdout:
        stdout = opts.stdout
        stdout_filename = stdout + '__stdout.log'
        sys.stdout = logger.Logger(settings.stdout_path + stdout_filename)
    #Display progress logs on stdout
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
    
    fullgrid=opts.fullgrid
    
    model_id = opts.model_id
    assert isinstance(model_id, str), "must specify model_id"
    model = models.get_model(models.BaseModel(model_id), fullgrid)

    classifier_name = None
    if not fullgrid: classifier_name=model.classifier_name

    rawdata, classes, classes_names = data.load(model.path)

    classifier_grid_parameters={}
    if fullgrid:
        print("fullgrid")
        pipeline_parameters=None
    else:
        pipeline_parameters=model.pipeline_parameters

    print(model.pipeline_parameters)
Example #15
0
def main(raw_args=None):
    """Train a LiTS segmentation model and checkpoint the best weights.

    Parses CLI arguments (or ``raw_args``), loads the JSON config, creates a
    timestamped run directory under ``--logdir``, builds reproducible
    train/val/test splits, trains for ``config.training.epochs`` epochs and
    saves the weights with the best validation dice.

    Args:
        raw_args: optional list of CLI tokens; ``None`` reads ``sys.argv``.

    Returns:
        dict: best scores observed during training (key ``'validation_dice'``).
    """

    #-----------------------------------------------------------------------------------------
    ### First, set the parameters of the function, including the config file, log directory and the seed.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', required=True,
                        type=str, help='path to the config file for the training')
    parser.add_argument('--logdir', required=True,
                        type=str, help='path to the directory containing all run folders')
    parser.add_argument('--num_workers', type=int, default=4,
                        help='dataloader threads')
    # NOTE: the default is drawn once at parse time, so omitting --seed still
    # yields a fresh (but recorded and reproducible) seed for each run.
    parser.add_argument('--seed', type=int, default=np.random.randint(2**32 - 1),
                        help='the seed for reproducing experiments')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='whether to debug or not')
    args = parser.parse_args(raw_args)

    print("SEED used is ", args.seed)  # Allows to quickly know if you set the wrong seed
    torch.manual_seed(args.seed)  # the seed for every torch calculus
    np.random.seed(args.seed)  # the seed for every numpy calculus

    #-----------------------------------------------------------------------------------------
    ### Prepare the log by adding the config with runtime and seed
    with open(args.config_file) as json_file:
        config = json.load(json_file)

    config['runtime'] = {}
    config['runtime']['num_workers'] = args.num_workers
    config['dataset']['num_workers'] = args.num_workers
    config['runtime']['SEED'] = args.seed

    # Race-free creation of the log root (also creates missing parents).
    os.makedirs(args.logdir, exist_ok=True)

    time = datetime.datetime.today()
    log_id = '{}_{}h{}min'.format(time.date(), time.hour, time.minute)
    log_path = os.path.join(args.logdir, log_id)
    i = 0
    created = False
    while not created:
        try:
            os.mkdir(log_path)
            created = True
        except FileExistsError:
            # Only name collisions are retried with a numeric suffix; any
            # other OSError (bad path, permissions) now propagates instead of
            # spinning forever as the previous bare `except` did.
            i += 1
            log_id = '{}_{}h{}min_{}'.format(time.date(), time.hour, time.minute, i)
            log_path = os.path.join(args.logdir, log_id)
    with open(os.path.join(log_path, 'config.json'), 'w') as file:
        json.dump(config, file)


    #-----------------------------------------------------------------------------------------
    ### Get the parameters according to the configuration
    config = ObjFromDict(config)

    model = get_model(config.model)

    data_path = config.dataset.root
    print('data_path ', data_path)

    # Fix the seed for the split so the train/val/test partition is identical
    # across runs regardless of --seed.
    split_seed = 0
    np.random.seed(split_seed)

    image_dir = os.listdir(os.path.join(data_path, 'Training Batch 1')) + os.listdir(os.path.join(data_path, 'Training Batch 2'))
    # File names presumably look like 'volume-<idx>.ext'; [7:-4] keeps the
    # integer index — TODO confirm against the actual LiTS file naming.
    all_indexes = [int(file_name[7:-4]) for file_name in image_dir if 'volume' in file_name]
    split = np.random.permutation(all_indexes)
    # 80/10/10 split; `test` takes the remainder, n_test is kept for symmetry.
    n_train, n_val, n_test = int(0.8 * len(split)), int(0.1 * len(split)), int(0.1 * len(split))

    train = split[: n_train]
    val = split[n_train: n_train + n_val]
    test = split[n_train + n_val:]

    # Persist the split so the run can be evaluated/reproduced later.
    with open(os.path.join(log_path, 'splits.json'), 'w+') as file:
        json.dump({
            "train": train.tolist(),
            "val": val.tolist(),
            "test": test.tolist()
        }, file)

    # Restore the user seed after the deterministic split.
    torch.manual_seed(args.seed)  # the seed for every torch calculus
    np.random.seed(args.seed)

    # Setup Data Loaders. In debug mode every split is the same single
    # training volume so one epoch round-trips quickly.
    if args.debug:
        train_dataset = LiTSDataset(data_path, train[:1], no_tumor=True,
                                    augment=None,
                                    aug_parameters=vars(config.dataset.aug_parameters),
                                    bounding_box=config.dataset.bounding_box,
                                    spacing=config.dataset.spacing,
                                    physical_reference_size=config.dataset.physical_reference_size)
        val_dataset = LiTSDataset(data_path, train[:1], no_tumor=True,
                                  aug_parameters=vars(config.dataset.aug_parameters),
                                  bounding_box=config.dataset.bounding_box,
                                  spacing=config.dataset.spacing,
                                  physical_reference_size=config.dataset.physical_reference_size)
        test_dataset = LiTSDataset(data_path, train[:1], no_tumor=True,
                                   aug_parameters=vars(config.dataset.aug_parameters),
                                   bounding_box=config.dataset.bounding_box,
                                   spacing=config.dataset.spacing,
                                   physical_reference_size=config.dataset.physical_reference_size)
    else:
        # Augmentation is enabled for training only.
        train_dataset = LiTSDataset(data_path, train, augment=True, no_tumor=True,
                                    aug_parameters=vars(config.dataset.aug_parameters),
                                    bounding_box=config.dataset.bounding_box,
                                    spacing=config.dataset.spacing,
                                    physical_reference_size=config.dataset.physical_reference_size)
        val_dataset = LiTSDataset(data_path, val, no_tumor=True,
                                  aug_parameters=vars(config.dataset.aug_parameters),
                                  bounding_box=config.dataset.bounding_box,
                                  spacing=config.dataset.spacing,
                                  physical_reference_size=config.dataset.physical_reference_size)
        test_dataset = LiTSDataset(data_path, test, no_tumor=True,
                                   aug_parameters=vars(config.dataset.aug_parameters),
                                   bounding_box=config.dataset.bounding_box,
                                   spacing=config.dataset.spacing,
                                   physical_reference_size=config.dataset.physical_reference_size)

    train_dataloader = DataLoader(dataset=train_dataset, num_workers=config.dataset.num_workers,
                                  batch_size=config.training.batch_size, shuffle=True)
    val_dataloader = DataLoader(dataset=val_dataset, num_workers=config.dataset.num_workers,
                                batch_size=config.training.batch_size, shuffle=False)
    test_dataloader = DataLoader(dataset=test_dataset, num_workers=config.dataset.num_workers,
                                 batch_size=config.training.batch_size, shuffle=False)

    # Compute on gpu or cpu
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.to(device)

    # Optimize only the trainable parameters.
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.Adam(params, lr=config.optimizer.learning_rate,
                                 weight_decay=config.optimizer.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=config.optimizer.lr_scheduler.step_size,
                                                   gamma=config.optimizer.lr_scheduler.gamma)
    # tensorboard logs
    writer = SummaryWriter(log_path)
    best_scores = {'validation_dice': -1}

    #-----------------------------------------------------------------------------------------
    ### Now, we can run the training
    for epoch in range(config.training.epochs):
        writer = train_one_epoch(config, model, optimizer, train_dataloader, device, epoch, writer, freq_print=10000)
        writer, eval_score = evaluate(config, model, val_dataloader, device, epoch, writer)
        lr_scheduler.step()

        # Keep the best validation-dice weights; otherwise checkpoint every
        # third epoch and the final one.
        if eval_score['validation_dice'] > best_scores['validation_dice']:
            torch.save(model.state_dict(), os.path.join(log_path, 'best_{}.pth'.format('validation_dice')))
            best_scores['validation_dice'] = eval_score['validation_dice']
        elif epoch % 3 == 0 or epoch == config.training.epochs - 1:
            torch.save(model.state_dict(), os.path.join(log_path, 'epoch_{}.pth'.format(epoch)))

    writer.close()

    return best_scores
Exemple #16
0
if __name__ == "__main__":
    embedding, train_iter, valid_iter, test_iter = get_dataset(
        params.dataset, rate=params.noise_ratio, batch_size=params.batch_size)
    model_params = {}
    if params.dataset == "IMDB":
        model_params["output_size"] = 2
        model_params["hidden_size"] = 256

    if params.model == "LSTM" or params.model == "LSTMATT":
        model_params["weights"] = embedding

    if params.dataset == "CIFAR100":
        model_params["num_classes"] = 100

    model = get_model(params.model, **model_params)
    model.to(device)

    if params.optimizer == "laprop":
        optim = VLaProp(model.parameters(),
                        lr=params.lr,
                        eps=1e-15,
                        v=params.v,
                        alpha=params.alpha,
                        auto_v=params.auto,
                        weight_decay=params.weight_decay)
    elif params.optimizer == "adam":
        optim = Vadam2(model.parameters(),
                       lr=params.lr,
                       eps=1e-15,
                       v=params.v,
Exemple #17
0
def model_analysis(config, device, setting=1.0, debug=False):
    """Measure MACs, parameter counts and compression ratios of a student model.

    The model is analyzed three times (base encoder only, encoder with the
    compressor enabled, and the full model) so that the compressor's and
    decoder's costs can be obtained by subtraction.

    Args:
        config: experiment config dict; reads ``config['student_model']`` and
            the quantizer bit-width nested under its bottleneck transformer.
        device: device string; falls back to 'cpu' when CUDA is unavailable.
        setting: width multiplier (float), or the string 'Teacher' which uses
            width 1.0 and zeroes out the compressor costs in the results.
        debug: when True, the per-analysis prints are not hidden.

    Returns:
        dict: a deep copy of the results (sizes, output shape, compression
        ratios, and MAC/parameter counts per component).
    """
    device = torch.device(device if torch.cuda.is_available() else 'cpu')

    # BoilerPlate: FLOP-counter hooks for the custom slimmable/quantized layers.
    custom_module_mapping = {
        USConv2d: usconv_flops_counter_hook,
        USConv2dStaticSamePadding: usconv_flops_counter_hook,
        USBatchNorm2d: bn_flops_counter_hook,
    }

    student_model_config = config['student_model']
    if setting == 'Teacher':
        width = 1.0
    else:
        width = float(setting)

    num_bits = config['student_model']['bottleneck_transformer']['components'][
        'quantizer']['params']['num_bits']

    # Build Model
    student_model = get_model(student_model_config, device, strict=False)
    encoder = full_encoder(student_model, student_model_config)

    # Analyze
    results = {}
    size = student_model.input_size
    jpeg_size = jpeg_size_example(size)
    print("Width = {}".format(width))
    set_width(student_model, width)

    def input_constructor(input_res):
        # Random batch of one, matching the model's parameter dtype/device.
        batch = torch.rand((1, *input_res),
                           dtype=next(student_model.parameters()).dtype,
                           device=device)
        return {"images": batch}

    with hide_prints(debug)():
        # 1) Base encoder alone (compressor disabled).
        encoder.use_encoder = False
        macs_base_encoder, params_base_encoder = get_model_complexity_info(
            encoder, (3, size, size),
            as_strings=False,
            print_per_layer_stat=False,
            input_constructor=input_constructor,
            custom_modules_hooks=custom_module_mapping)
        # 2) Encoder with the compressor enabled.
        encoder.use_encoder = True
        macs_encoder, params_encoder = get_model_complexity_info(
            encoder, (3, size, size),
            as_strings=False,
            print_per_layer_stat=False,
            verbose=False,
            input_constructor=input_constructor,
            custom_modules_hooks=custom_module_mapping)

        # 3) The full student model (encoder + decoder).
        macs_full, params_full = get_model_complexity_info(
            student_model, (3, size, size),
            as_strings=False,
            print_per_layer_stat=False,
            verbose=False,
            input_constructor=input_constructor,
            custom_modules_hooks=custom_module_mapping)

    ### Hotfix??? ####
    # NOTE(review): the hook-based param count for the encoder is overridden by
    # a direct count of trainable encoder params — presumably because the hooks
    # miss some modules; verify against get_model_complexity_info's behavior.
    params_encoder = sum(p.numel() for p in encoder.encoder.parameters()
                         if p.requires_grad)
    params_encoder += params_base_encoder
    ####################

    results['input_size'] = size
    results["jpeg_size"] = jpeg_size
    results["output_shape"] = [int(x) for x in encoder.output_shape]
    # Compression ratio relative to the raw RGB input (size*size*3 values).
    results["compression"] = np.prod(
        results["output_shape"]) / (size * size * 3)
    results['output_size'] = np.prod(results["output_shape"]) * num_bits / 8.0
    results["jpeg_compression"] = jpeg_size / (size * size * 3)
    results["macs_base_encoder"] = macs_base_encoder
    results["params_base_encoder"] = params_base_encoder
    results["macs_compressor"] = macs_encoder - macs_base_encoder
    results["params_compressor"] = params_encoder - params_base_encoder
    results["macs_decoder"] = macs_full - macs_encoder
    results["params_decoder"] = params_full - params_encoder

    # Teacher setting: report the uncompressed output shape and no compressor cost.
    if setting == 'Teacher':
        results["output_shape"] = [
            int(x) for x in encoder.original_output_shape
        ]
        results["compression"] = np.prod(
            results["output_shape"]) / (size * size * 3)
        results["macs_compressor"] = 0.0
        results["params_compressor"] = 0.0

    del student_model
    del encoder

    return deepcopy(results)
Exemple #18
0
            videos.append(folder)
    videos.sort()

    # set the visualizer
    my_visualizer = Visualizer(n_workers=getattr(opt, 'vis_workers', 4),
                               param_f=getattr(opt, 'vis_param_f', None))

    # set the models to use
    # first, set the Spherical map rendering
    render_spherical_map = render_spherical().to(device)
    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    print('The model for rendering spherical maps has been set successfully')
    # second, set the genre full model. In this program, use the second model which
    # is spherical map inpainting net, to inpaint the spherical map, and use the refine
    # model which refines the voxels to get the final shape
    Full_model = models.get_model(opt.net, test=True)
    full_model = Full_model(opt, logger)
    full_model.to(device)
    full_model.eval()
    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    print('The model for inpainting sphercial maps has been set successfully')
    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    print('The model for refining voxels has been set successfully')

    # Process for each video
    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    print("Process videos")
    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    for video in videos:

        start = time.time()
    # for some number of iterations
    for sample_no in range(search_range.num_samples):
        # build argparse args by parsing args and then setting empty fields to specified ones above
        train_parser = util.get_train_parser()
        train_args = train_parser.parse_args([
            '--model_name', search_range.model_name, '--dataset',
            search_range.dataset, '--env_name', search_range.env_name,
            '--country', search_range.country
        ])
        generate_hps(train_args, search_range)
        train_args.epochs = search_range.epochs
        dataloaders = datasets.get_dataloaders(train_args.country,
                                               train_args.dataset, train_args)

        model = models.get_model(**vars(train_args))
        model.to(train_args.device)
        experiment_name = f"model:{train_args.model_name}_dataset:{train_args.dataset}_epochs:{search_range.epochs}_sample_no:{sample_no}"

        train_args.name = experiment_name
        print("=" * 100)
        print(f"TRAINING: {experiment_name}")
        for hp in hps:
            print(hp, train_args.__dict__[hp])
        try:
            train.train(model,
                        train_args.model_name,
                        train_args,
                        dataloaders=dataloaders)
            print("FINISHED TRAINING")
            for state_dict_name in os.listdir(train_args.save_dir):
Exemple #20
0
 def setUp(self):
     """Create the historical model under test once per test case."""
     model_name = "HistoricalTestHistorySimple"
     self.history_model = hmodels.get_model('historical', model_name)
Exemple #21
0
def check_models():
    """Smoke-test that every registered model name can be instantiated."""
    for name in MODELS_NAMES:
        get_model(name)
Exemple #22
0
    def batches(self):
        """Yield (length, examples) pairs, one per length group."""
        yield from self.group_by_len().items()

# Absolute directory containing this file; used below to locate data and experiment paths.
mydir = os.path.dirname(os.path.abspath(__file__))

if __name__ == '__main__':
    root = os.path.join(mydir, 'experiments', 'deploy')
    config = Config.load(os.path.join(root, 'config.json'))
    with open(os.path.join(root, 'featurizer.pkl')) as f:
        featurizer = pkl.load(f)
    typechecker = TypeCheckAdaptor(os.path.join(mydir, 'data', 'raw', 'typecheck.csv'), featurizer.vocab)

    model = get_model(config, featurizer.vocab, typechecker)
    model.load_weights(os.path.join(root, 'best_weights'))

    dev_generator = KBPDataAdaptor().online_to_examples(disable_interrupts='victor'!=os.environ['USER'])
    cache = Cache()
    max_cache_size = 2**15
    log = open(os.path.join(mydir, 'kbp.log'), 'wb')

    def process_cache(cache):
        for length, examples in cache.batches():
            X, Y, types = featurizer.to_matrix(examples)
            prob = model.predict(X, verbose=0)['p_relation']
            prob *= typechecker.get_valid_cpu(types[:, 0], types[:, 1])
            pred = prob.argmax(axis=1)
            confidence = np_softmax(prob)[np.arange(len(pred)), pred]
            for ex, rel, conf in zip(examples, pred, confidence):
Exemple #23
0
 def test_recomputation_pipelined_model_by_name(self):
     """A checkpoint named by layer path should be recomputed in a pipelined model."""
     checkpoint = "layer2/0/conv2"
     opts = TestRecomputation.default_args()
     opts.pipeline_splits = ["layer2"]
     opts.recompute_checkpoints = [checkpoint]
     model = models.get_model(opts, {"out": 1000}, pretrained=False)
     TestRecomputation.check_recompute(model, checkpoint)
Exemple #24
0
def parse_arguments():
    """Build the inference CLI parser on top of the common parser and return parsed options."""
    parser = argparse.ArgumentParser(
        parents=[utils.get_common_parser()],
        description='CNN inference in PopTorch')
    parser.add_argument('--data', choices=['real', 'synthetic'], default='real', help="Choose data")
    parser.add_argument('--iterations', type=int, default=100, help='number of program iterations')
    parser.add_argument(
        '--precision', choices=['16.16', '32.32'], default='16.16',
        help="Precision of Ops(weights/activations/gradients) and Master data types: 16.16, 32.32")
    # Merge CLI values with the named configuration from configs.yml.
    return utils.parse_with_config(parser, "configs.yml")


if __name__ == '__main__':
    opts = parse_arguments()
    utils.Logger.setup_logging_folder(opts)
    # PopTorch execution options: replication and device iterations come from the CLI/config.
    model_opts = poptorch.Options()
    model_opts.replicationFactor(opts.replicas)
    model_opts.deviceIterations(opts.device_iterations)

    # Inference only: no training dataloader, synchronous host-side loading.
    dataloader = data.get_data(opts, model_opts, train=False, async_dataloader=False)
    model = models.get_model(opts, data.datasets_info[opts.data], pretrained=True)
    model.eval()

    if opts.data == "synthetic":
        # Synthetic-data mode 2 — presumably generates inputs on device,
        # bypassing host I/O; confirm against the Popart session options docs.
        model_opts.Popart.set("syntheticDataMode", 2)
    if opts.half_partial:
        # Accumulate matmul/convolution partials in fp16 to save memory.
        model_opts.Popart.set("partialsTypeMatMuls", "half")
        model_opts.Popart.set("convolutionOptions", {'partialsType': 'half'})

    inference_model = poptorch.inferenceModel(model, model_opts)
    benchmark(inference_model, dataloader, opts)
Exemple #25
0
 def test_recomputation_normalized_model_by_name(self):
     """A checkpoint named by layer path should be recomputed when normalization runs on IPU."""
     checkpoint = "layer2/0/conv2"
     opts = TestRecomputation.default_args()
     opts.normalization_location = "ipu"
     opts.recompute_checkpoints = [checkpoint]
     model = models.get_model(opts, {"out": 1000}, pretrained=False)
     TestRecomputation.check_recompute(model.model, checkpoint)
def train_and_validate(config):
    """Train a target network with multi-BN augmentation modules and validate per epoch.

    Builds the dataloaders and a target network with one batch-norm branch per
    enabled augmentation type ('base' plus optional 'texture'/'stn'/'deform'),
    wraps it in an Augment controller, then runs the epoch loop, logging
    accuracies to a text file and checkpointing after every epoch.

    Args:
        config: experiment configuration; reads dataset/optimizer/augmentation
            flags, resume state, and logging paths.
    """

    # data loaders
    trainloader, testloader = get_dataloaders(config)

    # One BN branch per enabled augmentation type; 'base' is always present.
    bn_types = ['base']
    if config.perturb_vae: bn_types.append('texture')
    if config.aug_stn: bn_types.append('stn')
    if config.deform_vae: bn_types.append('deform')

    # if config.bn_num == 1:
    #     target_net = get_model(config, num_class(config.dataset))
    # else:
    target_net = get_model(config,
                           num_class(config.dataset),
                           bn_types=bn_types)

    # Augment owns the target net, the augmentation modules and their optimizers.
    model = Augment(target_net=target_net, config=config)

    start_epoch = 0
    best_test_acc = 0.0
    test_acc = 0.0
    if config.resume:
        best_test_acc, test_acc, start_epoch = \
            utils.load_checkpoint(config, model.target_net, model.target_net_optim)

    print('trainloader length: {}'.format(len(trainloader)))
    print('testloader length: {}'.format(len(testloader)))

    exp_dir = utils.get_log_dir_path(config.exp_dir, config.exp_id)
    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)

    print('exp_dir: {}'.format(exp_dir))
    # Append a run header and column names to the plain-text log.
    log_file = os.path.join(exp_dir, 'log.txt')
    names = ['epoch', 'lr', 'Train Acc', 'Test Acc', 'Best Test Acc']
    with open(log_file, 'a') as f:
        f.write('batch size: {}\n'.format(config.batch_size))
        f.write('lr: {}\n'.format(config.lr))
        f.write('momentum: {}\n'.format(config.momentum))
        f.write('weight_decay: {}\n'.format(config.weight_decay))
        for per_name in names:
            f.write(per_name + '\t')
        f.write('\n')
    # print('=> Training the base model')
    # print('start_epoch {}'.format(start_epoch))
    # print(type(start_epoch))
    # exit()
    print('target net grad clip: {}'.format(config.grad_clip))
    for epoch in range(start_epoch, config.epochs):
        # lr = adjust_learning_rate(optimizer, epoch, model.module, config)
        # Read the current LR from the optimizer (scheduling happens elsewhere).
        lr = model.target_net_optim.param_groups[0]['lr']
        print('lr: {}'.format(lr))
        # inner_lr = get_lr_cosine_decay(config, epoch)
        # print('inner_lr: {}'.format(inner_lr))
        # train for one epoch
        train_acc = train_epoch_multi_bns(trainloader, model, epoch, config)
        # evaluate on test set
        # print('testing epoch ...')
        test_acc = validate_epoch(testloader, model, config)
        # remember best acc, evaluate on test set and save checkpoint
        is_best = test_acc > best_test_acc
        if is_best:
            best_test_acc = test_acc

        # Checkpoint every epoch; optional augmentation modules are saved as
        # None when disabled so the state dict shape is uniform.
        utils.save_checkpoint(
            model, {
                'epoch':
                epoch + 1,
                'state_dict':
                model.target_net.state_dict(),
                'optimizer':
                model.target_net_optim.state_dict(),
                'perturb_vae_state_dict':
                model.perturb_vae.state_dict() if model.perturb_vae else None,
                'perturb_vae_optimizer':
                model.perturb_vae_optim.state_dict()
                if model.perturb_vae else None,
                'aug_stn_state_dict':
                model.aug_stn.state_dict() if model.aug_stn else None,
                'aug_stn_optimizer':
                model.aug_stn_optim.state_dict() if model.aug_stn else None,
                'deform_vae_state_dict':
                model.deform_vae.state_dict() if model.deform_vae else None,
                'deform_vae_optimizer':
                model.deform_vae_optim.state_dict()
                if model.deform_vae else None,
                'test_acc':
                test_acc,
                'best_test_acc':
                best_test_acc,
            }, is_best, exp_dir)

        # Append one tab-separated row of metrics per epoch.
        values = [train_acc, test_acc, best_test_acc]
        with open(log_file, 'a') as f:
            f.write('{:d}\t'.format(epoch))
            f.write('{:g}\t'.format(lr))
            for per_value in values:
                f.write('{:2.2f}\t'.format(per_value))
            f.write('\n')
        print('exp_dir: {}'.format(exp_dir))
Exemple #27
0
def main():
    """Train (or load) an imputation model and recover missing values.

    Loads the train/val/test dataloaders, builds the model and imputation
    engine, optionally trains it (offering to resume from an existing
    checkpoint via interactive prompt), then runs imputation on the split
    selected by ``args.impset`` and logs the recovery metrics.
    """
    # Handle parameters
    args = util.get_args()

    # Select gpu
    device = torch.device(args.device)
    args.device = device

    # Load data: each split comes as a (dataloader, validation-dataloader) pair.
    train_loader, val_loader, test_loader = util.get_dataloader(args)

    train_dataloader, train_val_dataloader = train_loader
    val_dataloader, val_val_dataloader = val_loader
    test_dataloader, test_val_dataloader = test_loader

    # Record dataset dimensions on args for downstream components.
    args.train_size, args.nSeries = train_dataloader.dataset.X.shape
    args.val_size, args.val_nSeries = val_dataloader.dataset.X.shape
    args.test_size, args.test_nSeries = test_dataloader.dataset.X.shape

    # Create logger
    logger = util.Logger(args)

    # Display arguments
    util.print_args(args)

    # Create model
    model = models.get_model(args)

    # Create imputation engine

    engine = util.ImpEngine.from_args(model, scaler=None, args=args)

    # Training

    # Choose which split to impute on; its paired loader is used for validation.
    if args.impset == 'train':
        data_loader = train_dataloader
        val_loader = train_val_dataloader
    elif args.impset == 'val':
        data_loader = val_dataloader
        val_loader = val_val_dataloader
    elif args.impset == 'test':
        data_loader = test_dataloader
        val_loader = test_val_dataloader
    else:
        raise NotImplementedError

    if not args.test:
        iterator = trange(args.num_epoch)

        try:
            # Offer to resume from an existing checkpoint (interactive prompt).
            if os.path.isfile(logger.best_model_save_path):
                print('Model checkpoint exist!')
                print('Load model checkpoint? (y/n)')
                _in = input()
                if _in == 'y' or _in == 'yes':
                    print('Loading model...')
                    engine.model.load_state_dict(
                        torch.load(logger.best_model_save_path))
                else:
                    print('Training new model')

            for epoch in iterator:
                loss = engine.train(data_loader)
                engine.scheduler.step()
                with torch.no_grad():
                    # metrics = (val_loss, rse, mae, mape, mse, rmse)
                    Xhat_val, val_metrics = engine.validation(
                        data_loader, val_loader)

                    m = dict(train_loss=loss,
                             val_loss=val_metrics[0],
                             val_rse=val_metrics[1],
                             val_mae=val_metrics[2],
                             val_mape=val_metrics[3],
                             val_mse=val_metrics[4],
                             val_rmse=val_metrics[5])

                # report stats; logger.stop implements early stopping
                description = logger.summary(m, engine.model)
                if logger.stop:
                    break
                description = 'Epoch: {} '.format(epoch) + description
                iterator.set_description(description)

        except KeyboardInterrupt:
            # Allow Ctrl-C to skip straight to recovery with the best weights.
            pass

    # data recovery: always impute with the best saved weights.
    engine.model.load_state_dict(torch.load(logger.best_model_save_path))
    with torch.no_grad():
        # metrics = (rse, mae, mape, mse, rmse)
        imp_X, metrics, metrics_li = engine.imputation(data_loader)

        m = dict(imp_rse=metrics[0],
                 imp_mae=metrics[1],
                 imp_mape=metrics[2],
                 imp_mse=metrics[3],
                 imp_rmse=metrics[4])
        # m_li = dict(imp_rse=metrics_li[0], imp_mae=metrics_li[1], imp_mape=metrics_li[2], imp_mse=metrics_li[3],
        #             imp_rmse=metrics_li[4])

        logger.imputation_summary(m=m,
                                  X=data_loader.dataset.X,
                                  imp_X=imp_X,
                                  W=data_loader.dataset.W,
                                  save_imp=True)
Exemple #28
0
def train(args):
    """Build and run PaddlePaddle (fluid) train/valid programs for a video model.

    Constructs separate train and valid programs under a shared startup
    program, loads resume or pretrain weights, wraps both in parallel
    executors, then dispatches to the pyreader or feeder training loop.

    Args:
        args: parsed CLI namespace; reads model_name, config, resume/pretrain
            paths, GPU flags, epoch/log/valid intervals and save options.
    """
    # parse config
    config = parse_config(args.config)
    train_config = merge_configs(config, 'train', vars(args))
    valid_config = merge_configs(config, 'valid', vars(args))
    print_configs(train_config, 'Train')
    train_model = models.get_model(args.model_name, train_config, mode='train')
    valid_model = models.get_model(args.model_name, valid_config, mode='valid')

    # build model
    startup = fluid.Program()
    train_prog = fluid.Program()
    if args.enable_ce:
        # Fixed seeds for continuous-evaluation reproducibility.
        startup.random_seed = 1000
        train_prog.random_seed = 1000
    with fluid.program_guard(train_prog, startup):
        with fluid.unique_name.guard():
            train_model.build_input(not args.no_use_pyreader)
            train_model.build_model()
            # for the input, has the form [data1, data2,..., label], so train_feeds[-1] is label
            train_feeds = train_model.feeds()
            train_feeds[-1].persistable = True
            # for the output of classification model, has the form [pred]
            # for the output of detection model, has the form [loc_pred, cls_pred]
            train_outputs = train_model.outputs()
            for output in train_outputs:
                output.persistable = True
            train_losses = train_model.loss()
            if isinstance(train_losses, list) or isinstance(
                    train_losses, tuple):
                # for detection model, train_losses has the form [total_loss, loc_loss, cls_loss]
                train_loss = train_losses[0]
                for item in train_losses:
                    item.persistable = True
            else:
                train_loss = train_losses
                train_loss.persistable = True
            # outputs, loss, label should be fetched, so set persistable to be true
            optimizer = train_model.optimizer()
            optimizer.minimize(train_loss)
            train_pyreader = train_model.pyreader()

    # Valid program shares the startup program so variables are reused.
    valid_prog = fluid.Program()
    with fluid.program_guard(valid_prog, startup):
        with fluid.unique_name.guard():
            valid_model.build_input(not args.no_use_pyreader)
            valid_model.build_model()
            valid_feeds = valid_model.feeds()
            # for the output of classification model, has the form [pred]
            # for the output of detection model, has the form [loc_pred, cls_pred]
            valid_outputs = valid_model.outputs()
            valid_losses = valid_model.loss()
            valid_pyreader = valid_model.pyreader()

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup)

    if args.resume:
        # if resume weights is given, load resume weights directly
        assert os.path.exists(args.resume), \
                "Given resume weight dir {} not exist.".format(args.resume)

        def if_exist(var):
            # Only load variables that have a matching file in the resume dir.
            return os.path.exists(os.path.join(args.resume, var.name))

        fluid.io.load_vars(exe,
                           args.resume,
                           predicate=if_exist,
                           main_program=train_prog)
    else:
        # if not in resume mode, load pretrain weights
        if args.pretrain:
            assert os.path.exists(args.pretrain), \
                    "Given pretrain weight dir {} not exist.".format(args.pretrain)
        pretrain = args.pretrain or train_model.get_pretrain_weights()
        if pretrain:
            train_model.load_pretrain_params(exe, pretrain, train_prog, place)

    build_strategy = fluid.BuildStrategy()
    build_strategy.enable_inplace = True
    if args.model_name in ['CTCN']:
        # CTCN requires sequential execution — presumably due to op ordering
        # constraints; confirm against the model's implementation notes.
        build_strategy.enable_sequential_execution = True
    #build_strategy.memory_optimize = True

    train_exe = fluid.ParallelExecutor(use_cuda=args.use_gpu,
                                       loss_name=train_loss.name,
                                       main_program=train_prog,
                                       build_strategy=build_strategy)
    valid_exe = fluid.ParallelExecutor(use_cuda=args.use_gpu,
                                       share_vars_from=train_exe,
                                       main_program=valid_prog)

    # get reader; with pyreader on GPU the configured batch size is per-device,
    # so divide by the number of GPUs.
    bs_denominator = 1
    if (not args.no_use_pyreader) and args.use_gpu:
        bs_denominator = train_config.TRAIN.num_gpus
    train_config.TRAIN.batch_size = int(train_config.TRAIN.batch_size /
                                        bs_denominator)
    valid_config.VALID.batch_size = int(valid_config.VALID.batch_size /
                                        bs_denominator)
    train_reader = get_reader(args.model_name.upper(), 'train', train_config)
    valid_reader = get_reader(args.model_name.upper(), 'valid', valid_config)

    # get metrics
    train_metrics = get_metrics(args.model_name.upper(), 'train', train_config)
    valid_metrics = get_metrics(args.model_name.upper(), 'valid', valid_config)

    if isinstance(train_losses, tuple) or isinstance(train_losses, list):
        # for detection
        train_fetch_list = [item.name for item in train_losses] + \
                [x.name for x in train_outputs] + [train_feeds[-1].name]
        valid_fetch_list = [item.name for item in valid_losses] + \
                [x.name for x in valid_outputs] + [valid_feeds[-1].name]
    else:
        # for classification
        train_fetch_list = [train_losses.name] + [
            x.name for x in train_outputs
        ] + [train_feeds[-1].name]
        valid_fetch_list = [valid_losses.name] + [
            x.name for x in valid_outputs
        ] + [valid_feeds[-1].name]

    epochs = args.epoch or train_model.epoch_num()

    if args.no_use_pyreader:
        # Feeder path: data is fed from Python per batch.
        train_feeder = fluid.DataFeeder(place=place, feed_list=train_feeds)
        valid_feeder = fluid.DataFeeder(place=place, feed_list=valid_feeds)
        train_without_pyreader(exe,
                               train_prog,
                               train_exe,
                               train_reader,
                               train_feeder,
                               train_fetch_list,
                               train_metrics,
                               epochs=epochs,
                               log_interval=args.log_interval,
                               valid_interval=args.valid_interval,
                               save_dir=args.save_dir,
                               save_model_name=args.model_name,
                               test_exe=valid_exe,
                               test_reader=valid_reader,
                               test_feeder=valid_feeder,
                               test_fetch_list=valid_fetch_list,
                               test_metrics=valid_metrics)
    else:
        # Pyreader path: readers are decorated and consumed asynchronously.
        train_pyreader.decorate_paddle_reader(train_reader)
        valid_pyreader.decorate_paddle_reader(valid_reader)
        train_with_pyreader(exe,
                            train_prog,
                            train_exe,
                            train_pyreader,
                            train_fetch_list,
                            train_metrics,
                            epochs=epochs,
                            log_interval=args.log_interval,
                            valid_interval=args.valid_interval,
                            save_dir=args.save_dir,
                            save_model_name=args.model_name,
                            enable_ce=args.enable_ce,
                            test_exe=valid_exe,
                            test_pyreader=valid_pyreader,
                            test_fetch_list=valid_fetch_list,
                            test_metrics=valid_metrics)
Exemple #29
0
def train(model_type,
          num_of_epochs,
          data_set,
          img_width=150,
          optimizer_type='adam',
          print_summary=False,
          batch_size=32,
          learning_rate=5e-5,
          weight_path=None,
          fc_layers=None,
          dropout=None,
          generator='default',
          dyn_lr=False,
          initial_epoch=0,
          skip_first_stage=False):
    """Two-stage transfer-learning training.

    Stage 1 trains only the top ``first_stage`` layers for 2 warm-up epochs
    with RMSprop; stage 2 unfreezes everything from index ``second_stage``
    onwards and trains ``num_of_epochs`` more epochs with the requested
    optimizer.

    Args:
        model_type: architecture key understood by get_model().
        num_of_epochs: number of second-stage epochs.
        data_set: unused here; kept for interface compatibility.
        img_width: square input size of the network.
        optimizer_type: optimizer key for the second stage.
        print_summary: print the model summary while building.
        batch_size: minibatch size for both stages.
        learning_rate: second-stage learning rate.
        weight_path: optional weights file to load before training.
        fc_layers: classifier-head layer sizes.
        dropout: classifier-head dropout rate.
        generator: data-generator preset name.
        dyn_lr: enable dynamic-LR callbacks in stage 2.
        initial_epoch: epoch offset when resuming stage 2.
        skip_first_stage: skip the warm-up stage entirely.
    """
    model, second_stage, first_stage = get_model(model_type,
                                                 img_width,
                                                 print_summary=print_summary,
                                                 fc_layers=fc_layers,
                                                 dropout=dropout)
    # Stage 1: freeze everything except the last `first_stage` layers.
    if first_stage is not None and second_stage is not None:
        for layer in model.layers[:-first_stage]:
            layer.trainable = False
        for layer in model.layers[-first_stage:]:
            layer.trainable = True
    if weight_path is not None and len(weight_path) > 0:
        print('[INFO] loading weights')
        model.load_weights(weight_path)
    # The warm-up stage deliberately uses plain RMSprop; the user-selected
    # optimizer (and learning_rate) is only applied in the second stage.
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    train_generator, validation_generator = get_generators(
        generator, img_width, batch_size, model_type)

    # Warm up the new head for 2 epochs.
    if not skip_first_stage:
        print('[INFO] Start first stage')
        # NOTE(review): 18304 / 3328 look like hard-coded train/val sample
        # counts; the float division relies on the framework accepting
        # non-integer step counts — confirm against the dataset.
        model.fit_generator(generator=train_generator,
                            epochs=2,
                            steps_per_epoch=18304 / batch_size,
                            validation_steps=3328 / batch_size,
                            validation_data=validation_generator,
                            callbacks=get_callbacks(model_type, 0.001, False),
                            initial_epoch=0)
    # Stage 2: freeze the bottom of the network, fine-tune from
    # `second_stage` upwards.
    if first_stage is not None and second_stage is not None:
        for layer in model.layers[:second_stage]:
            layer.trainable = False
        for layer in model.layers[second_stage:]:
            layer.trainable = True

    model_opt = get_optimizer(optimizer_type, learning_rate)
    model.compile(loss='categorical_crossentropy',
                  optimizer=model_opt,
                  metrics=['accuracy'])

    print('[INFO] Run train process')
    # Epoch numbering continues after the 2 warm-up epochs.
    model.fit_generator(generator=train_generator,
                        epochs=num_of_epochs + 2,
                        steps_per_epoch=18304 / batch_size,
                        validation_steps=3328 / batch_size,
                        validation_data=validation_generator,
                        callbacks=get_callbacks(model_type, learning_rate,
                                                dyn_lr),
                        initial_epoch=2 + initial_epoch)
    print('[INFO] End train process')
Exemple #30
0
    #Load data
    test, test_bands = utils.read_jason(file='test.json', loc='../input/')
    test_X_dup = utils.rescale(test_bands)
    test_meta = test['inc_angle'].values

    tmp = dt.datetime.now().strftime("%Y-%m-%d-%H-%M")
    file_weights = '../weights/weights_current.hdf5'

    if os.path.isfile(file_weights):

        #define and load model
        nb_filters = params.nb_filters
        nb_dense = params.nb_dense
        weights_file = params.weights_file
        model = models.get_model(img_shape=(75, 75, 2),
                                 f=nb_filters,
                                 h=nb_dense)
        model.load_weights(weights_file)

        #
        batch_size = params.batch_size_test

        if params.validate_before_test:

            train, train_bands = utils.read_jason(file='train.json',
                                                  loc='../input/')
            train_X = utils.rescale(train_bands)
            train_meta = train['inc_angle'].values
            train_y = train[target].values
            print('\nPredict training data as validation: {} {}'.format(
                train_X.shape, train_meta.shape),
Exemple #31
0
def train(args):
    """Train a world-coordinate (WC) regression network on doc3dwc.

    Builds train/val loaders, a DataParallel model, an Adam optimizer with a
    reduce-on-plateau LR scheduler, then runs the epoch loop with an L1 loss
    (gradient loss is computed but not added), logging to a text file and
    optionally TensorBoard, checkpointing every 10 epochs and whenever the
    validation MSE improves.
    """

    # Setup Dataloader
    data_loader = get_loader('doc3dwc')
    data_path = args.data_path
    t_loader = data_loader(data_path,
                           is_transform=True,
                           img_size=(args.img_rows, args.img_cols),
                           augmentations=False)
    v_loader = data_loader(data_path,
                           is_transform=True,
                           split='val',
                           img_size=(args.img_rows, args.img_cols))

    n_classes = t_loader.n_classes
    trainloader = data.DataLoader(t_loader,
                                  batch_size=args.batch_size,
                                  num_workers=8,
                                  shuffle=True)
    valloader = data.DataLoader(v_loader,
                                batch_size=args.batch_size,
                                num_workers=8)

    # Setup Model (replicated over all visible GPUs)
    model = get_model(args.arch, n_classes, in_channels=3)
    model = torch.nn.DataParallel(model,
                                  device_ids=range(torch.cuda.device_count()))
    model.cuda()

    # Activation: clamp predictions to [0, 1] (WC targets are normalized).
    htan = nn.Hardtanh(0, 1.0)

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.l_rate,
                                 weight_decay=5e-4,
                                 amsgrad=True)

    # LR Scheduler: halve the LR when validation MSE plateaus.
    sched = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                       mode='min',
                                                       factor=0.5,
                                                       patience=5,
                                                       verbose=True)

    # Losses: L1 drives training; MSE is only tracked for model selection.
    MSE = nn.MSELoss()
    loss_fn = nn.L1Loss()
    gloss = grad_loss.Gradloss(window_size=5, padding=2)

    epoch_start = 0
    if args.resume is not None:
        if os.path.isfile(args.resume):
            print("Loading model and optimizer from checkpoint '{}'".format(
                args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['model_state'])
            # Optimizer state is deliberately not restored here.
            # optimizer.load_state_dict(checkpoint['optimizer_state'])
            print("Loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            epoch_start = checkpoint['epoch']
        else:
            print("No checkpoint found at '{}'".format(args.resume))

    # Log file:
    if not os.path.exists(args.logdir):
        os.makedirs(args.logdir)
    # activation_dataset_lossparams_augmentations_trainstart
    experiment_name = 'htan_doc3d_l1grad_bghsaugk_scratch'
    log_file_name = os.path.join(args.logdir, experiment_name + '.txt')
    if os.path.isfile(log_file_name):
        log_file = open(log_file_name, 'a')
    else:
        log_file = open(log_file_name, 'w+')

    log_file.write('\n---------------  ' + experiment_name +
                   '  ---------------\n')
    log_file.close()

    # Setup tensorboard for visualization
    if args.tboard:
        # save logs in runs/<experiment_name>
        writer = SummaryWriter(comment=experiment_name)

    best_val_mse = 99999.0
    global_step = 0

    for epoch in range(epoch_start, args.n_epoch):
        avg_loss = 0.0
        avg_l1loss = 0.0
        avg_gloss = 0.0
        train_mse = 0.0
        model.train()

        for i, (images, labels) in enumerate(trainloader):
            images = Variable(images.cuda())
            labels = Variable(labels.cuda())

            optimizer.zero_grad()
            outputs = model(images)
            pred = htan(outputs)
            g_loss = gloss(pred, labels)
            l1loss = loss_fn(pred, labels)
            loss = l1loss  # +(0.2*g_loss)
            avg_l1loss += float(l1loss)
            avg_gloss += float(g_loss)
            avg_loss += float(loss)
            train_mse += float(MSE(pred, labels).item())

            loss.backward()
            optimizer.step()
            global_step += 1

            # Report the running loss every 10 batches, then reset it.
            if (i + 1) % 10 == 0:
                print("Epoch[%d/%d] Batch [%d/%d] Loss: %.4f" %
                      (epoch + 1, args.n_epoch, i + 1, len(trainloader),
                       avg_loss / 10.0))
                avg_loss = 0.0

            if args.tboard and (i + 1) % 10 == 0:
                show_wc_tnsboard(global_step, writer, images, labels, pred, 8,
                                 'Train Inputs', 'Train WCs',
                                 'Train Pred. WCs')
                writer.add_scalars(
                    'Train', {
                        'WC_L1 Loss/train': avg_l1loss / (i + 1),
                        'WC_Grad Loss/train': avg_gloss / (i + 1)
                    }, global_step)

        train_mse = train_mse / len(trainloader)
        avg_l1loss = avg_l1loss / len(trainloader)
        avg_gloss = avg_gloss / len(trainloader)
        print("Training L1:%4f" % (avg_l1loss))
        print("Training MSE:'{}'".format(train_mse))
        train_losses = [avg_l1loss, train_mse, avg_gloss]

        lrate = get_lr(optimizer)
        write_log_file(experiment_name, train_losses, epoch + 1, lrate,
                       'Train')

        model.eval()
        val_loss = 0.0
        val_mse = 0.0
        val_bg = 0.0
        val_fg = 0.0
        val_gloss = 0.0
        val_dloss = 0.0
        for i_val, (images_val, labels_val) in tqdm(enumerate(valloader)):
            with torch.no_grad():
                images_val = Variable(images_val.cuda())
                labels_val = Variable(labels_val.cuda())

                outputs = model(images_val)
                pred_val = htan(outputs)
                g_loss = gloss(pred_val, labels_val).cpu()
                pred_val = pred_val.cpu()
                labels_val = labels_val.cpu()
                loss = loss_fn(pred_val, labels_val)
                val_loss += float(loss)
                val_mse += float(MSE(pred_val, labels_val))
                val_gloss += float(g_loss)

        val_loss = val_loss / len(valloader)
        val_mse = val_mse / len(valloader)
        val_gloss = val_gloss / len(valloader)
        print("val loss at epoch {}:: {}".format(epoch + 1, val_loss))
        print("val MSE: {}".format(val_mse))

        if args.tboard:
            # Bug fix: visualize the *validation* predictions (pred_val);
            # the original passed the last training batch's `pred`.
            show_wc_tnsboard(epoch + 1, writer, images_val, labels_val,
                             pred_val, 8, 'Val Inputs', 'Val WCs',
                             'Val Pred. WCs')
            writer.add_scalars('L1', {
                'L1_Loss/train': avg_l1loss,
                'L1_Loss/val': val_loss
            }, epoch + 1)
            writer.add_scalars('GLoss', {
                'Grad Loss/train': avg_gloss,
                'Grad Loss/val': val_gloss
            }, epoch + 1)

        val_losses = [val_loss, val_mse, val_gloss]
        write_log_file(experiment_name, val_losses, epoch + 1, lrate, 'Val')

        # reduce learning rate
        sched.step(val_mse)

        # Bug fix: checkpoint paths are joined with os.path.join so a logdir
        # without a trailing separator still produces a valid path.
        if val_mse < best_val_mse:
            best_val_mse = val_mse
            state = {
                'epoch': epoch + 1,
                'model_state': model.state_dict(),
                'optimizer_state': optimizer.state_dict(),
            }
            torch.save(
                state,
                os.path.join(
                    args.logdir, "{}_{}_{}_{}_{}_best_model.pkl".format(
                        args.arch, epoch + 1, val_mse, train_mse,
                        experiment_name)))

        if (epoch + 1) % 10 == 0:
            state = {
                'epoch': epoch + 1,
                'model_state': model.state_dict(),
                'optimizer_state': optimizer.state_dict(),
            }
            torch.save(
                state,
                os.path.join(
                    args.logdir, "{}_{}_{}_{}_{}_model.pkl".format(
                        args.arch, epoch + 1, val_mse, train_mse,
                        experiment_name)))
################################################################## REPRODUCIBILITY

# Force deterministic cuDNN kernels (disabling the autotuner) and seed every
# RNG source used below so runs are repeatable.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)

################################################################## MODEL LOADING

# Prefer the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if torch.cuda.is_available():
    # NOTE(review): presumably restricts/reserves the visible GPUs — confirm
    # against select_devices' definition.
    select_devices(num_gpus_to_use=args.n_gpus)

model = get_model(args.model)

if torch.cuda.is_available():
    model = model.cuda()
    if torch.cuda.device_count() > 1:
        # Replicate the model across all visible GPUs.
        model = nn.DataParallel(model)
model.to(device)

# Default the checkpoint name to the model name when none was given.
if args.checkpoint is None:
    args.checkpoint = args.model

################################################################## TRAINING HYPERPARAMETERS

trainloader, testloader = get_cifar_loaders(args.data_loc)
optimizer = optim.SGD(
    [w for name, w in model.named_parameters() if not "mask" in name],
Exemple #33
0
def test(args, img_path, fname):
    """Dewarp one document photo and save the result.

    A world-coordinate (WC) network first predicts per-pixel coordinates
    from the photo; a backward-mapping (BM) network then converts those
    into an unwarping grid that is applied to the original image.
    """
    # Architecture names are encoded in the checkpoint file names,
    # up to the first underscore.
    wc_model_name = os.path.split(args.wc_model_path)[1]
    wc_model_name = wc_model_name[:wc_model_name.find('_')]
    bm_model_name = os.path.split(args.bm_model_path)[1]
    bm_model_name = bm_model_name[:bm_model_name.find('_')]

    wc_n_classes, bm_n_classes = 3, 2
    wc_img_size, bm_img_size = (256, 256), (128, 128)

    # --- Load and preprocess the input image ---
    print("Read Input Image from : {}".format(img_path))
    original = cv2.imread(img_path)
    original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)
    small = cv2.resize(original, wc_img_size)
    small = small[:, :, ::-1]  # flip channel order back for the network
    # HWC uint8 -> 1xCxHxW float tensor in [0, 1]
    small = small.astype(float) / 255.0
    tensor = torch.from_numpy(
        np.expand_dims(small.transpose(2, 0, 1), 0)).float()

    def _restore(arch, n_classes, ckpt_path):
        """Build a network, load its checkpoint (CPU-mapped when no GPU),
        and switch it to eval mode."""
        net = get_model(arch, n_classes, in_channels=3)
        if DEVICE.type == 'cpu':
            ckpt = torch.load(ckpt_path, map_location='cpu')
        else:
            ckpt = torch.load(ckpt_path)
        net.load_state_dict(convert_state_dict(ckpt['model_state']))
        net.eval()
        return net

    htan = nn.Hardtanh(0, 1.0)
    wc_model = _restore(wc_model_name, wc_n_classes, args.wc_model_path)
    bm_model = _restore(bm_model_name, bm_n_classes, args.bm_model_path)

    if torch.cuda.is_available():
        wc_model.cuda()
        bm_model.cuda()
        images = Variable(tensor.cuda())
    else:
        images = Variable(tensor)

    # --- Two-stage prediction: WC coordinates, then backward mapping ---
    with torch.no_grad():
        pred_wc = htan(wc_model(images))
        outputs_bm = bm_model(F.interpolate(pred_wc, bm_img_size))

    # call unwarp
    uwpred = unwarp(original, outputs_bm)

    if args.show:
        fig, axes = plt.subplots(1, 2)
        axes[0].imshow(original)
        axes[1].imshow(uwpred)
        plt.show()

    # Save the output (RGB [0,1] -> BGR [0,255] for cv2.imwrite).
    outp = os.path.join(args.out_path, fname)
    cv2.imwrite(outp, uwpred[:, :, ::-1] * 255)
def use_model(language, domain, source, type):
    """Record that *language* is in use and ensure its model is loaded."""
    # Mark the language as supported/seen.
    models_supported_languages[language] = True
    # Resolve the concrete model from its descriptor.
    descriptor = models.BaseModel(language=language, domain=domain,
                                  source=source, type=type)
    model = models.get_model(descriptor)
    # Lazily load the pipeline on first use.
    if not model.pipeline:
        model.load()
Exemple #35
0
def final_predict(models,
                  folds,
                  shape,
                  TTA=False,
                  posprocess=False,
                  swa=False,
                  minsizes=None,
                  thresholds=None,
                  fixshape=False,
                  multimodel=False):
    """Ensemble-predict segmentation masks for the test set and write a CSV
    submission.

    For every (architecture, backbone) pair in *models*, predictions from
    all *folds* are averaged; the per-model results are then averaged into
    the final ensemble, optionally post-processed, converted to run-length
    encodings and saved.

    Args:
        models: iterable of (smmodel, backbone) name pairs.
        folds: fold indices whose checkpoints are averaged per model.
        shape: model input shape forwarded to the per-fold predictors.
        TTA: enable test-time augmentation.
        posprocess: enable mask post-processing before submission.
        swa: use stochastic-weight-averaged checkpoints.
        minsizes: per-class minimum mask sizes for post-processing.
        thresholds: per-class binarization thresholds.
        fixshape: forwarded to the post-processing step.
        multimodel: use the multi-model predictor per fold.
    """

    sub_df, test_imgs = get_test_data()
    print(test_imgs.shape[0])
    test_df = []
    batch_pred_emsemble = []
    submission_name = ''

    for smmodel, backbone in models:
        print('Predicting {} {}'.format(smmodel, backbone))
        opt = Adam()
        model_masks = []
        submission_name = submission_name + str(smmodel) + '_' + str(
            backbone) + '_'

        # Predict in chunks of 860 images to bound memory use.
        # Bug fix: the original reused the single loop variable `i` for the
        # chunk offset, the fold index AND the per-mask resize index; the
        # three loops now use distinct names to avoid shadowing.
        for start in range(0, test_imgs.shape[0], 860):
            batch_idx = list(range(start, min(test_imgs.shape[0],
                                              start + 860)))
            fold_result = []
            batch_pred_resized = np.zeros((len(batch_idx), 350, 525, 4),
                                          dtype=np.float16)

            for fold in folds:
                model = get_model(smmodel, backbone, opt, dice_coef_loss_bce,
                                  [dice_coef])

                if multimodel:
                    batch_pred_masks = predict_multimodel(
                        fold, smmodel, backbone, model, batch_idx, test_imgs,
                        shape, sub_df, TTA, swa)
                else:
                    batch_pred_masks = predict_fold(fold, smmodel, backbone,
                                                    model, batch_idx,
                                                    test_imgs, shape, sub_df,
                                                    TTA, swa)

                # Downscale each predicted mask to the submission resolution.
                for j in range(batch_pred_masks.shape[0]):
                    batch_pred_resized[j, :, :, :] = np_resize(
                        batch_pred_masks[j, :, :, :],
                        (350, 525)).astype(np.float16)

                del batch_pred_masks
                gc.collect()
                # astype() copies, so each fold appends its own array.
                fold_result.append(batch_pred_resized.astype(np.float16))

            # Average the folds for this chunk.
            batch_pred_masks = np.mean(fold_result, axis=0, dtype=np.float16)
            del fold_result
            gc.collect()

            model_masks.extend(batch_pred_masks.astype(np.float16))
            del batch_pred_masks
            gc.collect()

        batch_pred_emsemble.append(model_masks)

        del model, model_masks
        gc.collect()

    # Average across model architectures.
    batch_pred_emsemble = np.mean(batch_pred_emsemble,
                                  axis=0,
                                  dtype=np.float16)

    if TTA:
        submission_name += '_tta'

    save_prediction(batch_pred_emsemble, submission_name)
    batch_idx = list(range(test_imgs.shape[0]))
    batch_pred_emsemble = np.array(
        predict_postprocess(batch_idx,
                            posprocess,
                            batch_pred_emsemble,
                            shape=shape,
                            minsize=minsizes,
                            threshold=thresholds,
                            fixshape=fixshape))

    test_df = convert_masks_for_submission(batch_idx, test_imgs, sub_df,
                                           batch_pred_emsemble)
    submission_name = submission_name + '.csv'
    generate_submission(test_df, submission_name)
Exemple #36
0
     X_train, y_train = sklearn.utils.shuffle(X_train, y_train)
     class_weight = "balanced" if CLASS_BALANCING else None
     clf = sklearn.neighbors.KNeighborsClassifier(weights="distance")
     clf = sklearn.model_selection.GridSearchCV(
         clf, {"n_neighbors": [1, 3, 5, 10, 20]}, verbose=5, n_jobs=4)
     clf.fit(X_train, y_train)
     clf.fit(X_train, y_train)
     save_model(clf, MODEL, DATASET)
     prediction = clf.predict(img.reshape(-1, N_BANDS))
     prediction = prediction.reshape(img.shape[:2])
 else:
     if CLASS_BALANCING:
         weights = compute_imf_weights(train_gt, N_CLASSES, IGNORED_LABELS)
         hyperparams["weights"] = torch.from_numpy(weights)
     # Neural network
     model, optimizer, loss, hyperparams = get_model(MODEL, **hyperparams)
     # Split train set in train/val
     train_gt, val_gt = sample_gt(train_gt, 0.95, mode="random")
     # Generate the dataset
     train_dataset = HyperX(img, train_gt, **hyperparams)
     train_loader = data.DataLoader(
         train_dataset,
         batch_size=hyperparams["batch_size"],
         # pin_memory=hyperparams['device'],
         shuffle=True,
     )
     val_dataset = HyperX(img, val_gt, **hyperparams)
     val_loader = data.DataLoader(
         val_dataset,
         # pin_memory=hyperparams['device'],
         batch_size=hyperparams["batch_size"],
def main(device, args):
    """Federated semi-supervised training loop.

    Each round: train the global model on the labeled split (supervised +
    mean-teacher consistency loss), evaluate it, then let a random fraction
    of clients fine-tune local copies on their unlabeled shards with a
    consistency loss plus a proximal (FedProx-style) term, and finally
    average the local weights back into the global model via FedAvg.
    """

    loss1_func = nn.CrossEntropyLoss()
    loss2_func = softmax_kl_loss

    dataset_kwargs = {
        'dataset': args.dataset,
        'data_dir': args.data_dir,
        'download': args.download,
        'debug_subset_size': args.batch_size if args.debug else None
    }
    dataloader_kwargs = {
        'batch_size': args.batch_size,
        'drop_last': True,
        'pin_memory': True,
        'num_workers': args.num_workers,
    }
    dataloader_unlabeled_kwargs = {
        'batch_size': args.batch_size * 5,
        'drop_last': True,
        'pin_memory': True,
        'num_workers': args.num_workers,
    }
    dataset_train = get_dataset(transform=get_aug_fedmatch(args.dataset, True),
                                train=True,
                                **dataset_kwargs)

    # Partition the training set across clients (IID or non-IID).
    if args.iid == 'iid':
        dict_users_labeled, dict_users_unlabeled = iid(dataset_train,
                                                       args.num_users,
                                                       args.label_rate)
    else:
        dict_users_labeled, dict_users_unlabeled = noniid(
            dataset_train, args.num_users, args.label_rate)
    train_loader_unlabeled = {}

    # define model
    model_glob = get_model('fedfixmatch', args.backbone).to(device)
    if torch.cuda.device_count() > 1:
        model_glob = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model_glob)

    model_local_idx = set()

    user_epoch = {}
    lr_scheduler = {}
    accuracy = []
    class_criterion = nn.CrossEntropyLoss(size_average=False, ignore_index=-1)
    if args.dataset == 'cifar' and args.iid != 'noniid_tradition':
        consistency_criterion = softmax_kl_loss
    else:
        consistency_criterion = softmax_mse_loss

    for iter in range(args.num_epochs):

        # ---- Global phase: supervised training on the labeled split ----
        model_glob.train()
        optimizer = torch.optim.SGD(model_glob.parameters(),
                                    lr=0.01,
                                    momentum=0.5)

        train_loader_labeled = torch.utils.data.DataLoader(
            dataset=DatasetSplit(dataset_train, dict_users_labeled),
            shuffle=True,
            **dataloader_kwargs)

        for batch_idx, ((img, img_ema),
                        label) in enumerate(train_loader_labeled):

            img, img_ema, label = img.to(args.device), img_ema.to(
                args.device), label.to(args.device)
            input_var = torch.autograd.Variable(img)
            ema_input_var = torch.autograd.Variable(img_ema, volatile=True)
            target_var = torch.autograd.Variable(label)
            minibatch_size = len(target_var)
            labeled_minibatch_size = target_var.data.ne(-1).sum()
            ema_model_out = model_glob(ema_input_var)
            model_out = model_glob(input_var)
            # Networks may return one logit tensor or a pair.
            if isinstance(model_out, Variable):
                logit1 = model_out
                ema_logit = ema_model_out
            else:
                assert len(model_out) == 2
                assert len(ema_model_out) == 2
                logit1, logit2 = model_out
                ema_logit, _ = ema_model_out
            # The teacher (EMA-pass) logits never receive gradients.
            ema_logit = Variable(ema_logit.detach().data, requires_grad=False)
            class_logit, cons_logit = logit1, logit1
            classification_weight = 1
            class_loss = classification_weight * class_criterion(
                class_logit, target_var) / minibatch_size
            ema_class_loss = class_criterion(ema_logit,
                                             target_var) / minibatch_size
            consistency_weight = get_current_consistency_weight(iter)
            consistency_loss = consistency_weight * consistency_criterion(
                cons_logit, ema_logit) / minibatch_size
            loss = class_loss + consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        del train_loader_labeled
        gc.collect()
        torch.cuda.empty_cache()

        # ---- Evaluate the global model every round ----
        if iter % 1 == 0:
            test_loader = torch.utils.data.DataLoader(dataset=get_dataset(
                transform=get_aug(args.dataset, False, train_classifier=False),
                train=False,
                **dataset_kwargs),
                                                      shuffle=False,
                                                      **dataloader_kwargs)
            model_glob.eval()
            acc, loss_train_test_labeled = test_img(model_glob, test_loader,
                                                    args)
            accuracy.append(str(acc))
            del test_loader
            gc.collect()
            torch.cuda.empty_cache()

        w_locals, loss_locals, loss0_locals, loss2_locals = [], [], [], []

        # ---- Local phase: a random fraction of clients fine-tunes ----
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        for idx in idxs_users:
            if idx in user_epoch.keys():
                user_epoch[idx] += 1
            else:
                user_epoch[idx] = 1

            loss_local = []
            loss0_local = []
            loss2_local = []

            model_local = copy.deepcopy(model_glob).to(args.device)

            train_loader_unlabeled = torch.utils.data.DataLoader(
                dataset=DatasetSplit(dataset_train, dict_users_unlabeled[idx]),
                shuffle=True,
                **dataloader_unlabeled_kwargs)

            optimizer = torch.optim.SGD(model_local.parameters(),
                                        lr=args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay,
                                        nesterov=False)

            model_local.train()

            for i, ((images1, images2),
                    labels) in enumerate(train_loader_unlabeled):

                # Bug fix: consume THIS loader's batch. The original reused
                # the stale img/img_ema/label left over from the labeled
                # loop, so every unlabeled step trained on the same batch.
                img, img_ema, label = images1.to(args.device), images2.to(
                    args.device), labels.to(args.device)
                # Bug fix: step the LR schedule with this loop's batch index
                # `i`, not the stale `batch_idx` from the labeled loop.
                adjust_learning_rate(optimizer, user_epoch[idx], i,
                                     len(train_loader_unlabeled), args)
                input_var = torch.autograd.Variable(img)
                ema_input_var = torch.autograd.Variable(img_ema, volatile=True)
                target_var = torch.autograd.Variable(label)
                minibatch_size = len(target_var)
                labeled_minibatch_size = target_var.data.ne(-1).sum()
                ema_model_out = model_local(ema_input_var)
                model_out = model_local(input_var)
                if isinstance(model_out, Variable):
                    logit1 = model_out
                    ema_logit = ema_model_out
                else:
                    assert len(model_out) == 2
                    assert len(ema_model_out) == 2
                    logit1, logit2 = model_out
                    ema_logit, _ = ema_model_out
                ema_logit = Variable(ema_logit.detach().data,
                                     requires_grad=False)
                class_logit, cons_logit = logit1, logit1

                consistency_weight = get_current_consistency_weight(
                    user_epoch[idx])
                consistency_loss = consistency_weight * consistency_criterion(
                    cons_logit, ema_logit) / minibatch_size

                # Proximal term keeps the local model close to the global one.
                Lprox = 1 / 2 * dist(model_local.state_dict(),
                                     model_glob.state_dict())

                loss = consistency_loss + Lprox
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            w_locals.append(copy.deepcopy(model_local.state_dict()))

            del model_local
            gc.collect()
            del train_loader_unlabeled
            gc.collect()
            torch.cuda.empty_cache()

        # ---- Aggregate local updates (FedAvg) into the global model ----
        w_glob = FedAvg(w_locals)
        model_glob.load_state_dict(w_glob)

        if iter % 1 == 0:
            print('Round {:3d}, Acc {:.2f}%'.format(iter, acc))
Exemple #38
0
def train():
    """Train a joint segmentation + detection model on Cityscapes.

    All hyper-parameters come from the module-level ``config`` object.
    Each epoch applies a per-iteration poly learning-rate schedule,
    evaluates mean IoU and detection mAP on the validation split, appends
    the results to ``MTSD.log`` and snapshots the best-IoU model under
    ``config.snapshot_path``.
    """
    # Create the snapshot directory on first run.
    if not os.path.exists(config.snapshot_path):
        os.mkdir(config.snapshot_path)

    # Setup Dataloaders.
    t_loader = cityscapesLoader(config.trainList, split = 'train', batchSize = config.train_batch_size, imgSize = config.imgSize, is_augmentation = False, randomResize = False)
    v_loader = cityscapesLoader(config.valList, split = 'val', imgSize = config.imgSize)

    n_classes = t_loader.n_classes
    imgSize = t_loader.imgSize
    # No shuffle here: samples can have different shapes, which breaks
    # shuffled batching.
    trainloader = data.DataLoader(t_loader, batch_size=config.train_batch_size, num_workers=8)
    valloader = data.DataLoader(v_loader, batch_size=config.test_batch_size, num_workers=8)

    # Setup metrics for IoU calculation.
    running_metrics = runningScore(n_classes)

    # Setup model, wrapped in DataParallel across all visible GPUs.
    model = get_model(config.arch, n_classes, imgSize)
    finetune_params = model.finetune_params
    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    model.cuda()

    # Check if the model ships its own optimizer.
    if hasattr(model.module, 'optimizer'):
        optimizer = model.module.optimizer
    else:
        # Fine-tune setup: `finetune_params` forms one parameter group,
        # every remaining parameter the other (same base LR, multiplier 1).
        train_params = []
        for idx, param in enumerate(model.parameters()):
            # NOTE(review): `>` leaves the parameter at
            # idx == len(finetune_params) out of BOTH groups; `>=` may have
            # been intended -- confirm against how finetune_params is built.
            if idx > len(finetune_params):
                train_params.append(param)
        optimizer = torch.optim.SGD([{'params': finetune_params}, {'params': train_params, 'lr': config.base_lr * 1}],
                                     lr=config.base_lr, momentum=config.momentum, weight_decay=config.weight_decay)

    # Resume from a checkpoint, or load a pretrained model.
    if config.resume is not None:
        if os.path.isfile(config.resume):
            print("Loading model and optimizer from checkpoint '{}'".format(config.resume))
            checkpoint = torch.load(config.resume)
            model.load_state_dict(checkpoint['model_state'])
            optimizer.load_state_dict(checkpoint['optimizer_state'])
            start_epoch = checkpoint['epoch']
            print("Loaded checkpoint '{}' (epoch {})"
                  .format(config.resume, checkpoint['epoch']))
        else:
            print("No checkpoint found at '{}'".format(config.resume))
            # BUGFIX: fall back to epoch 0 so the training loop below does
            # not raise NameError on `start_epoch`.
            start_epoch = 0
    else:
        # Load pretrained model weights.
        print("Loading pretrained Model: {}".format(config.pretrainedModel))
        start_epoch = 0
        if config.pretrainedModel.split(".")[-1] == "pth":
            model.load_state_dict(torch.load(config.pretrainedModel))
        else:
            model.load_state_dict(torch.load(config.pretrainedModel)['model_state'])

    # Initialise the live matplotlib loss plots.
    # BUGFIX: the figure (and ax2) is also needed when only `visdomVal` is
    # set; previously `ax2` was undefined in that case.
    if config.visdomTrain or config.visdomVal:
        fig = plt.figure()
        # Training-loss axes.
        ax1 = fig.add_subplot(2, 1, 1)
        ax1.axis([start_epoch * len(trainloader), (start_epoch+1) * len(trainloader), 0, 1])
        ax1.plot(-1, -1, 'bo', label = 'LossSeg')
        ax1.plot(-1, -1, 'r^', label = 'lossDet')
        ax1.legend(loc='upper left')
        plt.title('LossSeg vs lossDet')
        # Validation-loss axes.
        ax2 = fig.add_subplot(2, 1, 2)
        ax2.axis([start_epoch * len(trainloader), (start_epoch+1) * len(trainloader), 0, 1])
        ax2.plot(-1, -1, 'cs', label = 'LossSegVal')
        ax2.plot(-1, -1, 'y*', label = 'lossDetVal')
        ax2.legend(loc='upper left')

    bestIou = -100.0
    bestmAP = -100.0
    lossSegDict = {}
    lossDetDict = {}
    for epoch in range(start_epoch, config.max_epoch):
        # Grow the plot x-axis as training progresses.
        if config.visdomTrain:
            ax1.axis([start_epoch * len(trainloader), (epoch+1) * len(trainloader), 0, 1])
            ax2.axis([start_epoch * len(trainloader), (epoch+1) * len(trainloader), 0, 1])

        # ---- training ----
        model.train()
        for i, (images, labels, segMaps) in enumerate(trainloader):
            currentIter = epoch * len(trainloader) + i
            poly_lr_scheduler(optimizer, config.base_lr, currentIter, max_iter = config.max_epoch * len(trainloader))

            images = Variable(images.cuda())
            labels = Variable(labels.cuda())
            segMaps = Variable(segMaps.cuda())

            optimizer.zero_grad()
            loss_seg, loss_det = model(images, labels, segMaps)

            # NOTE(review): only the segmentation loss is back-propagated;
            # the detection loss is logged but not optimised here.
            loss_seg.backward()
            optimizer.step()

            if (i+1) % 20 == 0:
                if config.visdomTrain:
                    lossSegDict[currentIter] = loss_seg.data[0]
                    lossDetDict[currentIter] = loss_det.data[0]
                    for perEpoch, lossSeg in lossSegDict.items():
                        ax1.plot(perEpoch, lossSeg, 'bo', label = 'LossSeg')
                        ax1.plot(perEpoch, lossDetDict[perEpoch], 'r^', label = 'lossDet')
                        plt.pause(0.033)

                print("[Epoch %d/%d, Batch %d/%d] Learning_rate: %.7f Loss_seg: %.4f Loss_det: %.4f" % \
                (epoch+1, config.max_epoch, i, len(trainloader), optimizer.param_groups[0]['lr'], loss_seg.data[0], loss_det.data[0]))

        # ---- evaluation ----
        lossSegVal = []
        lossDetVal = []
        model.eval()
        APs = []
        for i_val, (images_val, labels_val, segMap_val) in tqdm(enumerate(valloader)):
            images_val = Variable(images_val.cuda(), volatile=True)
            labels_val = Variable(labels_val.cuda(), volatile=True)
            segMap_val = Variable(segMap_val.cuda(), volatile=True)

            outputSeg, outputDet = model(images_val)

            # Segmentation: accumulate confusion-matrix stats for IoU.
            pred = outputSeg.data.max(1)[1].cpu().numpy()
            gt = segMap_val.data.cpu().numpy()
            running_metrics.update(gt, pred)

            # Detection: average precision per batch.
            AP = evalDet(outputDet, labels_val, config.numClasses, config.imgSize, config.confThresh, config.iouThresh)
            APs.append(AP)

        score, class_iou = running_metrics.get_scores()
        for k, v in score.items():
            print(k, v)
        running_metrics.reset()

        print("Mean Average Precision: %.4f" % np.mean(APs))

        if config.visdomVal:
            # NOTE(review): lossSegVal / lossDetVal stay empty (the val loss
            # computation is disabled), so np.mean() yields NaN points here.
            ax2.plot((epoch+1) * len(trainloader), np.mean(lossSegVal), 'cs', label = 'LossSegVal')
            ax2.plot((epoch+1) * len(trainloader), np.mean(lossDetVal), 'y*', label = 'lossDetVal')
            plt.pause(0.033)

        # Append this epoch's results to the log file.
        with open('MTSD.log', 'a') as f:
            f.write("++++++++++MTSD Result+++++++++++++\nepoch: {} \nDetection result: \nMean Iou: {} \nSegmentation result: \nmAP: {}\n".\
                     format(epoch+1, score['Mean IoU : \t'], np.mean(APs)))

        # Snapshot the best model by mean IoU.
        if score['Mean IoU : \t'] >= bestIou:
            bestIou = score['Mean IoU : \t']
            # BUGFIX: was `bestmAp` (a fresh variable) -- update the tracker
            # initialised above instead.
            bestmAP = np.mean(APs)
            state = {'epoch': epoch+1,
                     'model_state': model.state_dict(),
                     'optimizer_state' : optimizer.state_dict(),}
            torch.save(state, "{}/{}_best_model.pkl".format(config.snapshot_path, config.arch))
Exemple #39
0
def main(opt):
    """Train and evaluate a classifier according to the options in ``opt``.

    Builds the datasets and loaders, optionally resumes from a checkpoint
    directory, trains for ``opt.epoch`` epochs with SGD + StepLR, and
    checkpoints whenever the test accuracy improves on the best seen so far.
    """
    # Make the result folder.
    base_path = 'result'
    os.makedirs(base_path, exist_ok=True)
    result_path = make_folder(base_path, opt.save_folder)

    # Dataset transforms (ImageNet normalisation statistics).
    print(f'Preparing Dataset....{opt.dataset}')
    train_transform = transforms.Compose([
        transforms.Resize((256,256)),
        # transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
    ])

    test_transform = transforms.Compose([
        transforms.Resize((256,256)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
    ])

    train_set, test_set = get_dataset(opt.dataset, train_transform, test_transform)
    if opt.testing:
        # Smoke-test mode: shrink each split to a single batch.
        train_set = Subset(train_set, range(opt.train_batch_size))
        test_set = Subset(test_set, range(opt.test_batch_size))

    # Data loaders.
    train_loader = DataLoader(train_set, batch_size=opt.train_batch_size, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=opt.test_batch_size, shuffle=False)

    # Device selection.
    device = 'cuda' if (torch.cuda.is_available() and opt.cuda) else 'cpu'
    print(f'Using {device}')

    # Model. (Removed an unused local `vgg16_bn` import here.)
    print(f'Preparing Model....{opt.model}')
    model = get_model(opt.model, opt.num_classes, pretrained=opt.pretrained)
    model.to(device)

    # Resume from an existing checkpoint directory if requested.
    if opt.resume:
        print('Resuming from checkpoint')
        assert os.path.isdir(f'{opt.resume}')

        checkpoint = torch.load(f'{opt.resume}/{opt.model}_ckpt.pth')
        model.load_state_dict(checkpoint['model'])

        best_acc = checkpoint['acc']
        start_epoch = checkpoint['epoch']
        train_result = checkpoint['train_result']
        test_result = checkpoint['test_result']
    else:
        start_epoch = 0
        best_acc = 0
        train_result, test_result = [], []

    # Loss, optimiser and LR schedule (halve LR every 5 epochs).
    loss_func = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)

    # Training loop.
    start = time.time()

    for e in range(start_epoch, start_epoch+opt.epoch):
        train_result += train(model, train_loader, optimizer, loss_func, device, start_epoch, scheduler, e)
        test_result += test(model, test_loader, loss_func, device, start_epoch, e)
        scheduler.step()

        # test_result interleaves (loss, acc); [1::2][-1] is the latest accuracy.
        if test_result[1::2][-1] > best_acc:
            print(f'Saving Model....({result_path})')
            state = {
                'model': model.state_dict(),
                'epoch': e+1,
                'acc': test_result[1::2][-1],
                'train_result': train_result,
                'test_result': test_result
            }
            torch.save(state, f'{result_path}/{opt.model}_ckpt.pth')
            # BUGFIX: was `best = ...`, which never updated `best_acc`, so
            # the checkpoint was rewritten on every epoch beating the stale
            # baseline instead of only on genuine new bests.
            best_acc = test_result[1::2][-1]

        # Persist per-epoch results.
        if opt.save_result:
            print(f'Saving Result....({result_path})')
            save_result(train_result, test_result, result_path)

    # Record the total wall-clock training time.
    end = time.time()
    with open(f'{result_path}/time_log.txt', 'w') as f:
        f.write(str(datetime.timedelta(seconds=end-start)))
Exemple #40
0
 def test2(self):
     """get_history_parent of the historical model resolves to the original model class."""
     # assertEquals is a deprecated alias; use assertEqual.
     self.assertEqual(hmodels.get_history_parent(hmodels.get_model('historical', "HistoricalTestHistorySimple")), TestHistorySimple)
Exemple #41
0
                        img_size=(args.img_rows, args.img_cols),
                        augmentations=data_aug_te)

# Wrap the train/test datasets in DataLoaders.
trainloader = DataLoader(tr_loader,
                         batch_size=args.batch_size,
                         num_workers=args.workers,
                         shuffle=True,
                         pin_memory=True)
# NOTE(review): shuffle=True on the test loader is unusual -- confirm it
# is intentional (e.g. for visualising varied samples).
testloader = DataLoader(te_loader,
                        batch_size=args.batch_size,
                        num_workers=args.workers,
                        shuffle=True,
                        pin_memory=True)

# define model or load model
net = get_model(args.arch, n_classes=None)
if USE_CUDA:
    net.cuda()

# Optionally initialise the network from a saved parameter file.
if args.resume is not None:
    pre_params = torch.load(args.resume)
    net.init_params(pre_params)

# Summed (not averaged) binary cross-entropy, as used for the VAE
# reconstruction term in loss_function below.
reconstruction_function = nn.BCELoss()
reconstruction_function.reduction = 'sum'


def loss_function(recon_x, x, mu, logvar):
    BCE = reconstruction_function(recon_x, x)
    # x_for_tf = x.data.numpy().transpose((0,2,3,1))
    # recon_x_for_tf = recon_x.data.numpy().transpose((0,2,3,1))
Exemple #42
0
 def test3(self):
     """get_history_model of a historical instance resolves to the expected history model."""
     # assertEquals is a deprecated alias; use assertEqual.
     self.assertEqual(hmodels.get_history_model(hmodels.get_model('historical', "HistoricalTestHistorySimple")()), self.history_model)
Exemple #43
0
def visEmbed(exp_dict):
    """Visualise source/target embeddings with t-SNE and save scatter plots.

    Loads the trained source and target models named in ``exp_dict``,
    extracts embeddings for (up to) 500 samples per domain, clusters the
    source embeddings with KMeans, projects everything jointly with t-SNE,
    and writes ``src_*.pdf/png`` and ``tgt_*.pdf/png`` figures.
    """
    src_loader = datasets.get_loader(exp_dict["src_dataset"],
                                     "train",
                                     batch_size=exp_dict["src_batch_size"])

    tgt_val_loader = datasets.get_loader(exp_dict["tgt_dataset"],
                                         "val",
                                         batch_size=exp_dict["tgt_batch_size"])

    # Load the trained source and target models from disk.
    src_model, src_opt = models.get_model(exp_dict["src_model"],
                                          exp_dict["n_outputs"])
    src_model.load_state_dict(torch.load(exp_dict["path"] + "/model_src.pth"))

    tgt_model, tgt_opt = models.get_model(exp_dict["tgt_model"],
                                          exp_dict["n_outputs"])
    tgt_model.load_state_dict(torch.load(exp_dict["path"] + "/model_tgt.pth"))

    # X/Y are embeddings; X_tgt/Y_tgt are their labels (source/target domain).
    X, X_tgt = losses.extract_embeddings(src_model, src_loader)

    Y, Y_tgt = losses.extract_embeddings(tgt_model, tgt_val_loader)

    # Cap both domains at 500 samples to keep t-SNE tractable.
    X, X_tgt = X[:500], X_tgt[:500]
    Y, Y_tgt = Y[:500], Y_tgt[:500]

    # Cluster the source embeddings; 10 centers (hard-coded below as the
    # last 10 rows of the joint t-SNE projection).
    src_kmeans = KMeans(n_clusters=10)
    src_kmeans.fit(X)
    Xc = src_kmeans.cluster_centers_

    # Label each cluster center by its nearest labeled source samples.
    clf = neighbors.KNeighborsClassifier(n_neighbors=2)
    clf.fit(X, X_tgt)
    Xc_tgt = clf.predict(Xc)

    # acc_tgt = test.validate(src_model, tgt_model,
    #                                 src_loader,
    #                                 tgt_val_loader)

    # Project target embeddings, source embeddings and cluster centers in a
    # single joint t-SNE run so they share one 2-D space.
    tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
    #tsne.fit(Y[:500])
    S_tsne = tsne.fit_transform(np.vstack([Y, X, Xc]))
    #X_tsne = tsne.transform(X[:500])
    # Split the stacked projection back into its three parts
    # (order matches the vstack above; the last 10 rows are the centers).
    Y_tsne = S_tsne[:Y.shape[0]]
    X_tsne = S_tsne[Y.shape[0]:-10]
    Xc_tsne = S_tsne[-10:]
    # plt.mpl.rcParams['grid.color'] = 'k'
    # plt.mpl.rcParams['grid.linestyle'] = ':'
    # plt.mpl.rcParams['grid.linewidth'] = 0.5
    # Y_labels = Y_labels
    # X_labels = X_labels

    # scatter(Y_tsne, Y_tgt+1, win="1", title="target - {}".format(exp_dict["tgt_dataset"]))
    # scatter(X_tsne, X_tgt+1, win="2",title="source - {}".format(exp_dict["src_dataset"]))

    # One color per cluster id (c + 1 indexing below skips "b").
    colors = [
        "b", "g", "r", "c", "m", "y", "gray", "w", "chocolate", "olive", "pink"
    ]

    # --- Source-domain figure: all source points + starred cluster centers.
    if 1:
        fig = plt.figure(figsize=(6, 6))
        plt.grid(linestyle='dotted')
        plt.scatter(X_tsne[:, 0], X_tsne[:, 1], alpha=0.6, edgecolors="black")

        for c in range(10):
            ind = Xc_tgt == c
            color = colors[c + 1]
            plt.scatter(Xc_tsne[ind][:, 0],
                        Xc_tsne[ind][:, 1],
                        s=250,
                        c=color,
                        edgecolors="black",
                        marker="*")
        # plt.axes().set_aspect('equal', 'datalim')
        plt.xlabel("t-SNE Feature 2")
        plt.ylabel("t-SNE Feature 1")
        title = "Source Dataset ({}) - Center: {} - Adv: {}".format(
            exp_dict["src_dataset"].upper().replace("BIG", ""),
            exp_dict["options"]["center"], exp_dict["options"]["disc"])
        plt.title(title)
        fig.tight_layout(rect=[0, 0.03, 1, 0.95])
        plt.savefig("src_{}.pdf".format(exp_dict["exp_name"].replace(" ", "")),
                    bbox_inches='tight',
                    transparent=False)

        plt.savefig("src_{}.png".format(exp_dict["exp_name"]),
                    bbox_inches='tight',
                    transparent=False)
        # ms.visplot(fig)

    # --- Target-domain figure: target points colored by label + centers.
    if 1:

        fig = plt.figure(figsize=(6, 6))
        plt.grid(linestyle='dotted')
        for c in range(10):
            ind = Y_tgt == c
            color = colors[c + 1]

            plt.scatter(Y_tsne[ind][:, 0],
                        Y_tsne[ind][:, 1],
                        alpha=0.6,
                        c=color,
                        edgecolors="black")

        for c in range(10):
            ind = Xc_tgt == c
            color = colors[c + 1]
            plt.scatter(Xc_tsne[ind][:, 0],
                        Xc_tsne[ind][:, 1],
                        s=350,
                        c=color,
                        edgecolors="black",
                        marker="*")
        # plt.axes().set_aspect('equal', 'datalim')
        plt.xlabel("t-SNE Feature 2")
        plt.ylabel("t-SNE Feature 1")
        title = "Target Dataset ({}) - Center: {} - Adv: {}".format(
            exp_dict["tgt_dataset"].upper().replace("BIG", ""),
            exp_dict["options"]["center"], exp_dict["options"]["disc"])
        plt.title(title)
        fig.tight_layout(rect=[0, 0.03, 1, 0.95])
        plt.savefig("tgt_{}.pdf".format(exp_dict["exp_name"]),
                    bbox_inches='tight',
                    transparent=False)

        plt.savefig("tgt_{}.png".format(exp_dict["exp_name"]),
                    bbox_inches='tight',
                    transparent=False)
        help="whitch action to do: save, train, test")
    op.add_option("--predict", action="store", type=str, dest="predict",
        help="predict the class of this input")
    (opts, args) = op.parse_args()

    if opts.stdout:
        stdout = opts.stdout
        sys.stdout = logger.Logger(settings.stdout_path + stdout + '__stdout.log')
        #Display progress logs on stdout
        logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')

    if not opts.model_id:
        raise Exception("must specify --model_id")

    model_id = opts.model_id
    model = models.get_model(models.BaseModel(model_id=model_id), True)

    action = opts.action
    predict = opts.predict
    if action == "train" or action == "save":
        train(model)
    if action == "save":
        model.save()
    if predict:
        print("predict: %s" % predict)
        prediction = model.pipeline.predict([predict])
        print(prediction)
        print(model.pipeline.score([predict],prediction))
        if hasattr(model.pipeline,'predict_proba'):
            print("proba:")
            try:
Exemple #45
0
def advs_train(dataset='cifar-10',
               loss_name='ce',
               epochs=120,
               dynamic_epoch=100,
               batch_size=128,
               fosc_max=0.5,
               epsilon=0.031):
    """
    Adversarial training with a PGD attack and a dynamic FOSC criterion.

    Args:
        dataset: dataset name understood by ``get_data`` / ``get_model``.
        loss_name: training loss; only 'ce' (cross-entropy) is supported.
        epochs: total number of training epochs.
        dynamic_epoch: epoch by which the FOSC threshold decays to zero.
        batch_size: minibatch size.
        fosc_max: initial FOSC threshold (decayed linearly).
        epsilon: L-inf perturbation budget for the PGD attack.
    """
    print(
        'DynamicAdvsTrain - Data set: %s, loss: %s, epochs: %s, dynamic_epoch: %s, batch: %s, epsilon: %s'
        % (dataset, loss_name, epochs, dynamic_epoch, batch_size, epsilon))

    X_train, Y_train, X_test, Y_test = get_data(dataset,
                                                clip_min=0.,
                                                clip_max=1.,
                                                onehot=True)

    n_images = X_train.shape[0]
    image_shape = X_train.shape[1:]
    n_class = Y_train.shape[1]
    print("n_images:", n_images, "n_class:", n_class, "image_shape:",
          image_shape)

    model = get_model(dataset,
                      input_shape=image_shape,
                      n_class=n_class,
                      softmax=True)
    # model.summary()

    # Create the training loss. Renamed from `loss` so the per-batch loss
    # scalar in the training loop below does not shadow the loss function.
    if loss_name == 'ce':
        loss_fn = cross_entropy
    else:
        print("New loss function should be defined first.")
        return

    optimizer = SGD(lr=0.01, decay=1e-4, momentum=0.9)

    model.compile(loss=loss_fn, optimizer=optimizer, metrics=['accuracy'])

    # Data augmentation per dataset.
    if dataset in ['mnist']:
        datagen = ImageDataGenerator()
    elif dataset in ['cifar-10']:
        datagen = ImageDataGenerator(rotation_range=10,
                                     width_shift_range=0.2,
                                     height_shift_range=0.2,
                                     horizontal_flip=True)
    else:
        datagen = ImageDataGenerator(width_shift_range=0.2,
                                     height_shift_range=0.2,
                                     horizontal_flip=True)

    datagen.fit(X_train)

    # PGD attack used to craft adversarial training examples.
    attack = LinfPGDAttack(model,
                           epsilon=epsilon,
                           eps_iter=epsilon / 4,
                           nb_iter=10,
                           random_start=True,
                           loss_func='xent',
                           clip_min=np.min(X_train),
                           clip_max=np.max(X_train))

    # Initialize the logger (records train/val stats per epoch).
    mylogger = Logger(K.get_session(),
                      model,
                      X_train,
                      Y_train,
                      X_test,
                      Y_test,
                      dataset,
                      loss_name,
                      epochs,
                      suffix='%s' % epsilon)

    batch_iterator = datagen.flow(X_train, Y_train, batch_size=batch_size)

    start_time = time.time()

    for ep in range(epochs):
        # Step learning-rate decay: /10 at epochs 60 and 100.
        if (ep + 1) == 60:
            lr = float(K.get_value(model.optimizer.lr))
            K.set_value(model.optimizer.lr, lr / 10.0)

        if (ep + 1) == 100:
            lr = float(K.get_value(model.optimizer.lr))
            K.set_value(model.optimizer.lr, lr / 10.0)

        # Simple linear decrease of the FOSC threshold, floored at 0.
        fosc = fosc_max - fosc_max * (ep * 1.0 / dynamic_epoch)
        fosc = np.max([fosc, 0.0])

        steps_per_epoch = int(X_train.shape[0] / batch_size)
        pbar = tqdm(range(steps_per_epoch))
        for it in pbar:
            batch_x, batch_y = batch_iterator.next()
            # Craft adversarial examples for this batch under the current
            # FOSC threshold.
            batch_advs, fosc_batch = attack.perturb(K.get_session(), batch_x,
                                                    batch_y, batch_size, ep,
                                                    fosc)

            # Per-sample weight: cross-entropy of the adversarial prediction.
            probs = model.predict(batch_advs)
            loss_weight = np.max(-batch_y * np.log(probs + 1e-12), axis=1)

            if it == 0:
                fosc_all = fosc_batch
            else:
                fosc_all = np.concatenate((fosc_all, fosc_batch), axis=0)

            # First epoch trains unweighted; later epochs reweight samples.
            if ep == 0:
                loss, acc = model.train_on_batch(batch_advs, batch_y)
            else:
                loss, acc = model.train_on_batch(batch_advs,
                                                 batch_y,
                                                 sample_weight=loss_weight)
            pbar.set_postfix(acc='%.4f' % acc, loss='%.4f' % loss)

        print('All time:', time.time() - start_time)

        # Dump the epoch's FOSC values for analysis.
        log_path = './log'

        file_name = os.path.join(
            log_path, 'BatchSize_{}_Epoch_{}_fosc.npy'.format(batch_size, ep))
        np.save(file_name, fosc_all)

        val_loss, val_acc = model.evaluate(X_test,
                                           Y_test,
                                           batch_size=batch_size,
                                           verbose=0)
        logs = {
            'acc': acc,
            'loss': loss,
            'val_acc': val_acc,
            'val_loss': val_loss
        }

        print(
            "Epoch %s - loss: %.4f - acc: %.4f - val_loss: %.4f - val_acc: %.4f"
            % (ep, loss, acc, val_loss, val_acc))

        # Save the log and model weights every epoch.
        mylogger.on_epoch_end(epoch=ep, logs=logs)
        model.save_weights("model/advs_%s_%s_%s_%s.hdf5" %
                           (dataset, loss_name, epsilon, ep))
Exemple #46
0
    if not os.path.isdir(todir):
        os.makedirs(todir)
    print 'saving'
    config.save(os.path.join(todir, 'config.json'))
    with open(os.path.join(todir, 'featurizer.pkl'), 'wb') as f:
        pkl.dump(dataset.featurizer, f, protocol=pkl.HIGHEST_PROTOCOL)

    typechecker = TypeCheckAdaptor(os.path.join(mydir, 'data', 'raw', 'typecheck.csv'), dataset.featurizer.vocab)
    scoring_labels = [i for i in xrange(len(dataset.featurizer.vocab['rel'])) if i != dataset.featurizer.vocab['rel']['no_relation']]

    invalids = dataset.train.remove_invalid_examples(typechecker)
    print 'removed', len(invalids), 'invalid training examples'
    invalids = dataset.dev.remove_invalid_examples(typechecker)
    print 'removed', len(invalids), 'invalid dev examples'

    model = get_model(config, dataset.featurizer.vocab, typechecker)
    trainer = Trainer(todir, model, typechecker, scoring_labels)
    best_scores = trainer.train(dataset.train, dataset.dev, max_epoch=config.max_epoch)

    model.save_weights(os.path.join(todir, 'best_weights'), overwrite=True)

    with open(os.path.join(todir, 'classification_report.txt'), 'wb') as f:
        report = classification_report(best_scores['targs'], best_scores['preds'], target_names=dataset.featurizer.vocab['rel'].index2word)
        f.write(report)
    print report

    from plot_utils import plot_confusion_matrix, plot_histogram, get_sorted_labels
    order, labels, counts = get_sorted_labels(best_scores['targs'], dataset.featurizer.vocab)
    fig = plot_confusion_matrix(best_scores['targs'], best_scores['preds'], order, labels)
    fig.savefig(os.path.join(todir, 'confusion_matrix.png'))
args = parser.parse_args()

load_dir = args.load_dir
text_path = "/home/ubuntu/.keras/datasets/nietzsche.txt"
save_dir = args.save_dir or load_dir


from_scratch = False
maxlen = 20  # character-window length for the LSTM training set
epochs = args.epochs

# BUGFIX: read the corpus via a context manager so the file handle is
# closed deterministically (the bare `open(...).read()` leaked it).
with open(text_path) as corpus_file:
    text = corpus_file.read().lower()
X, y, chars = lstm_trainset(text, maxlen)

history = LossHistory(save_dir)
# checkpointer = ModelCheckpoint(filepath=os.path.join(save_dir, "weights.hdf5"), verbose=1, save_best_only=True)

# Either resume a saved model or build a fresh one from the props dict.
if load_dir:
    model, model_props = load_model(load_dir)
else:
    model_props = {
        'maxlen': maxlen,
        'step': 1,
        'chars': chars
    }

    model = get_model(model_props)

model.fit(X, y, batch_size=128, nb_epoch=epochs, verbose=1, callbacks=[history])
save_model(model, model_props, save_dir)