def test_load_data(self):
        """Check data_loader.load_data for both supported datasets, with and
        without resizing, asserting only returned types and tensor shapes.
        """
        # Test loading of cityscapes dataset, no resize.
        # Native Cityscapes resolution: [3, 1024, 2048] images with
        # per-pixel [1024, 2048] label maps.
        data = data_loader.load_data(self.cityscapes_path, 'cityscapes', resize=False)
        sample_img, sample_label = data[0]
        self.assertIsInstance(data, datasets.Cityscapes)
        self.assertEqual(sample_img.size(), torch.Size([3, 1024, 2048]))
        self.assertEqual(sample_label.size(), torch.Size([1024, 2048]))

        # Test loading of cityscapes dataset, resizing data to 256x256.
        data = data_loader.load_data(self.cityscapes_path, 'cityscapes', resize=True)
        sample_img, sample_label = data[0]
        self.assertIsInstance(data, datasets.Cityscapes)
        self.assertEqual(sample_img.size(), torch.Size([3, 256, 256]))
        self.assertEqual(sample_label.size(), torch.Size([256, 256]))

        # Test loading of tinyimagenet dataset, no resize (native 64x64).
        # ImageFolder labels are plain class-index ints, not tensors.
        data = data_loader.load_data(self.imagenet_path, 'imagenet', resize=False)
        sample_img, sample_label = data[0]
        self.assertIsInstance(data, datasets.ImageFolder)
        self.assertEqual(sample_img.size(), torch.Size([3, 64, 64]))
        self.assertIsInstance(sample_label, int)

        # Test loading of tinyimagenet dataset, resizing data to 256x256.
        data = data_loader.load_data(self.imagenet_path, 'imagenet', resize=True)
        sample_img, sample_label = data[0]
        self.assertIsInstance(data, datasets.ImageFolder)
        self.assertEqual(sample_img.size(), torch.Size([3, 256, 256]))
        self.assertIsInstance(sample_label, int)
def main():
    """Set up data, models and coverage tables, then run the fuzzing loop."""
    # Evaluation data loader: batch size 1, no shuffling workers.
    data_loader, class_to_idx, dataset_sizes = load_data(
        data_folder=args.input_data,
        batch_size=1,
        train_flag=False,
        kwargs={'num_workers': 0})

    # Models under fuzzing: alexnet, vgg19, mobilenet_v2, vgg16.
    model_1, model_2, model_3 = load_model()

    # DeepXplore-style neuron-coverage bookkeeping for all three models.
    coverage_table_init = CoverageTable.CoverageTableInit()
    (model_layer_dict_1, model_1_layer_names,
     model_layer_dict_2, model_2_layer_names,
     model_layer_dict_3, model_3_layer_names) = \
        coverage_table_init.init_deepxplore_coverage_tables(model_1, model_2, model_3)

    # Bundle everything the fuzzer needs and start the loop.
    fuzzer = Fuzzer.Fuzzer(
        [data_loader, class_to_idx],
        args,
        [model_1, model_2, model_3],
        [model_layer_dict_1, model_layer_dict_2, model_layer_dict_3],
        [model_1_layer_names, model_2_layer_names, model_3_layer_names],
        DEVICE)
    fuzzer.loop()
def test_vocabulary_processor():
    """Smoke-test the saved VocabularyProcessor with load_data/batch_iter.

    Restores the vocabulary from ../temp/vocab, transforms each task's
    test documents to fixed-length id arrays, prints shapes, then pulls
    one (task, batch) pair from batch_iter to exercise its interface.
    """
    from tensorflow.contrib import learn
    import sys
    sys.path.append("..")
    from utils.data_loader import load_data, batch_iter
    processor = learn.preprocessing.VocabularyProcessor.restore(
        "../temp/vocab")
    # Cap transformed documents at 100 tokens.
    processor.max_document_length = 100
    data, label = load_data("test")
    test_data = []
    for d, l in zip(data, label):
        # for each task in data
        d = list(processor.transform(d))  # generator -> list
        d = np.array(d)
        l = np.array(l)
        print(np.shape(d))
        print(np.shape(l))
        test_data.append(d)
    # Spot-check the third task's shape (assumes >= 3 tasks -- TODO confirm).
    td = np.array(test_data[2])
    print(np.shape(td))

    # Draw one batch and stop.  Positional args are presumably
    # (batch_size=7, num_epochs=3, shuffle=False) -- verify against
    # utils.data_loader.batch_iter's signature.
    for task, batch in batch_iter(test_data, label, 7, 3, False):
        x, y = zip(*batch)
        print(task)
        break
def main():
    """Train the network on the dataset and save the learned weights."""
    # Hyper-parameters.
    epoch = 5
    lr = 0.001
    batch_size = 128

    # Use the GPU when available, otherwise fall back to CPU.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Where the dataset is stored / downloaded to.
    root_name = './data'

    # Build the train/test loaders.
    train_loader, test_loader, classes = load_data(root_name, batch_size)

    # Instantiate the network on the chosen device.
    model = Net().to(device)

    # Wrapper object providing the train / evaluation loops.
    ope = ModelOperation(model, device)

    # Train.
    ope.train_model(train_loader, epoch, lr)

    # Persist the trained weights.
    model_path = 'model.pth'
    torch.save(model.state_dict(), model_path)

    # Evaluate on the held-out test set.
    ope.test_model(test_loader)
Beispiel #5
0
def main():
    """Parse CLI hyper-parameters, load the dataset, and run train/test.

    Bug fix: every option was declared with ``action='store_true'``,
    which makes argparse store the boolean ``True`` whenever the flag is
    present and never parse a value (e.g. ``--epoch 50`` was impossible
    and ``--epoch`` alone set epoch to ``True``).  Each option now uses
    an explicit ``type=``; defaults are unchanged, so running with no
    flags behaves exactly as before.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--rootpath', type=str, default='/mnt/KRED_publish/data/', help='root path of data')

    ## tuning parameters
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='learning rate')
    parser.add_argument('--epoch', type=int, default=100, help='epoch num')
    parser.add_argument('--batch_size', type=int, default=16, help='batch size')
    parser.add_argument('--l2_regular', type=float, default=0.00001, help='l2 regular')

    ## task-specific parameters
    parser.add_argument('--training_type', type=str, default="single_task", help='single_task training or multi-task training')
    parser.add_argument('--task', type=str, default="user2item", help='task types: user2item, item2item, vert_classify, pop_predict, local_news')

    parser.add_argument('--news_entity_num', type=int, default=20, help='fix a news entity num to news_entity_num')
    parser.add_argument('--entity_neighbor_num', type=int, default=20, help='nerighbor num for a entity')
    parser.add_argument('--user_his_num', type=int, default=20, help='user history num')
    parser.add_argument('--negative_num', type=int, default=6, help='1 postive and negative_num-1 negative in training set')
    parser.add_argument('--smooth_lamda', type=float, default=10, help='smooth_lamda in softmax in loss function')

    parser.add_argument('--embedding_dim', type=int, default=90, help='embedding dim for enity_embedding dv uv')
    parser.add_argument('--layer_dim', type=int, default=128, help='layer dim')

    parser.add_argument('--logdir', type=str, default="EXP_num", help='the dir for save predict results')

    args = parser.parse_args()

    data = load_data(args)

    train_test(args, data)
Beispiel #6
0
def model_test():
    """Evaluate the saved target model on the adversarial test set and
    print its top-1 accuracy."""
    print()
    print('############ offline testing ############')
    print()

    # Load the serialized model and switch to inference mode.
    model = torch.load(args.target_model).eval()

    data_loaders, class_to_idx, dataset_sizes = load_data(args.test_data,
                                                          args.batch_size,
                                                          train_flag=False,
                                                          kwargs=kwargs)
    print('number of the adversarial samples: {}'.format(dataset_sizes))

    total = 0
    correct = 0
    with torch.no_grad():
        for images, labels in data_loaders:
            images = images.to(DEVICE)
            labels = labels.to(DEVICE)
            # Class prediction = argmax over logits.
            predicted = torch.max(model(images), 1)[1]
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('Accuracy of the target model on the test images: %d %%' %
          (100 * correct / total))
Beispiel #7
0
def load_and_stack_data(load_file_names=None):
    """Read a manifest of data files and concatenate their contents.

    The manifest at *load_file_names* lists one file name per line; each
    file is loaded from ./testing_pipeline via load_data() and the four
    resulting arrays (states, deltas, actions, weights) are concatenated
    across files.  Weights gain a trailing singleton axis.
    """
    with open(load_file_names) as manifest:
        names = [line.strip() for line in manifest.readlines()]

    base_dir = os.path.join(os.getcwd(), "testing_pipeline")
    # One (states, deltas, actions, weights) tuple per manifest entry.
    per_file = [load_data(os.path.join(base_dir, name)) for name in names]

    states_full, deltas_full, actions_full, weights_full = (
        np.concatenate(group) for group in zip(*per_file))

    # Trailing singleton axis so the weights broadcast per-sample.
    weights_full = np.expand_dims(weights_full, axis=-1)
    return states_full, deltas_full, actions_full, weights_full
Beispiel #8
0
    def get_avg_activated_channels(self,
                                   layers,
                                   data_path,
                                   data_type,
                                   sample_size=100):
        '''
        Computes the average number of channels activated in each layer
        by inputs from the specified dataset.

        Args:
            layers: iterable of modules to hook via LayerActivations.
            data_path: dataset location forwarded to load_data().
            data_type: dataset identifier forwarded to load_data().
            sample_size: number of samples run through the model
                (batch size 1 each).

        Returns:
            numpy array of shape (len(layers),): per-layer mean count of
            channels that had at least one non-zero activation.
        '''
        # Hook every layer of interest so forward passes record activations.
        layer_activations = []
        for layer in layers:
            activations = LayerActivations(layer)
            layer_activations.append(activations)

        dataset = load_data(data_path, data_type)
        # NOTE: this draws the FIRST `sample_size` dataset indices in a
        # random order, not a random subset of the whole dataset.
        sampler = torch.utils.data.SubsetRandomSampler(np.arange(sample_size))
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=1,
                                                 sampler=sampler)

        avg_activated_channels = np.zeros(len(layers))
        for x, _ in dataloader:
            _ = self.model(x)  # forward pass populates the hooks
            channels_activations = [
                l.activations[0].detach().numpy() for l in layer_activations
            ]
            # A channel counts as activated if any element is non-zero;
            # np.nonzero(c)[0] yields the indices along the first
            # (channel) axis, uniqued to count distinct channels.
            avg_activated_channels += [
                len(np.unique(np.nonzero(c)[0])) for c in channels_activations
            ]
        avg_activated_channels = avg_activated_channels / sample_size

        return avg_activated_channels
def main(train_df_path, test_df_path, embedings_file, model_name, stamp):
  """Train `model_name` with k-fold CV; write train/test probability CSVs.

  Bug fix: the fold-averaged test probabilities were accumulated with a
  hard-coded ``proba_test / 5.``; this now divides by the actual number
  of folds returned by prepare_data_cv, so the average stays correct if
  the fold count ever changes (identical behavior for 5 folds).
  """
  experiment_path = './experiments/%s' % stamp

  x_train, targets_train, x_test, train_submission, submission, embedings = load_data(train_df_path, test_df_path,
                                                                                      embedings_file)
  (kfold_data, X_test) = prepare_data_cv(x_train, targets_train, x_test)
  n_folds = len(kfold_data)

  # Out-of-fold train probabilities and fold-averaged test probabilities;
  # 6 columns, one per class in CLASS_NAMES.
  train_probas = np.zeros(shape=(x_train.shape[0], 6))
  test_probas = np.zeros(shape=(x_test.shape[0], 6))

  models_roc = []
  models_train_roc = []

  for idx, data in enumerate(tqdm(kfold_data)):
    X_train, y_train, X_valid, y_valid, val_indices = data

    model = get_model(model_name,
                      embedding_matrix=embedings,
                      dropout_dense=0.4,
                      weight_decay=1e-4)
    callbacks = get_model_callbacks(save_dir=os.path.join(experiment_path, 'fold_%02d' % idx))

    model.fit(X_train, y_train,
              batch_size=BATCH_SIZE,
              epochs=EPOCHES,
              validation_data=(X_valid, y_valid),
              shuffle=True,
              callbacks=callbacks, verbose=1)

    # Restore the best checkpoint written by the callbacks before scoring.
    model.load_weights(filepath=os.path.join(experiment_path, ('fold_%02d/model/model_weights.hdf5' % idx)))

    proba = model.predict(X_train, batch_size=BATCH_SIZE * 2)
    proba_val = model.predict(X_valid, batch_size=BATCH_SIZE * 2)
    proba_test = model.predict(x_test, batch_size=BATCH_SIZE * 2)

    models_roc.append(roc_auc_score(y_valid, proba_val))
    models_train_roc.append(roc_auc_score(y_train, proba))

    # Out-of-fold predictions fill disjoint validation slices; test
    # predictions are averaged across folds.
    train_probas[val_indices] += proba_val
    test_probas += proba_test / float(n_folds)

    print('Train ROC AUC:\nMean: %f\nStd: %f\nMin: %f\nMax: %f\n\n' % (np.mean(models_train_roc),
                                                                       np.std(models_train_roc),
                                                                       np.min(models_train_roc),
                                                                       np.max(models_train_roc)))

    print('Val ROC AUC:\nMean: %f\nStd: %f\nMin: %f\nMax: %f\n\n' % (np.mean(models_roc),
                                                                     np.std(models_roc),
                                                                     np.min(models_roc),
                                                                     np.max(models_roc)))

  # Persist out-of-fold train predictions and averaged test predictions.
  for i, cls_name in enumerate(CLASS_NAMES):
    train_submission[cls_name] = train_probas[:, i]
  train_submission.to_csv('./csv/train_%s.csv' % stamp, index=False)

  for i, cls_name in enumerate(CLASS_NAMES):
    submission[cls_name] = test_probas[:, i]
  submission.to_csv('./csv/submission_%s.csv' % stamp, index=False)
    def __init__(self, sequence_length):
        """Restore the saved vocabulary, vectorize the train and test
        splits, and (when the GloVe file exists) build the embedding
        matrix.

        Bug fix: the test-split inner loop appended the accumulator list
        to itself (``tmp_label.append(tmp_label)``) instead of the label
        value ``tmp_y``, producing self-referential label lists.  Both
        splits now go through one shared helper, matching the (correct)
        original train-split logic.
        """
        # load data first
        self.processor = learn.preprocessing.VocabularyProcessor.restore(
            "../temp/vocab")
        # Cap transformed documents at `sequence_length` tokens.
        self.processor.max_document_length = sequence_length

        def _vectorize(raw_data, raw_label):
            # Per task: map documents to id sequences and drop examples
            # whose ids are all zero (nothing matched the vocabulary).
            data_out = []
            label_out = []
            for rd, rl in zip(raw_data, raw_label):
                tmp_data = []
                tmp_label = []
                rd = list(self.processor.transform(rd))  # generator -> list
                for tmp_x, tmp_y in zip(rd, rl):
                    tmp_x = tmp_x.tolist()
                    if np.sum(tmp_x) != 0:
                        tmp_data.append(tmp_x)
                        tmp_label.append(tmp_y)
                data_out.append(tmp_data)
                label_out.append(tmp_label)
            return data_out, label_out

        raw_data, raw_label = load_data("train")
        self.train_data, self.train_label = _vectorize(raw_data, raw_label)
        del raw_data, raw_label
        print("load training data complete!")

        raw_data, raw_label = load_data("test")
        self.test_data, self.test_label = _vectorize(raw_data, raw_label)
        del raw_data, raw_label
        print("load test data complete!")

        self.embedding_matrix = self._embedding_matrix_initializer(
        ) if os.path.exists("../data/glove.6B/glove.6B.{}d.txt".format(
            params["global"]["embedding_size"])) else None

        print("read from embedding_matrix complete!")
Beispiel #11
0
def load_data(src, tar, root_dir):
    '''Train data and test data initialization'''
    src_folder = os.path.join(root_dir, src)
    tar_folder = os.path.join(root_dir, tar)

    # Source domain: one (shuffled) training loader.  Target domain:
    # both a training loader and a non-shuffled test loader.
    source_loader = data_loader.load_data(src_folder, CFG['batch_size'],
                                          True, CFG['kwargs'])
    target_train_loader = data_loader.load_data(tar_folder,
                                                CFG['batch_size'], True,
                                                CFG['kwargs'])
    target_test_loader = data_loader.load_data(tar_folder,
                                               CFG['batch_size'], False,
                                               CFG['kwargs'])

    sizes = (len(source_loader.dataset), len(target_train_loader.dataset),
             len(target_test_loader.dataset))
    print('source len: {0}, target train len: {1}, target test len: {2}'.format(
        *sizes))

    return source_loader, target_train_loader, target_test_loader
def main():
    """Restore the trained model from disk and report test-set accuracy."""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    root_name = './data'
    # Only the test loader is needed for evaluation.
    _, test_loader, _ = load_data(root_name, 128)

    # Load the saved weights onto the chosen device.
    model_path = 'model.pth'
    model = Net().to(device)
    model.load_state_dict(torch.load(model_path, map_location=device))
    print(model)

    # Evaluate.
    ope = ModelOperation(model, device)
    ope.test_model(test_loader)
Beispiel #13
0
 def __init__(self, sequence_length):
     # load data first
     # NOTE(review): this variant loads load_data("test") into
     # self.train_data / self.train_label -- possibly intentional for a
     # test-only runner, but verify against the full class.
     self.processor = learn.preprocessing.VocabularyProcessor.restore(
         "../temp/vocab")
     # Cap transformed documents at `sequence_length` tokens.
     self.processor.max_document_length = sequence_length
     raw_data, raw_label = load_data("test")
     self.train_data = []
     self.train_label = []
     for rd, rl in zip(raw_data, raw_label):
         # for each task in data
         tmp_data = []
         tmp_label = []
         rd = list(self.processor.transform(rd))  # generator -> list
         for tmp_x, tmp_y in zip(rd, rl):
             tmp_x = tmp_x.tolist()
             # Drop examples whose id sequence is all zeros (nothing
             # matched the vocabulary).
             if np.sum(tmp_x) != 0:
                 tmp_data.append(tmp_x)
                 tmp_label.append(tmp_y)
         self.train_data.append(tmp_data)
         self.train_label.append(tmp_label)
     del raw_data, raw_label
     print("load training data complete!")
Beispiel #14
0
def main():
    """Command-line driver dispatching on ``args.mode``.

    Modes: train, test, activations, view_activations,
    compare_activations, view_max_activating.

    Bug fix: ``resize=~args.no_resize`` applied bitwise NOT to a bool
    (``~True == -2``, ``~False == -1`` -- both truthy), so resizing was
    effectively always enabled regardless of ``--no_resize``.  All three
    call sites now use the boolean ``not args.no_resize``.
    """
    args = get_cli_arguments()

    if (args.checkpoint is not None) and (not os.path.exists(args.checkpoint)):
        sys.exit('Specified checkpoint cannot be found')

    def _build_dataloader(dataset):
        # Shared loader construction: optional fixed 50-sample subset for
        # quick experiments, otherwise shuffle the full dataset.
        if args.subset:
            sampler = torch.utils.data.SubsetRandomSampler(np.arange(50))
            return torch.utils.data.DataLoader(
                dataset, batch_size=args.batch_size, sampler=sampler)
        return torch.utils.data.DataLoader(
            dataset, batch_size=args.batch_size, shuffle=True)

    if args.mode == 'train':
        if args.dataset != 'cityscapes':
            sys.exit("Model can only be trained on cityscapes dataset")

        dataset = load_data(args.path, args.dataset, resize=not args.no_resize)
        dataloader = _build_dataloader(dataset)

        model = UNet(num_classes=len(datasets.Cityscapes.classes),
                     pretrained=args.pretrained)

        if not args.pretrained:
            set_parameter_required_grad(model, True)

        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
        if (args.savedir is not None) and (not os.path.exists(args.savedir)):
            os.makedirs(args.savedir)
        train(model,
              dataloader,
              criterion,
              optimizer,
              num_epochs=args.epochs,
              checkpoint_path=args.checkpoint,
              save_path=args.savedir)
        return

    if args.mode == 'test':
        # Evaluate on the validation split.
        dataset = load_data(args.path,
                            args.dataset,
                            resize=not args.no_resize,
                            split='val')
        dataloader = _build_dataloader(dataset)

        model = UNet(num_classes=len(datasets.Cityscapes.classes),
                     pretrained=args.pretrained)
        validate(model, dataloader, args.checkpoint)

    if args.mode == 'activations':
        if args.model is None:
            sys.exit("Must specify model to use with --model argument")
        dataset = load_data(args.path, args.dataset, resize=not args.no_resize)
        dataloader = _build_dataloader(dataset)

        if args.model == 'unet':
            model = UNet(num_classes=len(datasets.Cityscapes.classes))
            if args.checkpoint:
                checkpoint = torch.load(
                    args.checkpoint, map_location=lambda storage, loc: storage)
                model.load_state_dict(checkpoint['model_state_dict'])
            else:
                print(
                    "NOTE: Getting activations for untrained network. Specified a pretrained model with the "
                    "--checkpoint argument.")
        elif args.model == 'vggmod':
            model = VGGmod()
        else:
            # NOTE(review): this fallback retrieves UNet activations here
            # and then VGGmod activations below (two passes).  Preserved
            # from the original -- confirm the double pass is intended.
            model = UNet(num_classes=len(datasets.Cityscapes.classes))
            if args.checkpoint:
                checkpoint = torch.load(args.checkpoint)
                model.load_state_dict(checkpoint['model_state_dict'])
            set_parameter_required_grad(model, True)
            retrieve_activations(model, dataloader, args.dataset)
            model = VGGmod()

        set_parameter_required_grad(model, True)

        retrieve_activations(model, dataloader, args.dataset)

    if args.mode == 'view_activations':
        file_1 = os.path.join(args.path, 'VGGmod_activations')
        file_2 = os.path.join(args.path, 'UNet_activations_matched')
        if not os.path.exists(file_1) or not os.path.exists(file_2):
            exit(
                "Could not load activations from " + args.path +
                ". If you have not generated activations for both UNet "
                "and VGG11, run instead with the \"--mode activations\" parameter."
            )

        activs1, activs2 = load_activations(file_1, file_2)
        visualize_batch(activs1,
                        activs2,
                        batch_num=args.batch_num,
                        start_layer=args.start_layer,
                        stop_layer=args.stop_layer)

    if args.mode == 'compare_activations':
        file_1 = os.path.join(args.path, 'VGGmod_activations')
        file_2 = os.path.join(args.path, 'UNet_activations')
        match_channels(file_1, file_2, args.type)

    if args.mode == 'view_max_activating':
        channel_vis_driver(args.model, args.checkpoint, args.path,
                           args.dataset, args.conv_layer, args.channels,
                           args.img_size, args.upscale_steps,
                           args.upscale_factor, args.learning_rate,
                           args.opt_steps, args.grid, args.path, args.verbose)
    # NOTE(review): fragment from the interior of a larger function; the
    # enclosing definition is outside this view.
    args = arg_parser.parse_args()
    logs = logger.Logger(args)

    if args.GPU_to_use is not None:
        logs.write_to_log_file("Using GPU #" + str(args.GPU_to_use))

    # Data loaders plus the location/velocity normalization extrema that
    # the model loader consumes below.
    (
        train_loader,
        valid_loader,
        test_loader,
        loc_max,
        loc_min,
        vel_max,
        vel_min,
    ) = data_loader.load_data(args)

    # Receiver/sender relation matrices over the interacting atoms.
    rel_rec, rel_send = utils.create_rel_rec_send(args, args.num_atoms)

    encoder, decoder, optimizer, scheduler, edge_probs = model_loader.load_model(
        args, loc_max, loc_min, vel_max, vel_min)

    logs.write_to_log_file(encoder)
    logs.write_to_log_file(decoder)

    # Optional sparsity prior over edge types: probability args.prior for
    # the first edge type, remaining mass split evenly over the rest.
    if args.prior != 1:
        assert 0 <= args.prior <= 1, "args.prior not in the right range"
        prior = np.array([args.prior] + [(1 - args.prior) /
                                         (args.edge_types - 1)
                                         for _ in range(args.edge_types - 1)])
        logs.write_to_log_file("Using prior")
Beispiel #16
0
        # Tail of the selected-feature-column list (network-flow feature
        # names; the list literal opens before this fragment).
        ' Source Port', ' Destination Port', ' Flow Duration',
        'Total Length of Fwd Packets', ' Total Length of Bwd Packets',
        'Bwd Packet Length Max', ' Bwd Packet Length Min', 'Flow Bytes/s',
        ' Flow IAT Mean', ' Flow IAT Std', ' Flow IAT Max', ' Flow IAT Min',
        'Fwd IAT Total', ' Fwd IAT Mean', ' Fwd IAT Std', ' Fwd IAT Min',
        'Bwd IAT Total', ' Bwd IAT Mean', ' Bwd IAT Std', ' Bwd IAT Min',
        'Fwd PSH Flags', ' Bwd PSH Flags', ' Fwd URG Flags',
        ' Fwd Header Length', ' Bwd Packets/s', ' Packet Length Mean',
        ' ACK Flag Count', ' Down/Up Ratio', ' Avg Fwd Segment Size',
        ' Fwd Header Length.1', 'Fwd Avg Bytes/Bulk', ' Fwd Avg Packets/Bulk',
        ' Bwd Avg Bytes/Bulk', 'Bwd Avg Bulk Rate', 'Subflow Fwd Packets',
        ' Subflow Fwd Bytes', 'Init_Win_bytes_forward', ' act_data_pkt_fwd',
        ' Active Std', ' Active Min', ' Idle Max'
    ]
    #
    # Load the selected columns plus labels, then scale each feature
    # column into [-1, 1].
    X, Y = load_data(input_file, features_selected, output_file)
    # output_file = '../original_data_no_sample/Wednesday-workingHours.pcap_ISCX_feature_selected.csv'
    # X, Y= load_data_from_files(output_file, sample_ratio=0.05, preprocess=True)
    # print('X.shape:', X.shape, ' Y.shape:', np.asarray(Y).shape)
    print('X[0]:', X[0])
    X = normalize_data(X, axis=0, low=-1, high=1, eps=1e-5)
    print('Normalized X[0]:', X[0])
    print('X.shape:', X.shape, ' Y.shape:', np.asarray(Y).shape)
    # Show the class balance of the labels.
    print('label:', Counter(Y))

    # Network/training configuration for the code that follows this
    # fragment (41 input features, 64 hidden units, scalar output).
    show_flg = True
    save_flg = True
    in_size = 41
    h_size = 64
    out_size = 1
    dtype = torch.float
Beispiel #17
0
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size


if __name__ == '__main__':
    # Build the target model for this run (feature extraction on top of a
    # pretrained backbone) and move it to the compute device.
    model, input_size = initialize_model(args.model_name,
                                         args.num_classes,
                                         feature_extract=True,
                                         use_pretrained=True)
    model.to(DEVICE)
    # Show the instantiated architecture.
    print(model)

    # Loaders sized to the model's expected input resolution.
    data_loaders, dataset_sizes = load_data(input_size)
    print(dataset_sizes)

    model, model_saved_path = model_training(model, data_loaders,
                                             dataset_sizes)
    print('Finished Training')

    # Offline evaluation of the freshly saved checkpoint.
    model_test(model_saved_path, data_loaders)
def main(arg):

    directory = Path('./saved_predictions/')
    directory.mkdir(exist_ok=True)
    directory = Path('./saved_models/')
    directory.mkdir(exist_ok=True)
    directory = Path('./training_checkpoints/')
    directory.mkdir(exist_ok=True)
    input_yx_size = tuple(args.input_yx_size)
    batch_size = args.batch_size
    epochs = args.epochs
    learning_rate = args.learning_rate
    num_test_samples = args.num_test_samples
    save_weights = args.save_weights
    every = args.every
    num_samples = args.num_samples
    save_train_prediction = args.save_train_prediction
    save_test_prediction = args.save_test_prediction
    verbose = args.verbose
    validation_ratio = args.validation_ratio
    y_axis_len, x_axis_len = input_yx_size
    decay = args.decay
    decay = args.decay
    load_weights = args.load_weights
    y_axis_len, x_axis_len = input_yx_size
    num_points = y_axis_len * x_axis_len
    is_flat_channel_in = args.is_flat_channel_in
    input_points = Input(shape=(num_points, 4))

    x = input_points
    x = Convolution1D(64, 1, activation='relu', input_shape=(num_points, 4))(x)
    x = BatchNormalization()(x)
    x = Convolution1D(128, 1, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Convolution1D(512, 1, activation='relu')(x)
    x = BatchNormalization()(x)
    x = MaxPooling1D(pool_size=num_points)(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(256, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(16,
              weights=[
                  np.zeros([256, 16]),
                  np.array([1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
                            1]).astype(np.float32)
              ])(x)
    input_T = Reshape((4, 4))(x)

    # forward net
    g = Lambda(mat_mul, arguments={'B': input_T})(input_points)
    g = Convolution1D(64, 1, input_shape=(num_points, 3), activation='relu')(g)
    g = BatchNormalization()(g)
    g = Convolution1D(64, 1, input_shape=(num_points, 3), activation='relu')(g)
    g = BatchNormalization()(g)

    # feature transformation net
    f = Convolution1D(64, 1, activation='relu')(g)
    f = BatchNormalization()(f)
    f = Convolution1D(128, 1, activation='relu')(f)
    f = BatchNormalization()(f)
    f = Convolution1D(128, 1, activation='relu')(f)
    f = BatchNormalization()(f)
    f = MaxPooling1D(pool_size=num_points)(f)
    f = Dense(512, activation='relu')(f)
    f = BatchNormalization()(f)
    f = Dense(256, activation='relu')(f)
    f = BatchNormalization()(f)
    f = Dense(64 * 64,
              weights=[
                  np.zeros([256, 64 * 64]),
                  np.eye(64).flatten().astype(np.float32)
              ])(f)
    feature_T = Reshape((64, 64))(f)

    # forward net
    g = Lambda(mat_mul, arguments={'B': feature_T})(g)
    seg_part1 = g
    g = Convolution1D(64, 1, activation='relu')(g)
    g = BatchNormalization()(g)
    g = Convolution1D(32, 1, activation='relu')(g)
    g = BatchNormalization()(g)
    g = Convolution1D(32, 1, activation='relu')(g)
    g = BatchNormalization()(g)

    # global_feature
    global_feature = MaxPooling1D(pool_size=num_points)(g)
    global_feature = Lambda(exp_dim, arguments={'num_points':
                                                num_points})(global_feature)

    # point_net_seg
    c = concatenate([seg_part1, global_feature])
    """ c = Convolution1D(512, 1, activation='relu')(c)
    c = BatchNormalization()(c)
    c = Convolution1D(256, 1, activation='relu')(c)
    c = BatchNormalization()(c)
    c = Convolution1D(128, 1, activation='relu')(c)
    c = BatchNormalization()(c)
    c = Convolution1D(128, 1, activation='relu')(c)
    c = BatchNormalization()(c) """
    c = Convolution1D(256, 1, activation='relu')(c)
    c = BatchNormalization()(c)
    c = Convolution1D(128, 4, activation='relu', strides=4)(c)
    c = BatchNormalization()(c)
    c = Convolution1D(128, 4, activation='relu', strides=4)(c)
    c = BatchNormalization()(c)
    c = Convolution1D(128, 4, activation='relu', strides=4)(c)
    c = BatchNormalization()(c)
    c = Convolution1D(64, 4, activation='relu', strides=4)(c)
    c = BatchNormalization()(c)
    c = Convolution1D(64, 4, activation='relu', strides=4)(c)
    c = BatchNormalization()(c)
    c = Convolution1D(32, 1, activation='relu')(c)
    c = BatchNormalization()(c)
    """ c = Convolution1D(128, 4, activation='relu',strides=4)(c)
    c = Convolution1D(64, 4, activation='relu',strides=4)(c)
    c = Convolution1D(32, 4, activation='relu',strides=4)(c)
    c = Convolution1D(16, 1, activation='relu')(c)
    c = Convolution1D(1, 1, activation='relu')(c) """
    #c = tf.keras.backend.squeeze(c,3);
    c = CuDNNLSTM(64, return_sequences=False)(c)
    #c =CuDNNLSTM(784, return_sequences=False))
    #c =CuDNNLSTM(256, return_sequences=False))

    #c = Reshape([16,16,1])(c)
    c = Reshape([8, 8, 1])(c)
    c = Conv2DTranspose(8, (3, 3),
                        padding="same",
                        activation="relu",
                        strides=(2, 2))(c)
    c = Conv2DTranspose(8, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(16, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(32, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(32, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(32, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(64, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(64, (3, 3), padding="valid", activation="relu")(c)
    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)

    #c =Dropout(0.4))

    c = Conv2DTranspose(128, (3, 3),
                        padding="same",
                        activation="relu",
                        strides=(2, 2))(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(128, (3, 3), padding="valid", activation="relu")(c)

    #c =Dropout(0.4))
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(128, (3, 3),
                        padding="same",
                        activation="relu",
                        strides=(2, 2))(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(128, (3, 3), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)

    #c =Dropout(0.4))
    #c =tf.keras.layers.BatchNormalization())
    c = Conv2DTranspose(64, (3, 3), padding="same", strides=(4, 2))(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(32, (3, 3), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(32, (3, 3), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)

    #c =Dropout(0.4))
    c = Conv2DTranspose(32, (3, 3),
                        padding="same",
                        activation="relu",
                        strides=(1, 1))(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(32, (3, 1), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(32, (3, 1), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(16, (1, 1), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)
    c = Conv2DTranspose(8, (1, 1), padding="valid", activation="relu")(c)
    c = tf.keras.layers.BatchNormalization()(c)

    c = Conv2DTranspose(1, (1, 1), padding="valid")(c)
    """ c =Conv2DTranspose(4, (1,1),padding="same",activation="relu"))
    c =Conv2DTranspose(2, (1,1),padding="same",activation="relu"))
    #c =Dropout(0.4))
    c =Conv2DTranspose(1, (1,1),padding="same")) """
    prediction = tf.keras.layers.Reshape([512, 256])(c)
    """ c1 ,c2  = tf.split(c,[256,256],axis=1,name="split")
    complexNum = tf.dtypes.complex(
        c1,
        c2,
        name=None
    )

    complexNum =tf.signal.ifft2d(
        complexNum,
        name="IFFT"
    )
    real = tf.math.real(complexNum)
    imag = tf.math.imag(complexNum)

    con = concatenate([real,imag])

    prediction  =tf.keras.layers.Reshape([ 512, 256])(con)
    """
    # define model
    model = Model(inputs=input_points, outputs=prediction)
    opt = tf.keras.optimizers.Adam(lr=learning_rate, decay=decay)

    loss = tf.keras.losses.MeanSquaredError()
    mertric = ['mse']
    if args.loss is "MAE":
        loss = tf.keras.losses.MeanAbsoluteError()
        mertric = ['mae']

    model.compile(
        loss=loss,
        optimizer=opt,
        metrics=mertric,
    )

    model.summary()
    if load_weights:
        model.load_weights('./training_checkpoints/cp-best_loss.ckpt')

    #edit data_loader.py if you want to play with data
    input_ks, ground_truth = load_data(num_samples,
                                       is_flat_channel_in=is_flat_channel_in)

    input_ks = input_ks / np.max(input_ks)

    checkpoint_path = "./training_checkpoints/cp-{epoch:04d}.ckpt"
    checkpoint_dir = os.path.dirname(checkpoint_path)

    # Create checkpoint callback
    #do you want to save the model's wieghts? if so set this varaible to true

    cp_callback = []

    NAME = "NUFFT_NET"

    tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
    cp_callback.append(tensorboard)
    if save_weights:
        cp_callback.append(
            tf.keras.callbacks.ModelCheckpoint(checkpoint_dir,
                                               save_weights_only=True,
                                               verbose=verbose,
                                               period=every))

    if args.is_train:
        model.fit(input_ks,
                  ground_truth,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_split=validation_ratio,
                  callbacks=cp_callback)

    if args.name_model is not "":
        model.save('./saved_mdoels/' + args.name_model)
    dict_name = './saved_predictions/'
    #return to image size
    x_axis_len = int(x_axis_len / 4)
    np.random.seed(int(time()))

    if save_train_prediction <= num_samples:
        rand_ix = np.random.randint(0, num_samples - 1, save_train_prediction)
        #kspace = np.zeros((save_train_prediction,
        #y_axis_len,input_ks[rand_ix].shape[1]))
        kspace = input_ks[rand_ix]
        if args.save_input:
            np.save("./saved_predictions/inputs.npy", input_ks[rand_ix])
        ground_truth = ground_truth[rand_ix]
        preds = model.predict(kspace, batch_size=save_train_prediction)
        for i in range(save_train_prediction):

            output = np.reshape(preds[i], (y_axis_len * 2, x_axis_len))
            output = output * 255
            output[np.newaxis, ...]
            output_gt = ground_truth[i]
            output_gt[np.newaxis, ...]
            output = np.concatenate([output, output_gt], axis=0)
            np.save(dict_name + 'prediction%d.npy' % (i + 1), output)

        input_ks, ground_truth = load_data(
            num_test_samples, 'test', is_flat_channel_in=is_flat_channel_in)

        input_ks = input_ks / np.max(input_ks)
    if args.is_eval:
        model.evaluate(input_ks,
                       ground_truth,
                       batch_size,
                       verbose,
                       callbacks=cp_callback)

    if save_test_prediction <= num_test_samples:
        rand_ix = np.random.randint(0, num_test_samples - 1,
                                    save_test_prediction)
        kspace = input_ks[rand_ix]
        if args.save_input:
            np.save("./saved_predictions/test_inputs.npy", input_ks[rand_ix])
        ground_truth = ground_truth[rand_ix]
        preds = model.predict(kspace, batch_size=save_test_prediction)
        for i in range(save_test_prediction):

            output = np.reshape(preds[i], (y_axis_len * 2, x_axis_len))
            output = output * 255
            output[np.newaxis, ...]
            output_gt = ground_truth[i]
            output_gt[np.newaxis, ...]
            output = np.concatenate([output, output_gt], axis=0)
            np.save(dict_name + 'test_prediction%d.npy' % (i + 1), output)
# ===== Beispiel #19 (example separator from scrape; score: 0) =====
                    action='store_true',
                    default=500,
                    help='batch size')
# NOTE(review): these options are numeric hyper-parameters, but the original
# passed action='store_true', which turns each option into a boolean flag:
# the numeric default is silently replaced by True whenever the flag appears
# on the command line. Declare an explicit numeric type instead.
parser.add_argument('--l2_regular',
                    type=float,
                    default=0.00001,
                    help='l2 regular')

parser.add_argument('--news_entity_num',
                    type=int,
                    default=20,
                    help='fix a news entity num to news_entity_num')
parser.add_argument('--negative_num',
                    type=int,
                    default=6,
                    help='negative sampling number')
parser.add_argument('--embedding_dim',
                    type=int,
                    default=90,
                    help='embedding dim for enity_embedding dv uv')
parser.add_argument('--layer_dim',
                    type=int,
                    default=128,
                    help='layer dim')

args = parser.parse_args()

# load dataset according to the parsed configuration, then run train/test
data = load_data(args)

train_test(args, data)
def main_advertorch():
    """Generate untargeted DDN-L2 adversarial examples for the whole dataset
    and save the perturbed images into per-class subdirectories.

    Relies on module-level globals: ``args``, ``kwargs``, ``DEVICE``,
    ``load_data``, ``attacks`` (advertorch), plus torch/torchvision.
    Side effects: deletes and repopulates ``args.output_path``.
    """
    data_loaders, class_to_idx, dataset_sizes = load_data(args.input_data,
                                                          args.batch_size,
                                                          train_flag=False,
                                                          kwargs=kwargs)
    print('length of dataset: {}'.format(dataset_sizes))

    if args.model_type == 0:
        # transfer learning based model; use its predict method as callable
        from TransferNet import Transfer_Net
        model_target = Transfer_Net(num_class=10, base_net=args.base_net)
        model_target.load_state_dict(torch.load(args.input_model))
        model_target.to(DEVICE)
        model_target = model_target.predict

    elif args.model_type == 1:
        # normal DNN loaded whole from disk, switched to inference mode
        model_target = torch.load(args.target_model)
        model_target.to(DEVICE)
        model_target = model_target.eval()

    # DDN L2 attack (decoupled direction and norm). NOTE(review): the
    # original comment mislabelled this as "SinglePixelAttack".
    adversary = attacks.DDNL2Attack(
        model_target,
        nb_iter=100,
        gamma=0.05,
        init_norm=1.0,
        quantize=True,
        levels=256,
        clip_min=0.0,
        clip_max=1.0,
        targeted=False,
        loss_fn=nn.CrossEntropyLoss(reduction="sum"))
    # adversary = attacks.LinfPGDAttack(model_target, loss_fn=nn.CrossEntropyLoss(reduction="sum"),
    #        eps=0.05, nb_iter=40, eps_iter=0.01, rand_init=True, clip_min=0.0, clip_max=1.0, targeted=False)

    running_corrects_adv_untargeted = 0

    # start from a clean output directory
    if os.path.isdir(args.output_path):
        shutil.rmtree(args.output_path)

    for batch_idx, (inputs, labels) in enumerate(data_loaders):

        cln_data, true_label = inputs.to(DEVICE), labels.to(DEVICE)
        print()
        print('clean data shape: {}, true label: {}'.format(
            cln_data.shape, true_label))

        adv_untargeted = adversary.perturb(cln_data, true_label)

        # predict adversarial samples; count those the model still gets right
        outputs = model_target(adv_untargeted.to(DEVICE))
        _, predicted = torch.max(outputs, 1)
        running_corrects_adv_untargeted += torch.sum(
            predicted == true_label.data)
        print('untargeted perturb predict label: ', predicted)

        # save adversarial images to local
        for idx, adver_seed in enumerate(adv_untargeted):
            # fix: adver_seed_dir was unbound (or stale from the previous
            # sample) when the label did not match any class_to_idx entry
            adver_seed_dir = None
            for key, value in class_to_idx.items():
                if true_label[idx].item() == value:
                    adver_seed_dir = os.path.join(args.output_path, key)
                    if not os.path.isdir(adver_seed_dir):
                        os.makedirs(adver_seed_dir)
            if adver_seed_dir is None:
                # no class directory for this label; skip instead of crashing
                continue

            adver_seed_path = os.path.join(
                adver_seed_dir,
                str(batch_idx) + '_' + str(idx) + '.jpg')
            torchvision.utils.save_image(adver_seed,
                                         adver_seed_path,
                                         normalize=True,
                                         scale_each=True)

    print('running_corrects_adver_untargeted: {}'.format(
        running_corrects_adv_untargeted))
# ===== Beispiel #21 (example separator from scrape; score: 0) =====
def create_data(folder_path, file_name, label_val, **kwargs):
    """Load the first dataset found under *folder_path*/*file_name* and pair
    it with a constant-label vector of matching length.

    Returns a (data, labels) tuple.
    """
    samples = load_data(folder_path, file_name)[0]
    sample_labels = create_labels(len(samples), label_val)
    return samples, sample_labels
        enc_output, enc_hidden = self.encoder(tokenized_result['input'], enc_hidden)
        dec_hidden = enc_hidden

        result = []

        for i in range(len(tokenized_result['input'])):
            cur_enc_output = enc_output[i: i + 1]
            cur_dec_hidden = dec_hidden[i: i + 1]
            int_ext = tokenized_result['input_extended'][i: i + 1]
            oov_len = tokenized_result['oov_len'][i: i + 1]
            oov_list =tokenized_result['oov_list'][i]

            cur_result = self.beam_predict_single_item(cur_enc_output, cur_dec_hidden, beam_size, alpha, return_best, int_ext, oov_list, oov_len)
            result.append(cur_result)

        return result


if __name__ == '__main__':
    # Demo run: fit on a 600-sample slice, then exercise beam search with
    # several beam sizes / options on the first two test items.
    embedding_matrix, word_index_dict = load_embedding_matrix()
    train_x, train_y, test_x = load_data()
    sample_inputs = train_x[:600]
    sample_targets = train_y[:600]
    model = AttentionModel(embedding_matrix=embedding_matrix, word_index_dict=word_index_dict)
    model.fit(sample_inputs, sample_targets)

    demo_batch = test_x[:2]
    model.beam_predict(demo_batch, beam_size=3)
    model.beam_predict(demo_batch, beam_size=5)
    model.beam_predict(demo_batch, beam_size=5, return_best=False)
    model.beam_predict(demo_batch, beam_size=5, alpha=0.5, return_best=False)
# ===== Beispiel #23 (example separator from scrape; score: 0) =====
def main(_):
    """Entry point: train a state-space model (SSM) with an SMC objective.

    All configuration comes from ``tf.app.flags.FLAGS``. The function either
    simulates a train/test dataset or loads one from disk, builds the SSM and
    SMC objects, trains, then writes histories, plots, and learned quantities
    into a freshly created result directory (``RLT_DIR``).
    """
    FLAGS = tf.app.flags.FLAGS

    # ========================================= parameter part begins ========================================== #
    Dx = FLAGS.Dx

    # --------------------- SSM flags --------------------- #
    # should q use true_X to sample? (useful for debugging)
    q_uses_true_X = FLAGS.q_uses_true_X

    # whether use input in q and f
    use_input = FLAGS.use_input

    # --------------------- printing and data saving params --------------------- #

    print_freq = FLAGS.print_freq

    # Reconcile mutually-exclusive flags. Note that FLAGS itself is mutated
    # (not just the local copies) so downstream consumers of FLAGS see the
    # reconciled values; the locals are updated in the same statement.
    if FLAGS.use_input:
        FLAGS.use_residual = False
    if FLAGS.use_2_q:
        FLAGS.q_uses_true_X = q_uses_true_X = False
    if FLAGS.flow_transition:
        FLAGS.use_input = use_input = False
    if FLAGS.TFS:
        FLAGS.use_input = use_input = False

    # seed both TF and NumPy for reproducibility
    tf.set_random_seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)

    # ============================================= dataset part ============================================= #
    # generate data from simulation
    if FLAGS.generateTrainingData:
        # "fhn" is presumably FitzHugh-Nagumo (cf. plot_fhn_results below
        # for Dx == 2) — TODO confirm against generate_dataset
        model = "fhn"
        hidden_train, hidden_test, obs_train, obs_test, input_train, input_test = \
            generate_dataset(FLAGS.n_train, FLAGS.n_test, FLAGS.time, model=model, Dy=FLAGS.Dy, lb=-2.5, ub=2.5)

    # load data from file
    else:
        hidden_train, hidden_test, obs_train, obs_test, input_train, input_test = \
            load_data(FLAGS.datadir + FLAGS.datadict, Dx, FLAGS.Di, FLAGS.isPython2, use_input, q_uses_true_X)
        # infer sample counts and sequence length from the loaded arrays
        FLAGS.n_train, FLAGS.n_test, FLAGS.time = obs_train.shape[
            0], obs_test.shape[0], obs_test.shape[1]

    # clip saving_num to avoid it > n_train or n_test
    FLAGS.MSE_steps = min(FLAGS.MSE_steps, FLAGS.time - 1)
    FLAGS.saving_num = saving_num = min(FLAGS.saving_num, FLAGS.n_train,
                                        FLAGS.n_test)

    print("finished preparing dataset")

    # ============================================== model part ============================================== #
    SSM_model = SSM(FLAGS)

    # SMC class to calculate loss
    SMC_train = SMC(SSM_model, FLAGS, name="log_ZSMC_train")

    # =========================================== data saving part =========================================== #
    # create dir to save results; the dir name encodes the key hyper-params
    Experiment_params = {
        "np": FLAGS.n_particles,
        "t": FLAGS.time,
        "bs": FLAGS.batch_size,
        "lr": FLAGS.lr,
        "epoch": FLAGS.epoch,
        "seed": FLAGS.seed,
        "rslt_dir_name": FLAGS.rslt_dir_name
    }

    RLT_DIR = create_RLT_DIR(Experiment_params)
    save_experiment_param(RLT_DIR, FLAGS)
    print("RLT_DIR:", RLT_DIR)

    # ============================================= training part ============================================ #
    mytrainer = trainer(SSM_model, SMC_train, FLAGS)
    mytrainer.init_data_saving(RLT_DIR)

    history, log = mytrainer.train(obs_train, obs_test, hidden_train,
                                   hidden_test, input_train, input_test,
                                   print_freq)

    # ======================================== final data saving part ======================================== #
    with open(RLT_DIR + "history.json", "w") as f:
        json.dump(history, f, indent=4, cls=NumpyEncoder)

    # evaluate latent trajectories (Xs) and reconstructions (y_hat) so they
    # can be saved/plotted as concrete arrays
    Xs, y_hat = log["Xs"], log["y_hat"]
    Xs_val = mytrainer.evaluate(Xs, mytrainer.saving_feed_dict)
    y_hat_val = mytrainer.evaluate(y_hat, mytrainer.saving_feed_dict)
    print("finish evaluating training results")

    plot_training_data(RLT_DIR, hidden_train, obs_train, saving_num=saving_num)
    plot_y_hat(RLT_DIR, y_hat_val, obs_test, saving_num=saving_num)

    # latent-dimension-specific diagnostic plots
    if Dx == 2:
        plot_fhn_results(RLT_DIR, Xs_val)
    if Dx == 3:
        plot_lorenz_results(RLT_DIR, Xs_val)

    testing_data_dict = {
        "hidden_test": hidden_test[0:saving_num],
        "obs_test": obs_test[0:saving_num]
    }
    learned_model_dict = {"Xs_val": Xs_val, "y_hat_val": y_hat_val}
    data_dict = {
        "testing_data_dict": testing_data_dict,
        "learned_model_dict": learned_model_dict
    }

    # pickle the raw arrays alongside the plots for later analysis
    with open(RLT_DIR + "data.p", "wb") as f:
        pickle.dump(data_dict, f)

    # training-curve plots
    plot_MSEs(RLT_DIR, history["MSE_trains"], history["MSE_tests"], print_freq)
    plot_R_square(RLT_DIR, history["R_square_trains"],
                  history["R_square_tests"], print_freq)
    plot_log_ZSMC(RLT_DIR, history["log_ZSMC_trains"],
                  history["log_ZSMC_tests"], print_freq)
            sentence_vecs.append(np.subtract(vec, sub))

        return sentence_vecs

    def generate_sentence_vectors(self, text):
        """Split *text* into sentences and embed each one.

        :param text: Input text
        :return: A list of sentences in this text and sentence vectors of
            these sentences (index-aligned; sentences that become empty
            after preprocessing are dropped from both lists)
        """
        raw_sentences = self.split_into_sentences(text)
        kept_sentences = []
        cleaned_sentences = []
        for raw in raw_sentences:
            cleaned = process_sentence(raw)
            cleaned = cleaned.replace('<start>', '').replace('<end>', '').strip()
            # keep only sentences with content left after marker removal
            if cleaned:
                kept_sentences.append(raw)
                cleaned_sentences.append(cleaned)
        return kept_sentences, self.sentences_to_vecs(cleaned_sentences)


if __name__ == '__main__':
    # Smoke test: embed the sentences of the first training sample and
    # print the resulting vectors.
    train_X, train_Y, test_X = load_data(transform_data=False)
    generator = SentEmbGenerator()
    first_text = train_X[0]
    sentences, sent_vecs = generator.generate_sentence_vectors(first_text)
    print(sent_vecs)
def main(args):
    """Build, train, evaluate a NUFFT encoder/LSTM/decoder network and dump
    sample predictions.

    All architecture hyper-parameters (conv/LSTM/dense layers, kernel sizes,
    strides, pooling/upsampling windows, padding, batch-norm, dropout,
    regularization) and the optimization settings come from *args*.
    Side effects: creates ./saved_predictions/, ./saved_models/ and
    ./training_checkpoints/, optionally loads/saves weights and the model,
    and writes prediction .npy files.

    Fix over the original: ``args.loss is "MAE"`` compared string identity
    rather than equality; argparse-supplied strings are not guaranteed to be
    interned, so the MAE branch could silently never fire.
    """
    directory = Path('./saved_predictions/')
    directory.mkdir(exist_ok=True)
    directory = Path('./saved_models/')
    directory.mkdir(exist_ok=True)
    directory = Path('./training_checkpoints/')
    directory.mkdir(exist_ok=True)
    conv_layers = args.conv_layers
    lstm_layers = args.lstm_layers
    input_yx_size = tuple(args.input_yx_size)
    flatten = args.is_flatten_gt
    is_flat_channel_in = args.is_flat_channel_in
    # dense layers
    dense_layers = args.dense_layers

    # transpose convolutional layers of the Decoder
    trans_conv_layers = args.trans_conv_layers
    padding = {}

    # layer indices listed in args.padding get padding enabled
    for ix in args.padding:
        padding[ix] = True
    # sizes of the kernels, given as flat (index, width, height) triples
    kernel_sizes = {}
    if len(args.kernel_sizes_decoder) % 3 != 0:
        sys.exit("Error input kernel size of decoder, the \
            input shoud be of this form: #index of layer #kernel width #kernel height"
                 )
    for ix in range(0, len(args.kernel_sizes_decoder), 3):
        i = args.kernel_sizes_decoder[ix]
        kernel_sizes[i] = (args.kernel_sizes_decoder[ix + 1],
                           args.kernel_sizes_decoder[ix + 2])

    # sizes of conv strides
    strides_decoder = {}

    if len(args.strides_decoder) % 3 != 0:
        sys.exit("Error input stride size of conv Layers, the \
            input shoud be of this form: #index of layer #stride width #stride height"
                 )
    for ix in range(0, len(args.strides_decoder), 3):
        i = args.strides_decoder[ix]
        strides_decoder[i] = (args.strides_decoder[ix + 1],
                              args.strides_decoder[ix + 2])

    # Upsampling window size
    upsampling_layers = {}

    if len(args.upsampling_layers) % 3 != 0:
        sys.exit("Error input window size of upsmapling Layers, the \
            input shoud be of this form: #index of layer #kernel width #kernel height"
                 )
    for ix in range(0, len(args.upsampling_layers), 3):
        i = args.upsampling_layers[ix]
        upsampling_layers[i] = (args.upsampling_layers[ix + 1],
                                args.upsampling_layers[ix + 2])

    # Encoder Architecture
    # sizes of the kernels
    kernel_sizes_encoder = {}

    if len(args.kernel_sizes_encoder) % 3 != 0:
        sys.exit("Error input kernel size of encoder, the \
            input shoud be of this form: #index of layer #kernel width #kernel height"
                 )
    for ix in range(0, len(args.kernel_sizes_encoder), 3):
        i = args.kernel_sizes_encoder[ix]
        kernel_sizes_encoder[i] = (args.kernel_sizes_encoder[ix + 1],
                                   args.kernel_sizes_encoder[ix + 2])

    # sizes of conv strides
    strides_encoder = {}
    if len(args.strides_encoder) % 3 != 0:
        sys.exit("Error input stride size of conv Layers, the \
            input shoud be of this form: #index of layer #stride width #stride height"
                 )
    for ix in range(0, len(args.strides_encoder), 3):
        i = args.strides_encoder[ix]
        strides_encoder[i] = (args.strides_encoder[ix + 1],
                              args.strides_encoder[ix + 2])

    # sizes of pooling windows
    pooling_layers = {}

    if len(args.pooling_layers) % 3 != 0:
        sys.exit("Error input window size of pooling layers, the \
            input shoud be of this form: #index of layer #window width #window height"
                 )
    for ix in range(0, len(args.pooling_layers), 3):
        i = args.pooling_layers[ix]
        pooling_layers[i] = (args.pooling_layers[ix + 1],
                             args.pooling_layers[ix + 2])

    # batch-norm switches as a (dense layers, conv layers) pair
    is_batchnorm = (args.is_batchnorm_dense, args.is_batchnorm_conv)
    dropout = args.dropout
    reg = args.reg
    flatten = args.is_flatten_gt

    # optimization settings
    is_CuDNNLSTM = args.is_CuDNNLSTM
    batch_size = args.batch_size
    epochs = args.epochs
    learning_rate = args.learning_rate
    num_test_samples = args.num_test_samples
    save_weights = args.save_weights
    load_weights = args.load_weights
    num_samples = args.num_samples
    save_train_prediction = args.save_train_prediction
    save_test_prediction = args.save_test_prediction
    verbose = args.verbose
    validation_ratio = args.validation_ratio
    decay = args.decay

    # initialize Encoder's arch
    """
    input_shape, conv_intermediate_dims,strides ={}, kernel_sizes={},pooling_layers={},
            is_batchnorm = False,dropout = 0.0,reg=0.0
    """
    encoderArch = [
        input_yx_size, conv_layers, strides_encoder, kernel_sizes_encoder,
        pooling_layers, is_batchnorm[1], dropout, reg
    ]
    # initialize Decoder's arch
    """
    dense_intermediate_dims,conv_intermediate_dims,input_shape,strides= {},kernel_sizes={},upsampling_layers ={},
            is_flatten = False, is_batchnorm = (False,False),dropout = 0.0,reg=0.0

    """
    decoderArch = [
        dense_layers, trans_conv_layers, lstm_layers[len(lstm_layers) - 1],
        strides_decoder, kernel_sizes, upsampling_layers, padding, flatten,
        is_batchnorm, dropout, reg
    ]

    # declare and initialize the network
    model = create_nufft_nn(encoderArch, lstm_layers, decoderArch,
                            input_yx_size, is_CuDNNLSTM)

    opt = tf.keras.optimizers.Adam(lr=learning_rate, decay=decay)

    # default objective is MSE; switch to MAE when requested
    loss = tf.keras.losses.MeanSquaredError()
    metric = ['mse']
    if args.loss == "MAE":  # fix: was `is "MAE"` (identity, not equality)
        loss = tf.keras.losses.MeanAbsoluteError()
        metric = ['mae']

    model.compile(
        loss=loss,
        optimizer=opt,
        metrics=metric,
    )

    y_axis_len, x_axis_len = input_yx_size
    input_shape = (None, y_axis_len, x_axis_len)
    model.build(input_shape=input_shape)

    model.summary()
    if load_weights:
        model.load_weights('./training_checkpoints/cp-best_loss.ckpt')

    # edit data_loader.py if you want to play with data

    input_ks, ground_truth = load_data(num_samples,
                                       is_flatten=flatten,
                                       is_flat_channel_in=is_flat_channel_in)
    # normalize input to [0, 1]; keep the max so inputs can be denormalized
    # when saved below
    input_ks_max = np.max(input_ks)
    input_ks = input_ks / input_ks_max

    checkpoint_path = "training_checkpoints/cp-best_loss.ckpt"

    cp_callback = []

    NAME = "NUFFT_NET"

    tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
    cp_callback.append(tensorboard)
    if save_weights:
        cp_callback.append(
            tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                               save_best_only=True,
                                               save_weights_only=True,
                                               verbose=verbose))
    if args.is_train:
        model.fit(input_ks,
                  ground_truth,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_split=validation_ratio,
                  callbacks=cp_callback)

    if args.name_model != "":
        print("Saving The Model.. ")
        model.save('./saved_models/' + args.name_model)

    dict_name = './saved_predictions/'
    # return to image size
    x_axis_len = int(x_axis_len / 4)
    np.random.seed(int(time()))

    if save_train_prediction <= num_samples:
        rand_ix = np.random.randint(0, num_samples - 1, save_train_prediction)

        kspace = input_ks[rand_ix]
        if args.save_input:

            np.save("./saved_predictions/inputs.npy",
                    input_ks[rand_ix] * input_ks_max)
        ground_truth = ground_truth[rand_ix]
        preds = model.predict(kspace, batch_size=save_train_prediction)
        for i in range(save_train_prediction):

            output = np.reshape(preds[i], (y_axis_len * 2, x_axis_len))
            output = output * 255
            # NOTE(review): the two bare `[np.newaxis, ...]` expressions
            # below are no-ops (the result is discarded); kept as-is since
            # removing or assigning them would change saved-array shapes
            output[np.newaxis, ...]
            output_gt = ground_truth[i]
            output_gt[np.newaxis, ...]
            if flatten:
                output_gt = np.reshape(output_gt, (y_axis_len * 2, x_axis_len))
            output = np.concatenate([output, output_gt], axis=0)
            np.save(dict_name + 'prediction%d.npy' % (i + 1), output)

        input_ks, ground_truth = load_data(
            num_test_samples,
            'test',
            is_flatten=flatten,
            is_flat_channel_in=is_flat_channel_in)
        input_ks_max = np.max(input_ks)
        input_ks = input_ks / input_ks_max
    if args.is_eval:
        model.evaluate(input_ks,
                       ground_truth,
                       batch_size,
                       verbose,
                       callbacks=cp_callback)

    if save_test_prediction <= num_test_samples:
        rand_ix = np.random.randint(0, num_test_samples - 1,
                                    save_test_prediction)
        kspace = input_ks[rand_ix]
        if args.save_input:
            np.save("./saved_predictions/test_inputs.npy",
                    input_ks[rand_ix] * input_ks_max)
        ground_truth = ground_truth[rand_ix]
        preds = model.predict(kspace, batch_size=save_test_prediction)
        for i in range(save_test_prediction):

            output = np.reshape(preds[i], (y_axis_len * 2, x_axis_len))
            output = output * 255
            output[np.newaxis, ...]
            output_gt = ground_truth[i]
            output_gt[np.newaxis, ...]
            if flatten:
                output_gt = np.reshape(output_gt, (y_axis_len * 2, x_axis_len))
            output = np.concatenate([output, output_gt], axis=0)
            np.save(dict_name + 'test_prediction%d.npy' % (i + 1), output)
def main(args):
    """Train/evaluate a BiLSTM + Conv2DTranspose NUFFT network under
    ``tf.distribute.MirroredStrategy``, feeding the GPUs ``gpu_load``
    samples at a time, then dump sample predictions.

    Fixes over the original:
      * ``flatten`` and ``is_flat_channel_in`` were referenced in the
        test-set ``load_data`` call without ever being assigned in this
        function (NameError); they are now derived from *args* the same
        way the sibling ``main`` does.
      * ``args.loss is "MAE"`` and ``args.name_model is not ""`` compared
        string identity rather than equality.
    """
    directory = Path('./saved_predictions/')
    directory.mkdir(exist_ok=True)
    directory = Path('./saved_models/')
    directory.mkdir(exist_ok=True)
    directory = Path('./training_checkpoints/')
    directory.mkdir(exist_ok=True)
    # passed arguments
    input_yx_size = tuple(args.input_yx_size)
    batch_size = args.batch_size
    epochs = args.epochs
    learning_rate = args.learning_rate
    num_test_samples = args.num_test_samples
    save_weights = args.save_weights
    load_weights = args.load_weights
    every = args.every
    num_samples = args.num_samples
    save_train_prediction = args.save_train_prediction
    save_test_prediction = args.save_test_prediction
    verbose = args.verbose
    validation_ratio = args.validation_ratio
    # ground-truth layout flags; fall back to False when this entry point's
    # parser does not define them (they were previously used unassigned)
    flatten = getattr(args, 'is_flatten_gt', False)
    is_flat_channel_in = getattr(args, 'is_flat_channel_in', False)
    y_axis_len, x_axis_len = input_yx_size
    val = input("Enter number of samples to load to GPUs: ")
    gpu_load = int(val)
    strategy = tf.distribute.MirroredStrategy()

    # global batch size scales with the number of replicas
    BATCH_SIZE_PER_REPLICA = batch_size
    BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync

    with strategy.scope():
        model = models.Sequential()

        model.add(
            Bidirectional(CuDNNLSTM(256, return_sequences=True),
                          input_shape=input_yx_size))
        model.add(
            CuDNNLSTM(256, return_sequences=True, input_shape=input_yx_size))
        model.add(tf.keras.layers.Reshape([256, 256, 1]))
        model.add(
            Conv2DTranspose(64, (3, 3),
                            padding="same",
                            activation="relu",
                            strides=(2, 1)))
        model.add(
            Conv2DTranspose(64, (3, 3),
                            padding="same",
                            activation="relu",
                            strides=(1, 1)))
        model.add(
            Conv2DTranspose(256, (1, 1), activation="relu", strides=(1, 1)))
        model.add(
            Conv2DTranspose(256, (1, 1), activation="relu", strides=(1, 1)))
        model.add(
            Conv2DTranspose(64, (1, 1), activation="relu", strides=(1, 1)))
        model.add(
            Conv2DTranspose(32, (1, 1), activation="relu", strides=(1, 1)))
        model.add(Conv2DTranspose(1, (1, 1), strides=(1, 1)))

        model.add(tf.keras.layers.Reshape([512, 256]))

        opt = tf.keras.optimizers.Adam(lr=learning_rate, decay=1e-6)

        # default objective is MSE; switch to MAE when requested
        loss = tf.keras.losses.MeanSquaredError()
        metric = ['mse']
        if args.loss == "MAE":  # fix: was `is "MAE"` (identity check)
            loss = tf.keras.losses.MeanAbsoluteError()
            metric = ['mae']

        model.compile(
            loss=loss,
            optimizer=opt,
            metrics=metric,
        )
    model.summary()
    if load_weights:
        model.load_weights('./training_checkpoints/cp-best_loss.ckpt')
    # edit data_loader.py if you want to play with data
    input_ks, ground_truth = load_data(num_samples)

    input_ks = input_ks / np.max(input_ks)

    checkpoint_path = "training_checkpoints/cp-best_loss.ckpt"

    cp_callback = []

    NAME = "NUFFT_NET"

    tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
    cp_callback.append(tensorboard)
    if save_weights:
        cp_callback.append(
            tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                               save_best_only=True,
                                               save_weights_only=True,
                                               verbose=verbose))
    epochs_losses = []
    if args.is_train:
        for e in range(epochs):
            losses = []
            # feed the model gpu_load samples at a time so the whole dataset
            # never has to fit on the GPUs at once
            for bth, i in enumerate(range(0, num_samples, gpu_load)):
                #loss_batch = model.train_on_batch(input_ks[i:i+gpu_load],ground_truth[i:i+gpu_load])
                #losses.append(loss_batch)
                model.fit(input_ks[i:i + gpu_load],
                          ground_truth[i:i + gpu_load],
                          BATCH_SIZE,
                          verbose=verbose,
                          callbacks=cp_callback)
                #print("the %d'th batch in %d'th epoch  and is loss: %f"%(bth,e,loss_batch))
            #if i%every is 0:
            #model.save_weights("./training_checkpoints/cp-{}.ckpt".format(e))
            #avg_loss = sum(losses) / len(losses)
            #epochs_losses.append(avg_loss)
            #print("End of %d th Epoch and AVG loss is %f"%(e,avg_loss))
            print("End of Epoch {}".format(e))

    if args.name_model != "":  # fix: was `is not ""` (identity check)
        model.save('./saved_models/' + args.name_model)

    #____________________saving predicitons

    dict_name = './saved_predictions/'
    # return to image size
    x_axis_len = int(x_axis_len / 4)
    np.random.seed(int(time()))

    if save_train_prediction <= num_samples:
        rand_ix = np.random.randint(0, num_samples - 1, save_train_prediction)
        #kspace = np.zeros((save_train_prediction,
        #y_axis_len,input_ks[rand_ix].shape[1]))
        kspace = input_ks[rand_ix]
        if args.save_input:
            np.save("./saved_predictions/inputs.npy", input_ks[rand_ix])
        ground_truth = ground_truth[rand_ix]
        preds = model.predict(kspace, batch_size=save_train_prediction)
        for i in range(save_train_prediction):

            output = np.reshape(preds[i], (y_axis_len * 2, x_axis_len))
            output = output * 255
            # NOTE(review): the bare `[np.newaxis, ...]` expressions are
            # no-ops (result discarded); kept as-is to preserve behavior
            output[np.newaxis, ...]
            output_gt = ground_truth[i]
            output_gt[np.newaxis, ...]
            output = np.concatenate([output, output_gt], axis=0)
            np.save(dict_name + 'prediction%d.npy' % (i + 1), output)

        input_ks, ground_truth = load_data(
            num_test_samples,
            'test',
            is_flatten=flatten,
            is_flat_channel_in=is_flat_channel_in)

        input_ks = input_ks / np.max(input_ks)
    if args.is_eval:
        model.evaluate(input_ks,
                       ground_truth,
                       batch_size,
                       verbose,
                       callbacks=cp_callback)

    if save_test_prediction <= num_test_samples:
        rand_ix = np.random.randint(0, num_test_samples - 1,
                                    save_test_prediction)
        #kspace = np.zeros((save_test_prediction,
        #y_axis_len,input_ks[rand_ix].shape[1]))
        kspace = input_ks[rand_ix]
        if args.save_input:
            np.save("./saved_predictions/test_inputs.npy", input_ks[rand_ix])
        ground_truth = ground_truth[rand_ix]
        preds = model.predict(kspace, batch_size=save_test_prediction)
        for i in range(save_test_prediction):

            output = np.reshape(preds[i], (y_axis_len * 2, x_axis_len))
            output = output * 255
            output[np.newaxis, ...]
            output_gt = ground_truth[i]
            output_gt[np.newaxis, ...]
            output = np.concatenate([output, output_gt], axis=0)
            np.save(dict_name + 'test_prediction%d.npy' % (i + 1), output)
# ===== Beispiel #27 (example separator from scrape; score: 0) =====
def train_scratch(model_name):
    """Train ``model_name`` from scratch on each configured test dataset.

    For every dataset named in ``opt.dataset.test_dataset_names``, the data
    is loaded, the requested architecture is built, and a :class:`Solver`
    runs training (``fit``) followed by evaluation on the test split.

    Args:
        model_name: Architecture identifier. Written into ``opt.model.name``
            so the rest of the pipeline sees the requested model.
    """
    optimizer = getattr(keras.optimizers, opt.train.optimizer)()
    criterion = keras.losses.CategoricalCrossentropy()
    metric = keras.metrics.CategoricalAccuracy()

    lr_scheduler = keras.callbacks.ReduceLROnPlateau(
        monitor=opt.train.monitor,
        factor=opt.train.lr_factor,
        patience=opt.train.lr_patience,
        min_lr=opt.train.lr_min_lr,
        verbose=1)
    # NOTE(review): checkpoint/tensorboard/csv_logger are constructed but not
    # in the active callback list below -- kept for easy re-enabling, and
    # because their constructors may touch the results/ directories.
    checkpoint = keras.callbacks.ModelCheckpoint(
        filepath='results/models/best_model.h5',
        save_best_only=False,
        monitor='val_loss',
        verbose=1)
    tensorboard = keras.callbacks.TensorBoard(
        log_dir='results/tensorboard_logs', update_freq='epoch')
    csv_logger = keras.callbacks.CSVLogger('results/logs/training.log')
    early_stop = keras.callbacks.EarlyStopping(
        monitor=opt.train.monitor,
        min_delta=opt.train.stop_min_delta,
        patience=opt.train.stop_patience,
        verbose=1,
        restore_best_weights=True)
    callbacks = [lr_scheduler, early_stop]

    # Dispatch table: model name -> builder returning (input_tensor, output_tensor).
    # Replaces the previous if/elif chain; TSCNet is handled separately because
    # it constructs a full model directly instead of a graph pair.
    graph_builders = {
        'ResNet': lambda shape, n_cls: build_resnet(shape, 64, n_cls),
        'FCN': build_fcn,
        'ResNet10': build_resnet10,
        'ResNet18': build_resnet18,
        'ResNet34': build_resnet34,
        'ResNet50': build_resnet50,
        'FCNLSTM': lambda shape, n_cls: build_fcnlstm(
            shape, n_cls, num_cells=8),
    }

    for dataset_name in opt.dataset.test_dataset_names:
        # get data
        logger.info('============loading data============')
        train_data, test_data, input_shape, num_classes = load_data(
            opt, dataset_name)
        logger.info(('===========Done==============='))
        if model_name != opt.model.name:
            opt.model.name = model_name
        # get model; stays None when the name is unknown (preserved behavior:
        # Solver is still invoked with model=None in that case).
        model = None
        if opt.model.name == 'TSCNet':
            model = TSCNet(num_classes, opt.model.num_layers)
        elif opt.model.name in graph_builders:
            x, y = graph_builders[opt.model.name](input_shape, num_classes)
            model = keras.models.Model(inputs=x, outputs=y)
        # summary model (typo 'builed' fixed)
        print('model built!')
        if model:
            model.summary()
        solver = Solver(opt, model, dataset_name, num_classes)
        solver.fit(train_data=train_data,
                   test_data=test_data,
                   optimizer=optimizer,
                   criterion=criterion,
                   callbacks=callbacks,
                   metric=metric)
        solver.evaluate(test_data)
Beispiel #28
0
def link(model_name,
         dev_pred_mentions,
         test_pred_mentions,
         predict_log,
         batch_size=32,
         n_epoch=50,
         learning_rate=0.001,
         optimizer_type='adam',
         embed_type=None,
         embed_trainable=True,
         use_relative_pos=False,
         n_neg=1,
         omit_one_cand=True,
         callbacks_to_add=None,
         swa_type=None,
         predict_on_final_test=True,
         **kwargs):
    """Run entity linking with a pre-trained model and write a submission.

    Loads the trained :class:`LinkModel` matching the experiment name derived
    from the hyperparameters, evaluates it on the dev split (when
    ``dev_pred_mentions`` is given) and generates a submission file for the
    test split.

    Args:
        model_name: Name of the linking model architecture.
        dev_pred_mentions: Predicted mentions for the dev set, or ``None`` to
            skip dev evaluation.
        test_pred_mentions: Predicted mentions for the test set.
        predict_log: Mutable dict of run metadata; updated in place and also
            appended to the performance log. Must already contain
            ``'er_exp_name'``.
        swa_type: When set, load SWA-averaged weights instead of the best
            checkpoint (requires ``'swa'`` in the callbacks).
        predict_on_final_test: Use the ``'test_final'`` split instead of
            ``'test'``.
        **kwargs: Extra model hyperparameters; folded into the experiment
            name and passed through to :class:`LinkModel`.

    Returns:
        The updated ``predict_log`` dict.

    Raises:
        FileNotFoundError: If the expected checkpoint file does not exist.
    """
    config = ModelConfig()
    config.model_name = model_name
    config.batch_size = batch_size
    config.n_epoch = n_epoch
    config.learning_rate = learning_rate
    config.optimizer = get_optimizer(optimizer_type, learning_rate)
    config.embed_type = embed_type
    if embed_type:
        config.embeddings = np.load(
            format_filename(PROCESSED_DATA_DIR,
                            EMBEDDING_MATRIX_TEMPLATE,
                            type=embed_type))
        config.embed_trainable = embed_trainable
    else:
        config.embeddings = None
        config.embed_trainable = True

    config.callbacks_to_add = callbacks_to_add or [
        'modelcheckpoint', 'earlystopping'
    ]

    config.vocab = pickle_load(
        format_filename(PROCESSED_DATA_DIR, VOCABULARY_TEMPLATE, level='char'))
    config.vocab_size = len(config.vocab) + 2
    config.mention_to_entity = pickle_load(
        format_filename(PROCESSED_DATA_DIR, MENTION_TO_ENTITY_FILENAME))
    config.entity_desc = pickle_load(
        format_filename(PROCESSED_DATA_DIR, ENTITY_DESC_FILENAME))

    # Experiment name encodes every hyperparameter that affects the weights,
    # so the right checkpoint file can be located below.
    config.exp_name = '{}_{}_{}_{}_{}_{}'.format(
        model_name, embed_type if embed_type else 'random',
        'tune' if embed_trainable else 'fix', batch_size, optimizer_type,
        learning_rate)
    config.use_relative_pos = use_relative_pos
    if config.use_relative_pos:
        config.exp_name += '_rel'
    config.n_neg = n_neg
    if config.n_neg > 1:
        config.exp_name += '_neg_{}'.format(config.n_neg)
    config.omit_one_cand = omit_one_cand
    if not config.omit_one_cand:
        config.exp_name += '_not_omit'
    if kwargs:
        config.exp_name += '_' + '_'.join(
            [str(k) + '_' + str(v) for k, v in kwargs.items()])
    callback_str = '_' + '_'.join(config.callbacks_to_add)
    callback_str = callback_str.replace('_modelcheckpoint',
                                        '').replace('_earlystopping', '')
    config.exp_name += callback_str

    # logger to log output of training process
    predict_log.update({
        'el_exp_name': config.exp_name,
        'el_batch_size': batch_size,
        'el_optimizer': optimizer_type,
        'el_epoch': n_epoch,
        'el_learning_rate': learning_rate,
        'el_other_params': kwargs
    })

    print('Logging Info - Experiment: %s' % config.exp_name)
    model = LinkModel(config, **kwargs)

    model_save_path = os.path.join(config.checkpoint_dir,
                                   '{}.hdf5'.format(config.exp_name))
    if not os.path.exists(model_save_path):
        raise FileNotFoundError(
            'Recognition model does not exist: {}'.format(model_save_path))
    if swa_type is None:
        model.load_best_model()
    # BUGFIX: check the resolved callback list -- the raw `callbacks_to_add`
    # parameter may be None, and `'swa' in None` raised TypeError.
    elif 'swa' in config.callbacks_to_add:
        model.load_swa_model(swa_type)
        # NOTE(review): assumes predict_log already carries 'er_exp_name'.
        predict_log['er_exp_name'] += '_{}'.format(swa_type)

    dev_data_type = 'dev'
    dev_data = load_data(dev_data_type)
    dev_text_data, dev_gold_mention_entities = [], []
    for data in dev_data:
        dev_text_data.append(data['text'])
        dev_gold_mention_entities.append(data['mention_data'])

    if predict_on_final_test:
        test_data_type = 'test_final'
    else:
        test_data_type = 'test'
    test_data = load_data(test_data_type)
    test_text_data = [data['text'] for data in test_data]

    if dev_pred_mentions is not None:
        print(
            'Logging Info - Evaluate over valid data based on predicted mention:'
        )
        r, p, f1 = model.evaluate(dev_text_data, dev_pred_mentions,
                                  dev_gold_mention_entities)
        dev_performance = 'dev_performance' if swa_type is None else '%s_dev_performance' % swa_type
        predict_log[dev_performance] = (r, p, f1)
    print('Logging Info - Generate submission for test data:')
    test_pred_mention_entities = model.predict(test_text_data,
                                               test_pred_mentions)
    test_submit_file = predict_log[
        'er_exp_name'] + '_' + config.exp_name + '_%s%ssubmit.json' % (
            swa_type + '_' if swa_type else '',
            'final_' if predict_on_final_test else '')
    submit_result(test_submit_file, test_data, test_pred_mention_entities)

    predict_log['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S',
                                             time.localtime())
    write_log(format_filename(LOG_DIR, PERFORMANCE_LOG, model_type='2step'),
              log=predict_log,
              mode='a')
    return predict_log
Beispiel #29
0
    def __init__(self, dataset):
        """Load train/dev/test splits and the relation vocabulary for *dataset*.

        Args:
            dataset: Sub-directory name under ``config.dataset_path``
                containing the JSON data files.
        """
        # Paths are built by plain string concatenation from the configured root.
        train_path = config.dataset_path + dataset + "/train_data_50000.json"
        dev_path = config.dataset_path + dataset + "/dev_data_5000.json"
        # NOTE(review): test_path points at the *dev* file, so the dev and
        # test splits are identical -- confirm this is intentional.
        test_path = config.dataset_path + dataset + "/dev_data_5000.json"
        rel_dict_path = config.dataset_path + dataset + "/rel2id.json"

        # data process: load_data returns the three splits plus the relation
        # id mappings and the number of relation types.
        self.train_data, self.dev_data, self.test_data, self.id2rel, self.rel2id, self.num_rels = load_data(
            train_path, dev_path, test_path, rel_dict_path)
def main_foolbox():
    """Generate adversarial examples with foolbox and save them to disk.

    Loads the target model selected by ``args.model_type``, attacks every
    batch of the test loader with an L2 repeated additive Gaussian noise
    attack, counts how often the model still predicts the true label on the
    perturbed inputs, and saves each adversarial image under a directory
    named after its true class.

    Raises:
        ValueError: If ``args.model_type`` is neither 0 nor 1.
    """
    data_loaders, dataset_sizes, class_to_idx = load_data(args.test_data,
                                                          args.batch_size,
                                                          train_flag=False,
                                                          kwargs=kwargs)
    print('length of dataset: {}'.format(dataset_sizes))

    if args.model_type == 0:
        # Transfer-learning based model: foolbox wraps its `predict` callable.
        from TransferNet import Transfer_Net
        model_target = Transfer_Net(num_class=10, base_net=args.base_net)
        model_target.load_state_dict(torch.load(args.input_model))
        model_target.to(DEVICE)
        model_target = model_target.predict
    elif args.model_type == 1:
        # Plain DNN serialized as a whole module.
        model_target = torch.load(args.target_model)
        model_target.to(DEVICE)
        model_target = model_target.eval()
    else:
        # BUGFIX: previously an unknown model_type fell through and caused a
        # NameError on `model_target` below; fail fast instead.
        raise ValueError('unsupported model_type: {}'.format(args.model_type))

    fmodel = fb.PyTorchModel(model_target, bounds=(0, 1))
    epsilons = [
        0.0, 0.0005, 0.001, 0.0015, 0.002, 0.003, 0.005, 0.01, 0.02, 0.03, 0.1,
        0.3, 0.5, 1.0
    ]

    attacks = [
        fb.attacks.L2RepeatedAdditiveGaussianNoiseAttack(),
        # fb.attacks.LinfDeepFoolAttack(steps=50, candidates=10, overshoot=0.02, loss='logits'),
    ]
    attack = attacks[0]

    running_corrects_adv_untargeted = 0

    # Start from a clean output directory.
    if os.path.isdir(args.output_path):
        shutil.rmtree(args.output_path)

    for batch_idx, (inputs, labels) in enumerate(data_loaders):

        cln_data, true_label = inputs.to(DEVICE), labels.to(DEVICE)
        print()
        print('clean data shape: {}, true label: {}'.format(
            cln_data.shape, true_label))
        print()

        advs, _, success = attack(fmodel,
                                  cln_data,
                                  true_label,
                                  epsilons=epsilons)
        # Index 4 selects the adversarials for epsilon 0.002 in the list above.
        adv_images = advs[4].clone().detach().requires_grad_(True)

        # predict adversarial samples
        outputs = model_target(adv_images.to(DEVICE))
        _, predicted = torch.max(outputs, 1)
        running_corrects_adv_untargeted += torch.sum(
            predicted == true_label.data)
        print('perturbed data predict label: ', predicted)

        # save adversarial images to local, grouped by true-class name
        for idx, adver_seed in enumerate(adv_images):
            label_value = true_label[idx].item()
            # BUGFIX: look the class name up explicitly. Previously
            # `adver_seed_dir` was left unbound (first sample) or stale
            # (later samples) when no class matched the label.
            class_name = next(
                (key for key, value in class_to_idx.items()
                 if value == label_value), None)
            if class_name is None:
                continue  # label not in class_to_idx; skip rather than misfile
            adver_seed_dir = os.path.join(args.output_path, class_name)
            os.makedirs(adver_seed_dir, exist_ok=True)

            adver_seed_path = os.path.join(
                adver_seed_dir,
                str(batch_idx) + '_' + str(idx) + '.jpg')
            torchvision.utils.save_image(adver_seed,
                                         adver_seed_path,
                                         normalize=True,
                                         scale_each=True)

    print('running_corrects_adver: {}'.format(running_corrects_adv_untargeted))