Example #1
def test_HWC2CHW(plot=False):
    """
    Test HWC2CHW
    """
    logger.info("Test HWC2CHW")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()
    hwc2chw_op = c_vision.HWC2CHW()
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=hwc2chw_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=decode_op)

    image_transposed = []
    image = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        image_transposed.append(item1["image"].copy())
        image.append(item2["image"].copy())

        # Check that the data is transposed correctly by transposing the
        # original image from shape (H, W, C) to (C, H, W) and comparing
        mse = diff_mse(item1['image'], item2['image'].transpose(2, 0, 1))
        assert mse == 0
    if plot:
        visualize(image, image_transposed)
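diff_mse is a shared test helper that is not shown in these listings. A minimal sketch consistent with how the tests use it (it must return 0 for identical arrays), together with the HWC-to-CHW transpose it verifies here:

import numpy as np

def diff_mse(img1, img2):
    # Mean squared error between two images; 0 only when they are identical.
    return np.mean((img1.astype(np.float64) - img2.astype(np.float64)) ** 2)

hwc = np.random.randint(0, 256, size=(4, 6, 3), dtype=np.uint8)  # (H, W, C)
chw = hwc.transpose(2, 0, 1)                                     # (C, H, W)
assert diff_mse(chw, hwc.transpose(2, 0, 1)) == 0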
Example #2
def test_random_grayscale_valid_prob(plot=False):
    """
    Test RandomGrayscale Op: valid input, expect to pass
    """
    logger.info("test_random_grayscale_valid_prob")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms1 = [
        py_vision.Decode(),
        # Note: prob is 1 so the output should always be grayscale images
        py_vision.RandomGrayscale(1),
        py_vision.ToTensor()
    ]
    transform1 = py_vision.ComposeOp(transforms1)
    data1 = data1.map(input_columns=["image"], operations=transform1())

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms2 = [
        py_vision.Decode(),
        py_vision.ToTensor()
    ]
    transform2 = py_vision.ComposeOp(transforms2)
    data2 = data2.map(input_columns=["image"], operations=transform2())

    image_gray = []
    image = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_gray.append(image1)
        image.append(image2)
    if plot:
        visualize(image, image_gray)
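The test above only collects the images without asserting on them. Since prob=1 makes the output deterministic, a direct check is possible: a 3-channel grayscale image carries the same values in every channel. A sketch of such an assertion (not part of the original test):

import numpy as np

def assert_grayscale(img_hwc):
    # All three channels of a grayscale RGB image must be identical.
    assert (img_hwc[..., 0] == img_hwc[..., 1]).all()
    assert (img_hwc[..., 1] == img_hwc[..., 2]).all()

# e.g. assert_grayscale(image1) inside the loop above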
Example #3
def test_center_crop_op(height=375, width=375, plot=False):
    """
    Test CenterCrop
    """
    logger.info("Test CenterCrop")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"])
    decode_op = vision.Decode()
    # The test data contains 3 images: [375, 500], [600, 500], [512, 512]
    center_crop_op = vision.CenterCrop([height, width])
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=center_crop_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"])
    data2 = data2.map(input_columns=["image"], operations=decode_op)

    image_cropped = []
    image = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        image_cropped.append(item1["image"].copy())
        image.append(item2["image"].copy())
    if plot:
        visualize(image, image_cropped)
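For reference, CenterCrop's geometry is simple; a minimal NumPy equivalent of what the op computes, assuming the usual centered-window convention (a sketch, not MindSpore's implementation):

import numpy as np

def center_crop(img, height, width):
    # Take a (height, width) window centered in the input image.
    h, w = img.shape[:2]
    top, left = (h - height) // 2, (w - width) // 2
    return img[top:top + height, left:left + width]

img = np.zeros((600, 500, 3), dtype=np.uint8)   # one of the test image sizes
assert center_crop(img, 375, 375).shape == (375, 375, 3)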
Example #4
def main(vis=False):
    X, Y = util.load_dataset(
        '../kaggleData/sorted',
        ['mask_weared_incorrect', 'with_mask', 'without_mask'], 30)

    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.2,
                                                        random_state=0)
    clf = LogisticRegression(random_state=0,
                             max_iter=1000,
                             multi_class='multinomial',
                             solver='lbfgs').fit(X_train, Y_train)

    # predictedTrain = clf.predict(X_train)
    # util.findF1Score(predictedTrain, Y_train, "On Train Set")
    accOnTrain = clf.score(X_train, Y_train)
    print("Acc on train: ", accOnTrain)

    # predictedTest = clf.predict(X_test)
    # util.findF1Score(predictedTest, Y_test, "On Test Set")
    accOnTest = clf.score(X_test, Y_test)
    print("Acc on test: ", accOnTest)

    if vis: util.visualize(X_train, Y_train)
Example #5
def summary_thick_seg(thick_seg, writer, flag, step):
    image = thick_seg.planes[..., 0, 1:4]
    rho = thick_seg.planes[..., 0, 6:7]
    semantic = (thick_seg.planes[..., 0, 7] * args.num_classes).long()
    predict = torch.argmax(thick_seg.predict2d, dim=-1)[..., 0]

    writer.add_images(flag + '/image',
                      image,
                      global_step=step,
                      dataformats='NHWC')
    writer.add_images(flag + '/rho', rho, global_step=step, dataformats='NHWC')
    writer.add_images(flag + '/semantic',
                      util.visualize(semantic),
                      global_step=step,
                      dataformats='NHWC')
    writer.add_images(flag + '/predict',
                      util.visualize(predict),
                      global_step=step,
                      dataformats='NHWC')

    if thick_seg.labels2d is not None:
        labels = thick_seg.labels2d[..., 0]
        writer.add_images(flag + '/labels',
                          util.visualize(labels),
                          global_step=step,
                          dataformats='NHWC')
        writer.add_scalar(flag + '/oa', thick_seg.oa, global_step=step)
        writer.add_scalar(flag + '/mean_iou',
                          thick_seg.mean_iou,
                          global_step=step)
        writer.add_histogram(flag + '/iou', thick_seg.iou, global_step=step)
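Here util.visualize maps integer label maps to RGB images that TensorBoard can display with dataformats='NHWC'. Its implementation is not shown in this listing; a minimal sketch of that role, under the assumption that it applies a fixed per-class palette (the palette itself is hypothetical):

import torch

def visualize(labels, num_classes=20):
    # Map an (N, H, W) tensor of class ids to (N, H, W, 3) RGB images
    # using a fixed (hypothetical) per-class palette.
    palette = torch.linspace(0, 1, steps=num_classes).unsqueeze(1).repeat(1, 3)
    return palette[labels.long().clamp(0, num_classes - 1)]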
Example #6
def test_random_perspective_op(plot=False):
    """
    Test RandomPerspective in python transformations
    """
    logger.info("test_random_perspective_op")
    # define map operations
    transforms1 = [
        py_vision.Decode(),
        py_vision.RandomPerspective(),
        py_vision.ToTensor()
    ]
    transform1 = py_vision.ComposeOp(transforms1)

    transforms2 = [
        py_vision.Decode(),
        py_vision.ToTensor()
    ]
    transform2 = py_vision.ComposeOp(transforms2)

    #  First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform1())
    #  Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform2())

    image_perspective = []
    image_original = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_perspective.append(image1)
        image_original.append(image2)
    if plot:
        visualize(image_original, image_perspective)
Example #7
def test_random_crop_op_c(plot=False):
    """
    Test RandomCrop Op in c transforms
    """
    logger.info("test_random_crop_op_c")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    random_crop_op = c_vision.RandomCrop([512, 512], [200, 200, 200, 200])
    decode_op = c_vision.Decode()

    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=random_crop_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=decode_op)

    image_cropped = []
    image = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        image1 = item1["image"]
        image2 = item2["image"]
        image_cropped.append(image1)
        image.append(image2)
    if plot:
        visualize(image, image_cropped)
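Why a 512 x 512 crop always succeeds here: RandomCrop pads 200 px on each border before cropping, and even the smallest test image (375 x 500, per the note in the CenterCrop example) is large enough after padding. A quick arithmetic check:

height, width, pad = 375, 500, 200          # smallest test image + padding
assert height + 2 * pad >= 512              # 775 >= 512
assert width + 2 * pad >= 512               # 900 >= 512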
Example #8
def main(args):
    random.seed(229)
    # load hyperparameters
    json_path = os.path.join(args.model_dir, "params.json")
    assert os.path.isfile(
        json_path), "No json configuration file found at {}".format(json_path)
    params = deep_net_utils.Params(json_path)

    data_container = load_dataset(args, params)
    X_train, Y_train = data_container['train']
    X_val, Y_val = data_container['val']
    X_test, Y_test = data_container['test']

    clf = LogisticRegression(random_state=0,
                             max_iter=params.iter,
                             multi_class='multinomial',
                             solver='lbfgs').fit(X_train, Y_train)

    predicted_train = clf.predict(X_train)
    predicted_val = clf.predict(X_val)

    train_confus_save_path = os.path.join(args.model_dir,
                                          "confus_f1_train.json")
    val_confus_save_path = os.path.join(args.model_dir, "confus_f1_val.json")

    util.compute_and_save_f1(predicted_train, Y_train, train_confus_save_path)
    util.compute_and_save_f1(predicted_val, Y_val, val_confus_save_path)

    train_accur = clf.score(X_train, Y_train)
    print("Acc on train: ", train_accur)

    val_accur = clf.score(X_val, Y_val)
    print("Acc on validation: ", val_accur)

    if args.vis: util.visualize(X_train, Y_train)
Example #9
def test_random_crop_op_py(plot=False):
    """
    Test RandomCrop op in py transforms
    """
    logger.info("test_random_crop_op_py")
    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms1 = [
        py_vision.Decode(),
        py_vision.RandomCrop([512, 512], [200, 200, 200, 200]),
        py_vision.ToTensor()
    ]
    transform1 = py_vision.ComposeOp(transforms1)
    data1 = data1.map(input_columns=["image"], operations=transform1())
    # Second dataset, for comparison
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms2 = [
        py_vision.Decode(),
        py_vision.ToTensor()
    ]
    transform2 = py_vision.ComposeOp(transforms2)
    data2 = data2.map(input_columns=["image"], operations=transform2())

    crop_images = []
    original_images = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        crop = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        original = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        crop_images.append(crop)
        original_images.append(original)
    if plot:
        visualize(original_images, crop_images)
Example #10
def test_random_crop_comp(plot=False):
    """
    Test RandomCrop and compare between python and c image augmentation
    """
    logger.info("Test RandomCrop with c_transform and py_transform comparison")
    cropped_size = 512

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    random_crop_op = c_vision.RandomCrop(cropped_size)
    decode_op = c_vision.Decode()
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=random_crop_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms = [
        py_vision.Decode(),
        py_vision.RandomCrop(cropped_size),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = data2.map(input_columns=["image"], operations=transform())

    image_c_cropped = []
    image_py_cropped = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_c_cropped.append(c_image)
        image_py_cropped.append(py_image)
    if plot:
        visualize(image_c_cropped, image_py_cropped)
Example #11
def test_center_crop_comp(height=375, width=375, plot=False):
    """
    Test CenterCrop between python and c image augmentation
    """
    logger.info("Test CenterCrop")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = vision.Decode()
    center_crop_op = vision.CenterCrop([height, width])
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=center_crop_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms = [
        py_vision.Decode(),
        py_vision.CenterCrop([height, width]),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = data2.map(input_columns=["image"], operations=transform())

    image_cropped = []
    image = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        # Note: The images aren't exactly the same due to rounding error
        assert diff_mse(py_image, c_image) < 0.001
        image_cropped.append(c_image.copy())
        image.append(py_image.copy())
    if plot:
        visualize(image, image_cropped)
Example #12
    def train(self, model, train_dataloader, eval_dataloader, val_dict):
        device = self.device
        model.to(device)

        optimizer = AdamW(model.parameters(), lr=self.lr)
        global_idx = 0
        best_scores = {'F1': -1.0, 'EM': -1.0}
        tensorboard_writer = SummaryWriter(self.save_dir)

        for epoch_num in range(self.num_epochs):
            self.log.info(f'Epoch: {epoch_num}')
            with torch.enable_grad(), tqdm(total=len(train_dataloader.dataset),
                                           position=0,
                                           leave=True) as progress_bar:
                for batch in train_dataloader:
                    optimizer.zero_grad()
                    model.train()
                    input_ids = batch['input_ids'].to(device)
                    attention_mask = batch['attention_mask'].to(device)
                    start_positions = batch['start_positions'].to(device)
                    end_positions = batch['end_positions'].to(device)
                    outputs = model(input_ids,
                                    attention_mask=attention_mask,
                                    start_positions=start_positions,
                                    end_positions=end_positions)
                    loss = outputs[0]
                    loss.backward()

                    optimizer.step()

                    progress_bar.update(len(input_ids))
                    progress_bar.set_postfix(epoch=epoch_num, NLL=loss.item())
                    tensorboard_writer.add_scalar('train/NLL', loss.item(),
                                                  global_idx)
                    if (global_idx % self.eval_every) == 0:
                        self.log.info(f'Evaluating at step {global_idx}...')
                        preds, curr_score = self.evaluate(model,
                                                          eval_dataloader,
                                                          val_dict,
                                                          return_preds=True)
                        results_str = ', '.join(f'{k}: {v:05.2f}'
                                                for k, v in curr_score.items())
                        self.log.info('Visualizing in TensorBoard...')
                        for k, v in curr_score.items():
                            tensorboard_writer.add_scalar(
                                f'val/{k}', v, global_idx)
                        self.log.info(f'Eval {results_str}')
                        if self.visualize_predictions:
                            util.visualize(tensorboard_writer,
                                           pred_dict=preds,
                                           gold_dict=val_dict,
                                           step=global_idx,
                                           split='val',
                                           num_visuals=self.num_visuals)
                        if curr_score['F1'] >= best_scores['F1']:
                            best_scores = curr_score
                            self.save(model)
                    global_idx += 1
        return best_scores
Example #13
def test_linear_transformation_op(plot=False):
    """
    Test LinearTransformation op: verify if images transform correctly
    """
    logger.info("test_linear_transformation_01")

    # Initialize parameters
    height = 50
    width = 50
    dim = 3 * height * width
    transformation_matrix = np.eye(dim)
    mean_vector = np.zeros(dim)

    # Define operations
    transforms = [
        py_vision.Decode(),
        py_vision.CenterCrop([height, width]),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform())
    # Note: with an identity transformation matrix and an all-zero mean
    #       vector, the output is expected to be identical to the input.
    data1 = data1.map(input_columns=["image"],
                      operations=py_vision.LinearTransformation(
                          transformation_matrix, mean_vector))

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    image_transformed = []
    image = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_transformed.append(image1)
        image.append(image2)

        mse = diff_mse(image1, image2)
        assert mse == 0
    if plot:
        visualize(image, image_transformed)
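The identity check above works because LinearTransformation flattens the C x H x W image to a vector and applies a linear map; with the identity matrix and a zero mean vector the map is a no-op. A toy version of the computation, assuming the torchvision-style convention of (x - mean) @ W (a sketch, not MindSpore's internals):

import numpy as np

dim = 12                                    # toy size; the test uses 3 * 50 * 50
x = np.random.rand(dim).astype(np.float32)  # flattened image
y = (x - np.zeros(dim, dtype=np.float32)) @ np.eye(dim, dtype=np.float32)
assert np.allclose(x, y)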
Example #14
def test_five_crop_op(plot=False):
    """
    Test FiveCrop
    """
    logger.info("test_five_crop")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms_1 = [
        vision.Decode(),
        vision.ToTensor(),
    ]
    transform_1 = vision.ComposeOp(transforms_1)
    data1 = data1.map(input_columns=["image"], operations=transform_1())

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms_2 = [
        vision.Decode(),
        vision.FiveCrop(200),
        lambda images: np.stack([vision.ToTensor()(image)
                                 for image in images])  # 4D stack of 5 images
    ]
    transform_2 = vision.ComposeOp(transforms_2)
    data2 = data2.map(input_columns=["image"], operations=transform_2())

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        num_iter += 1
        image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_2 = item2["image"]

        logger.info("shape of image_1: {}".format(image_1.shape))
        logger.info("shape of image_2: {}".format(image_2.shape))

        logger.info("dtype of image_1: {}".format(image_1.dtype))
        logger.info("dtype of image_2: {}".format(image_2.dtype))
        if plot:
            visualize(np.array([image_1] * 10),
                      (image_2 * 255).astype(np.uint8).transpose(0, 2, 3, 1))

        # The output data should be of a 4D tensor shape, a stack of 5 images.
        assert len(image_2.shape) == 4
        assert image_2.shape[0] == 5
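The lambda in transforms_2 is needed because FiveCrop returns a tuple of five images (four corners plus the center) rather than a single image; converting each crop and stacking yields the (5, C, H, W) batch the assertions check. The same shape logic in plain NumPy:

import numpy as np

crops = [np.zeros((3, 200, 200), dtype=np.float32) for _ in range(5)]
stacked = np.stack(crops)       # one 4D tensor: a stack of 5 CHW images
assert stacked.shape == (5, 3, 200, 200)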
Example #15
def util_test_ten_crop(crop_size, vertical_flip=False, plot=False):
    """
    Utility function for testing TenCrop. Input arguments are given by other tests
    """
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms_1 = [
        vision.Decode(),
        vision.ToTensor(),
    ]
    transform_1 = vision.ComposeOp(transforms_1)
    data1 = data1.map(input_columns=["image"], operations=transform_1())

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms_2 = [
        vision.Decode(),
        vision.TenCrop(crop_size, use_vertical_flip=vertical_flip),
        lambda images: np.stack([vision.ToTensor()(image)
                                 for image in images])  # 4D stack of 10 images
    ]
    transform_2 = vision.ComposeOp(transforms_2)
    data2 = data2.map(input_columns=["image"], operations=transform_2())
    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        num_iter += 1
        image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_2 = item2["image"]

        logger.info("shape of image_1: {}".format(image_1.shape))
        logger.info("shape of image_2: {}".format(image_2.shape))

        logger.info("dtype of image_1: {}".format(image_1.dtype))
        logger.info("dtype of image_2: {}".format(image_2.dtype))

        if plot:
            visualize(np.array([image_1] * 10),
                      (image_2 * 255).astype(np.uint8).transpose(0, 2, 3, 1))

        # The output data should be of a 4D tensor shape, a stack of 10 images.
        assert len(image_2.shape) == 4
        assert image_2.shape[0] == 10
Example #16
def test_random_choice_comp(plot=False):
    """
    Test RandomChoice and compare with single CenterCrop results
    """
    logger.info("test_random_choice_comp")
    # define map operations
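    # Note: with a one-op list, RandomChoice always applies CenterCrop(64),
    # so the two pipelines below should produce identical images (mse == 0).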
    transforms_list = [py_vision.CenterCrop(64)]
    transforms1 = [
        py_vision.Decode(),
        py_vision.RandomChoice(transforms_list),
        py_vision.ToTensor()
    ]
    transform1 = py_vision.ComposeOp(transforms1)

    transforms2 = [
        py_vision.Decode(),
        py_vision.CenterCrop(64),
        py_vision.ToTensor()
    ]
    transform2 = py_vision.ComposeOp(transforms2)

    #  First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform1())
    #  Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform2())

    image_choice = []
    image_original = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_choice.append(image1)
        image_original.append(image2)

        mse = diff_mse(image1, image2)
        assert mse == 0
    if plot:
        visualize(image_original, image_choice)
Example #17
def test_random_crop_and_resize_op_py(plot=False):
    """
    Test RandomCropAndResize op in py transforms
    """
    logger.info("test_random_crop_and_resize_op_py")
    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    # With these inputs we expect the code to crop the whole image
    transforms1 = [
        py_vision.Decode(),
        py_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3)),
        py_vision.ToTensor()
    ]
    transform1 = py_vision.ComposeOp(transforms1)
    data1 = data1.map(input_columns=["image"], operations=transform1())
    # Second dataset, for comparison
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms2 = [py_vision.Decode(), py_vision.ToTensor()]
    transform2 = py_vision.ComposeOp(transforms2)
    data2 = data2.map(input_columns=["image"], operations=transform2())
    num_iter = 0
    crop_and_resize_images = []
    original_images = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        crop_and_resize = (item1["image"].transpose(1, 2, 0) * 255).astype(
            np.uint8)
        original = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        original = cv2.resize(original, (512, 256))
        mse = diff_mse(crop_and_resize, original)
        # Due to rounding error, the MSE for the Python ops is not exactly 0
        assert mse <= 0.05
        logger.info("random_crop_and_resize_op_{}, mse: {}".format(
            num_iter + 1, mse))
        num_iter += 1
        crop_and_resize_images.append(crop_and_resize)
        original_images.append(original)
    if plot:
        visualize(original_images, crop_and_resize_images)
Example #18
def test_HWC2CHW_comp(plot=False):
    """
    Test HWC2CHW between python and c image augmentation
    """
    logger.info("Test HWC2CHW with c_transform and py_transform comparison")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()
    hwc2chw_op = c_vision.HWC2CHW()
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=hwc2chw_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms = [
        py_vision.Decode(),
        py_vision.ToTensor(),
        py_vision.HWC2CHW()
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = data2.map(input_columns=["image"], operations=transform())

    image_c_transposed = []
    image_py_transposed = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        # Compare the image from the c_transform against the py_transform one
        mse = diff_mse(py_image, c_image)
        # The images aren't exactly the same due to rounding error
        assert mse < 0.001

        image_c_transposed.append(item1["image"].copy())
        image_py_transposed.append(item2["image"].copy())

    if plot:
        visualize(image_c_transposed, image_py_transposed)
Example #19
def test_random_vertical_comp(plot=False):
    """
    Test RandomVerticalFlip and compare between the Python and C image augmentation ops
    """
    logger.info("test_random_vertical_comp")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()
    # Note: with prob=1, the image is always flipped
    random_vertical_op = c_vision.RandomVerticalFlip(1)
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=random_vertical_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms = [
        py_vision.Decode(),
        # Note: with prob=1, the image is always flipped
        py_vision.RandomVerticalFlip(1),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = data2.map(input_columns=["image"], operations=transform())

    images_list_c = []
    images_list_py = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        image_c = item1["image"]
        image_py = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        images_list_c.append(image_c)
        images_list_py.append(image_py)

        # Check if the output images are the same
        mse = diff_mse(image_c, image_py)
        assert mse < 0.001
    if plot:
        visualize(images_list_c, images_list_py)
Example #20
def train(args, params):
    random.seed(229)

    # Check model_type is set (if 'model_type' is 'svm', 'kernel' is also set)
    assert hasattr(params, 'model_type')
    if params.model_type == 'svm':
        assert hasattr(params, 'kernel')

    data_container = util.load_dataset_for_split(args, params)
    X_train, Y_train = data_container['train']
    X_val, Y_val = data_container['val']
    X_test, Y_test = data_container['test']

    if args.verbose:
        print(
            "We have {}, {}, and {} training examples for classes 0, 1, 2 respectively."
            .format(np.sum(np.where(Y_train == 0, 1, 0)),
                    np.sum(np.where(Y_train == 1, 1, 0)),
                    np.sum(np.where(Y_train == 2, 1, 0))))

    model = fit_softmax_or_svm(X_train, Y_train, params)

    model_file_path = os.path.join(args.model_dir, "finalized_model.sav")
    with open(model_file_path, 'wb') as f:
        pickle.dump(model, f)

    predicted_train = model.predict(X_train)
    predicted_val = model.predict(X_val)

    train_confus_save_path = os.path.join(args.model_dir,
                                          "confus_f1_train.json")
    val_confus_save_path = os.path.join(args.model_dir, "confus_f1_val.json")

    util.compute_and_save_f1(predicted_train, Y_train, train_confus_save_path)
    util.compute_and_save_f1(predicted_val, Y_val, val_confus_save_path)
    if params.model_type == 'softmax' and args.verbose:
        print("Model converged in {} iterations.".format(model.n_iter_))

    train_accur = model.score(X_train, Y_train)
    print("Acc on train: ", train_accur)

    val_accur = model.score(X_val, Y_val)
    print("Acc on validation: ", val_accur)

    if args.vis: util.visualize(X_train, Y_train)
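fit_softmax_or_svm is project code that is not shown in this listing. Based on the params fields checked above (model_type, kernel, iter) and the LogisticRegression settings used in the earlier examples, a plausible minimal sketch (a hypothetical helper, not the project's actual implementation):

from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC

def fit_softmax_or_svm(X, Y, params):
    # Hypothetical helper: dispatch on params.model_type.
    if params.model_type == 'svm':
        return SVC(kernel=params.kernel, random_state=0).fit(X, Y)
    return LogisticRegression(random_state=0,
                              max_iter=params.iter,
                              multi_class='multinomial',
                              solver='lbfgs').fit(X, Y)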
Example #21
def test_random_crop_and_resize_op_c(plot=False):
    """
    Test RandomCropAndResize op in c transforms
    """
    logger.info("test_random_crop_and_resize_op_c")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()
    # With these inputs we expect the code to crop the whole image
    random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (2, 2),
                                                           (1, 3))
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"],
                      operations=random_crop_and_resize_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=decode_op)
    num_iter = 0
    crop_and_resize_images = []
    original_images = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        crop_and_resize = item1["image"]
        original = item2["image"]
        # Note: resize the original image to the same size as the RandomResizedCrop() output
        original = cv2.resize(original, (512, 256))
        mse = diff_mse(crop_and_resize, original)
        assert mse == 0
        logger.info("random_crop_and_resize_op_{}, mse: {}".format(
            num_iter + 1, mse))
        num_iter += 1
        crop_and_resize_images.append(crop_and_resize)
        original_images.append(original)
    if plot:
        visualize(original_images, crop_and_resize_images)
Example #22
def main():
    X, Y = util.load_dataset('../data/small/', ['incorrect', 'correct'], 96)

    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.2,
                                                        random_state=0)
    clf = LogisticRegression(random_state=0,
                             max_iter=1000).fit(X_train, Y_train)
    # clf = SGDClassifier(loss='log', max_iter=10000).fit(X_train, Y_train)

    predictedTrain = clf.predict(X_train)
    util.findF1Score(predictedTrain, Y_train, "On Train Set")
    accOnTrain = clf.score(X_train, Y_train)
    print("Acc on train: ", accOnTrain)

    predictedTest = clf.predict(X_test)
    util.findF1Score(predictedTest, Y_test, "On Test Set")
    accOnTest = clf.score(X_test, Y_test)
    print("Acc on test: ", accOnTest)

    util.visualize(X_train, Y_train)
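util.findF1Score is also project code not shown here. Under the assumption that it wraps scikit-learn's F1 metric, a minimal sketch:

from sklearn.metrics import f1_score

def findF1Score(predicted, actual, label):
    # Hypothetical helper: print a weighted F1 score with a caption.
    print(label, "F1:", f1_score(actual, predicted, average='weighted'))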
Example #23
def main(vis=False):
    X, Y = util.load_dataset('../FinalPhotosData',
                             ['mask_weared_incorrect', 'with_mask', 'without_mask'],
                             224)

    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
    svm = SVC(kernel='rbf', probability=True, random_state=42)

    # fit model
    svm.fit(X_train, Y_train)

    # predictedTrain = clf.predict(X_train)
    # util.findF1Score(predictedTrain, Y_train, "On Train Set")
    Y_pred = svm.predict(X_test)

    # calculate accuracy
    accuracy = accuracy_score(Y_test, Y_pred)
    print('Model accuracy is: ', accuracy)

    # # predictedTest = clf.predict(X_test)
    # # util.findF1Score(predictedTest, Y_test, "On Test Set")
    # accOnTest = clf.score(X_test, Y_test)
    # print("Acc on test: ", accOnTest)

    if vis: util.visualize(X_train, Y_train)
Example #24
def main():
    print("Analyzer has started..", flush=True)
    if not os.path.exists(DATA_FOLDER_PATH):
        os.mkdir(DATA_FOLDER_PATH)

    if os.path.exists(DATA_ANALYZER_PATH):
        with open(DATA_ANALYZER_PATH, 'r') as f:
            data_analyzer = json.load(f)
    else:
        data_analyzer = data_template

    data_analyzer = addict.Dict(data_analyzer)

    while True:
        t0 = time.time()

        last_id_processed = data_analyzer['last_id_processed']
        user_answers = requests.get(
            f"{URL_DATA_DISTRIBUTOR}/user_answers/{last_id_processed}").json()

        print(user_answers)
        if len(user_answers):
            new_data_analyzer = parseUserAnswers(user_answers,
                                                 URL_DATA_DISTRIBUTOR)
            data_analyzer = updateAnalyzerDict(data_analyzer,
                                               new_data_analyzer)

            visualize(data_analyzer, ANALYZE_PATH)

            with open(DATA_ANALYZER_PATH, 'w') as f:
                json.dump(data_analyzer, f)

        dt = time.time() - t0
        print(f"Analyzer took {dt} seconds\n", flush=True)
        time.sleep(max(0.0, SLEEP_TIME - dt))  # guard against a negative delay
Example #25
    def print_net(self, epoch, display_flag=True):
        # Save the ground-truth input images (only on the first call).
        if self.main_img_visual is False:
            imgs = self.train_set_x.reshape((self.train_set_x.shape[0].eval(),
                                             self.height, self.width,
                                             self.channels))
            imgs = imgs.eval()[self.visualize_ind]
            loc_im = '../visuals/images/image_'
            imgs = util.visualize(imgs, prefix=loc_im,
                                  is_color=self.color_filter
                                  if self.channels == 3 else False)
        self.main_img_visual = True
        # Visualize activations.
        activity_now = self.activities(0)
        bar = progressbar.ProgressBar(maxval=len(self.nkerns),
                                      widgets=[progressbar.AnimatedMarker(),
                                               ' visualizing ',
                                               ' ', progressbar.Percentage(),
                                               ' ', progressbar.ETA(),
                                               ]).start()
        for m in xrange(len(self.nkerns)):  # for each layer
            loc_ac = '../visuals/activities/layer_' + str(m) + "/epoch_" + str(epoch)
            if not os.path.exists(loc_ac):
                os.makedirs(loc_ac)
            loc_ac = loc_ac + "/filter_"
            current_activity = activity_now[m]
            current_activity = current_activity[self.visualize_ind]
            imgs = util.visualize(current_activity, loc_ac, is_color=False)

            current_weights = self.ConvLayers.weights[m]  # weights of this layer
            loc_we = '../visuals/filters/layer_' + str(m) + "/epoch_" + str(epoch)
            if not os.path.exists(loc_we):
                os.makedirs(loc_we)
            loc_we = loc_we + "/filter_"
            if len(current_weights.shape.eval()) == 5:
                imgs = util.visualize(numpy.squeeze(
                    current_weights.dimshuffle(0, 3, 4, 1, 2).eval()),
                    prefix=loc_we, is_color=self.color_filter)
            else:
                imgs = util.visualize(current_weights.dimshuffle(0, 2, 3, 1).eval(),
                                      prefix=loc_we, is_color=self.color_filter)
            bar.update(m + 1)
        bar.finish()
Example #26
def main(args):
    # Set up logging
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=False)
    log = util.get_logger(args.save_dir, args.name)
    device, gpu_ids = util.get_available_devices()
    args.batch_size *= max(1, len(gpu_ids))
    log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')

    # Get embeddings
    log.info('Loading embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)

    models = {}

    if args.use_ensemble:

        total_models = 0
        for model_name in ['bidaf', 'bidafextra', 'fusionnet']:

            models_list = []

            for model_file in glob.glob(
                    f'{args.load_path}/{model_name}-*/{args.ensemble_models}'):

                # Get model
                log.info('Building model...')
                if model_name == 'bidaf':
                    model = BiDAF(word_vectors=word_vectors,
                                  hidden_size=args.hidden_size)
                elif model_name == 'bidafextra':
                    model = BiDAFExtra(word_vectors=word_vectors, args=args)
                elif model_name == 'fusionnet':
                    model = FusionNet(word_vectors=word_vectors, args=args)

                model = nn.DataParallel(model, gpu_ids)
                log.info(f'Loading checkpoint from {model_file}...')
                model = util.load_model(model,
                                        model_file,
                                        gpu_ids,
                                        return_step=False)

                # Load each model on CPU (have plenty of RAM ...)
                model = model.cpu()
                model.eval()

                models_list.append(model)

            models[model_name] = models_list

            total_models += len(models_list)

        log.info(f'Using an ensemble of {total_models} models')

    else:

        device, gpu_ids = util.get_available_devices()

        # Get model
        log.info('Building model...')
        if args.model == 'bidaf':
            model = BiDAF(word_vectors=word_vectors,
                          hidden_size=args.hidden_size)
        elif args.model == 'bidafextra':
            model = BiDAFExtra(word_vectors=word_vectors, args=args)
        elif args.model == 'fusionnet':
            model = FusionNet(word_vectors=word_vectors, args=args)

        model = nn.DataParallel(model, gpu_ids)
        log.info(f'Loading checkpoint from {args.load_path}...')
        model = util.load_model(model,
                                args.load_path,
                                gpu_ids,
                                return_step=False)
        model = model.to(device)
        model.eval()

        models[args.model] = [model]

    # Get data loader
    log.info('Building dataset...')
    record_file = vars(args)[f'{args.split}_record_file']
    dataset = SQuAD(record_file, args)
    data_loader = data.DataLoader(dataset,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  num_workers=args.num_workers,
                                  collate_fn=collate_fn)

    # Evaluate
    log.info(f'Evaluating on {args.split} split...')
    nll_meter = util.AverageMeter()
    pred_dict = {}  # Predictions for TensorBoard
    sub_dict = {}  # Predictions for submission
    eval_file = vars(args)[f'{args.split}_eval_file']
    with open(eval_file, 'r') as fh:
        gold_dict = json_load(fh)
    with torch.no_grad(), \
            tqdm(total=len(dataset)) as progress_bar:
        for cw_idxs, cc_idxs, qw_idxs, qc_idxs, cw_pos, cw_ner, cw_freq, cqw_extra, y1, y2, ids in data_loader:
            # Setup for forward
            cw_idxs = cw_idxs.to(device)
            qw_idxs = qw_idxs.to(device)
            batch_size = cw_idxs.size(0)

            p1s = []
            p2s = []

            for model_name in models:
                for model in models[model_name]:
                    # Move model to GPU to evaluate
                    model = model.to(device)

                    # Forward (the model was moved to the device above)
                    if model_name == 'bidaf':
                        log_p1, log_p2 = model(cw_idxs, qw_idxs)
                    else:
                        log_p1, log_p2 = model(cw_idxs, qw_idxs,
                                               cw_pos, cw_ner,
                                               cw_freq, cqw_extra)

                    log_p1, log_p2 = log_p1.cpu(), log_p2.cpu()

                    if not args.use_ensemble:
                        y1, y2 = y1.to(device), y2.to(device)
                        log_p1, log_p2 = log_p1.to(device), log_p2.to(device)

                        loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
                        nll_meter.update(loss.item(), batch_size)

                    # Move model back to CPU to release GPU memory
                    model = model.cpu()

                    # Collect this model's span probabilities for the ensemble
                    p1 = log_p1.exp().unsqueeze(-1).cpu()
                    p2 = log_p2.exp().unsqueeze(-1).cpu()
                    p1s.append(p1)
                    p2s.append(p2)

            best_ps = torch.max(
                torch.cat([
                    torch.cat(p1s, -1).unsqueeze(-1),
                    torch.cat(p2s, -1).unsqueeze(-1)
                ], -1), -2)[0]

            p1, p2 = best_ps[:, :, 0], best_ps[:, :, 1]
            starts, ends = util.discretize(p1, p2, args.max_ans_len,
                                           args.use_squad_v2)

            # Log info
            progress_bar.update(batch_size)
            if args.split != 'test':
                # No labels for the test set, so NLL would be invalid
                progress_bar.set_postfix(NLL=nll_meter.avg)

            idx2pred, uuid2pred = util.convert_tokens(gold_dict, ids.tolist(),
                                                      starts.tolist(),
                                                      ends.tolist(),
                                                      args.use_squad_v2)
            pred_dict.update(idx2pred)
            sub_dict.update(uuid2pred)

    # Log results (except for test set, since it does not come with labels)
    if args.split != 'test':
        results = util.eval_dicts(gold_dict, pred_dict, args.use_squad_v2)
        results_list = [('NLL', nll_meter.avg), ('F1', results['F1']),
                        ('EM', results['EM'])]
        if args.use_squad_v2:
            results_list.append(('AvNA', results['AvNA']))
        results = OrderedDict(results_list)

        # Log to console
        results_str = ', '.join(f'{k}: {v:05.2f}' for k, v in results.items())
        log.info(f'{args.split.title()} {results_str}')

        # Log to TensorBoard
        tbx = SummaryWriter(args.save_dir)
        util.visualize(tbx,
                       pred_dict=pred_dict,
                       eval_path=eval_file,
                       step=0,
                       split=args.split,
                       num_visuals=args.num_visuals)

    # Write submission file
    sub_path = join(args.save_dir, args.split + '_' + args.sub_file)
    log.info(f'Writing submission file to {sub_path}...')
    with open(sub_path, 'w', newline='', encoding='utf-8') as csv_fh:
        csv_writer = csv.writer(csv_fh, delimiter=',')
        csv_writer.writerow(['Id', 'Predicted'])
        for uuid in sorted(sub_dict):
            csv_writer.writerow([uuid, sub_dict[uuid]])
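The max-over-models reduction in the middle of the evaluation loop is dense. A toy-shape walkthrough of the same tensor operations (shapes only; values are random): each model contributes a (batch, seq_len, 1) probability tensor, and the max over the model dimension keeps the most confident model's probability at each position.

import torch

p1s = [torch.rand(2, 5, 1) for _ in range(3)]   # 3 models, batch 2, seq 5
p2s = [torch.rand(2, 5, 1) for _ in range(3)]
stacked = torch.cat([torch.cat(p1s, -1).unsqueeze(-1),
                     torch.cat(p2s, -1).unsqueeze(-1)], -1)  # (2, 5, 3, 2)
best_ps = torch.max(stacked, -2)[0]                          # (2, 5, 2)
assert best_ps.shape == (2, 5, 2)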
Example #27
from sys import argv

import hierarchy_cluster
import util

data_set_location = "dataset/Hierarchical_2.csv"

if __name__ == "__main__":

    # Load dataset
    data = util.load_data_set(data_set_location)

    # Visualization with no color
    util.visualize(data)

    # Choose the dissimilarity (linkage) method from the CLI argument,
    # e.g.: python3 main.py 1
    # 1 for single link
    # 2 for complete link
    # 3 for group average
    # 4 for centroid based
    try:
        linkage_type = int(argv[1])
    except (IndexError, ValueError):
        linkage_type = 1  # default to single link when no argument is given
    hierarchy_cluster.agglomerative_clustering(data, type=linkage_type)
Example #28
File: main.py Project: pyro-ppl/pyro
def main(args):
    device = torch.device(
        "cuda:0" if torch.cuda.is_available() and args.cuda else "cpu")
    results = []
    columns = []

    for num_quadrant_inputs in args.num_quadrant_inputs:
        # adds an s in case of plural quadrants
        maybes = "s" if num_quadrant_inputs > 1 else ""

        print("Training with {} quadrant{} as input...".format(
            num_quadrant_inputs, maybes))

        # Dataset
        datasets, dataloaders, dataset_sizes = get_data(
            num_quadrant_inputs=num_quadrant_inputs, batch_size=128)

        # Train baseline
        baseline_net = baseline.train(
            device=device,
            dataloaders=dataloaders,
            dataset_sizes=dataset_sizes,
            learning_rate=args.learning_rate,
            num_epochs=args.num_epochs,
            early_stop_patience=args.early_stop_patience,
            model_path="baseline_net_q{}.pth".format(num_quadrant_inputs),
        )

        # Train CVAE
        cvae_net = cvae.train(
            device=device,
            dataloaders=dataloaders,
            dataset_sizes=dataset_sizes,
            learning_rate=args.learning_rate,
            num_epochs=args.num_epochs,
            early_stop_patience=args.early_stop_patience,
            model_path="cvae_net_q{}.pth".format(num_quadrant_inputs),
            pre_trained_baseline_net=baseline_net,
        )

        # Visualize conditional predictions
        visualize(
            device=device,
            num_quadrant_inputs=num_quadrant_inputs,
            pre_trained_baseline=baseline_net,
            pre_trained_cvae=cvae_net,
            num_images=args.num_images,
            num_samples=args.num_samples,
            image_path="cvae_plot_q{}.png".format(num_quadrant_inputs),
        )

        # Retrieve conditional log likelihood
        df = generate_table(
            device=device,
            num_quadrant_inputs=num_quadrant_inputs,
            pre_trained_baseline=baseline_net,
            pre_trained_cvae=cvae_net,
            num_particles=args.num_particles,
            col_name="{} quadrant{}".format(num_quadrant_inputs, maybes),
        )
        results.append(df)
        columns.append("{} quadrant{}".format(num_quadrant_inputs, maybes))

    results = pd.concat(results, axis=1, ignore_index=True)
    results.columns = columns
    results.loc["Performance gap", :] = results.iloc[0, :] - results.iloc[1, :]
    results.to_csv("results.csv")
Example #29
def main(args):
    # Set up logging
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=False)
    log = util.get_logger(args.save_dir, args.name)
    log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')
    device, gpu_ids = util.get_available_devices()
    args.batch_size *= max(1, len(gpu_ids))

    # Get embeddings
    log.info('Loading embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)
    ch_vectors = util.torch_from_json(args.char_emb_file)

    # Get model
    log.info('Building model...')
    model = BiDAF(word_vectors=word_vectors,
                  ch_vectors=ch_vectors,
                  hidden_size=args.hidden_size)
    model = nn.DataParallel(model, gpu_ids)
    log.info(f'Loading checkpoint from {args.load_path}...')
    model = util.load_model(model, args.load_path, gpu_ids, return_step=False)
    model = model.to(device)
    model.eval()

    # Get data loader
    log.info('Building dataset...')
    record_file = vars(args)[f'{args.split}_record_file']
    dataset = SQuAD(record_file, args.use_squad_v2)
    data_loader = data.DataLoader(dataset,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  num_workers=args.num_workers,
                                  collate_fn=collate_fn)

    # Evaluate
    log.info(f'Evaluating on {args.split} split...')
    nll_meter = util.AverageMeter()
    pred_dict = {}  # Predictions for TensorBoard
    sub_dict = {}  # Predictions for submission
    eval_file = vars(args)[f'{args.split}_eval_file']
    with open(eval_file, 'r') as fh:
        gold_dict = json_load(fh)
    with torch.no_grad(), \
            tqdm(total=len(dataset)) as progress_bar:
        for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in data_loader:
            # Setup for forward
            cw_idxs = cw_idxs.to(device)
            qw_idxs = qw_idxs.to(device)
            cc_idxs = cc_idxs.to(device)
            qc_idxs = qc_idxs.to(device)
            batch_size = cw_idxs.size(0)

            # Forward
            log_p1, log_p2 = model(cw_idxs, qw_idxs, cc_idxs, qc_idxs)
            y1, y2 = y1.to(device), y2.to(device)
            loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
            nll_meter.update(loss.item(), batch_size)

            # Get F1 and EM scores
            p1, p2 = log_p1.exp(), log_p2.exp()
            starts, ends = util.discretize(p1, p2, args.max_ans_len,
                                           args.use_squad_v2)

            # Log info
            progress_bar.update(batch_size)
            if args.split != 'test':
                # No labels for the test set, so NLL would be invalid
                progress_bar.set_postfix(NLL=nll_meter.avg)

            idx2pred, uuid2pred = util.convert_tokens(gold_dict, ids.tolist(),
                                                      starts.tolist(),
                                                      ends.tolist(),
                                                      args.use_squad_v2)
            pred_dict.update(idx2pred)
            sub_dict.update(uuid2pred)

    # Log results (except for test set, since it does not come with labels)
    if args.split != 'test':
        results = util.eval_dicts(gold_dict, pred_dict, args.use_squad_v2)
        results_list = [('NLL', nll_meter.avg), ('F1', results['F1']),
                        ('EM', results['EM'])]
        if args.use_squad_v2:
            results_list.append(('AvNA', results['AvNA']))
        results = OrderedDict(results_list)

        # Log to console
        results_str = ', '.join(f'{k}: {v:05.2f}' for k, v in results.items())
        log.info(f'{args.split.title()} {results_str}')

        # Log to TensorBoard
        tbx = SummaryWriter(args.save_dir)
        util.visualize(tbx,
                       pred_dict=pred_dict,
                       eval_path=eval_file,
                       step=0,
                       split=args.split,
                       num_visuals=args.num_visuals)

    # Write submission file
    sub_path = join(args.save_dir, args.split + '_' + args.sub_file)
    log.info(f'Writing submission file to {sub_path}...')
    with open(sub_path, 'w', newline='', encoding='utf-8') as csv_fh:
        csv_writer = csv.writer(csv_fh, delimiter=',')
        csv_writer.writerow(['Id', 'Predicted'])
        for uuid in sorted(sub_dict):
            csv_writer.writerow([uuid, sub_dict[uuid]])
Example #30
File: train.py Project: tconsigny/robustQA
    def train(self, model, train_dataloader, eval_dataloader, val_dict, experts):
        device = self.device

        if experts is False:
            model.to(device)
            optim = AdamW(model.parameters(), lr=self.lr)
        else:
            model.gate.to(device)
            optim = AdamW(model.gate.parameters(), lr=self.lr)

        global_idx = 0
        best_scores = {'F1': -1.0, 'EM': -1.0}
        tbx = SummaryWriter(self.save_dir)

        
        for epoch_num in range(self.num_epochs):
            self.log.info(f'Epoch: {epoch_num}')
            with torch.enable_grad(), tqdm(total=len(train_dataloader.dataset)) as progress_bar:
                # Reminder: stochastic gating for the MoE
                for batch in train_dataloader:
                    optim.zero_grad()
                    if experts:
                        model.gate.train()
                        model.expert1.eval()
                        model.expert2.eval()
                        model.expert3.eval()
                    else:
                        model.train()
                    input_ids = batch['input_ids'].to(device)
                    attention_mask = batch['attention_mask'].to(device)
                    start_positions = batch['start_positions'].to(device)
                    end_positions = batch['end_positions'].to(device)
                    
                    if experts:
                        outputs1 = model.expert1(input_ids, attention_mask=attention_mask)
                        start_logits1, end_logits1 = outputs1.start_logits, outputs1.end_logits
                        outputs2 = model.expert2(input_ids, attention_mask=attention_mask)
                        start_logits2, end_logits2 = outputs2.start_logits, outputs2.end_logits
                        outputs3 = model.expert3(input_ids, attention_mask=attention_mask)
                        start_logits3, end_logits3 = outputs3.start_logits, outputs3.end_logits

                        # Gate scores for this batch (input_ids assumed as
                        # the gate's input)
                        expert_weights = model.gate(input_ids)
                        start_logits = (start_logits1 * expert_weights[0] +
                                        start_logits2 * expert_weights[1] +
                                        start_logits3 * expert_weights[2])
                        end_logits = (end_logits1 * expert_weights[0] +
                                      end_logits2 * expert_weights[1] +
                                      end_logits3 * expert_weights[2])
                        # torch.log keeps the graph differentiable for backward()
                        loss = (-torch.log(start_logits[0][start_positions[0]])
                                - torch.log(end_logits[0][end_positions[0]]))

                    else:
                        outputs = model(input_ids, attention_mask=attention_mask,
                                    start_positions=start_positions,
                                    end_positions=end_positions)
                        loss = outputs[0]

                    loss.backward()
                    optim.step()
                    progress_bar.update(len(input_ids))
                    progress_bar.set_postfix(epoch=epoch_num, NLL=loss.item())
                    tbx.add_scalar('train/NLL', loss.item(), global_idx)
                    if (global_idx % self.eval_every) == 0:
                        self.log.info(f'Evaluating at step {global_idx}...')
                        preds, curr_score = self.evaluate(model, eval_dataloader, val_dict, return_preds=True)
                        results_str = ', '.join(f'{k}: {v:05.2f}' for k, v in curr_score.items())
                        self.log.info('Visualizing in TensorBoard...')
                        for k, v in curr_score.items():
                            tbx.add_scalar(f'val/{k}', v, global_idx)
                        self.log.info(f'Eval {results_str}')
                        if self.visualize_predictions:
                            util.visualize(tbx,
                                           pred_dict=preds,
                                           gold_dict=val_dict,
                                           step=global_idx,
                                           split='val',
                                           num_visuals=self.num_visuals)
                        if curr_score['F1'] >= best_scores['F1']:
                            best_scores = curr_score
                            if experts:
                                self.save(model.gate)
                            else:
                                self.save(model)
                    global_idx += 1
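
The MoE branch above mixes per-expert start/end logits with gate weights before computing the loss. Below is a minimal, self-contained sketch of that combination step, assuming a softmax gate output over three experts and the standard extractive-QA cross-entropy; names and shapes are illustrative, not the project's API.

import torch
import torch.nn.functional as F

def moe_span_loss(gate_weights, expert_logits, start_positions, end_positions):
    # gate_weights: (num_experts,) softmax weights from the gate.
    # expert_logits: list of (start_logits, end_logits), each (batch, seq_len).
    start_logits = sum(w * s for w, (s, _) in zip(gate_weights, expert_logits))
    end_logits = sum(w * e for w, (_, e) in zip(gate_weights, expert_logits))
    # Standard extractive-QA loss: cross-entropy over start and end indices.
    return (F.cross_entropy(start_logits, start_positions)
            + F.cross_entropy(end_logits, end_positions))

# Toy usage with random stand-ins for three frozen experts:
batch, seq_len = 2, 16
expert_logits = [(torch.randn(batch, seq_len), torch.randn(batch, seq_len))
                 for _ in range(3)]
gate_weights = torch.softmax(torch.randn(3, requires_grad=True), dim=0)
loss = moe_span_loss(gate_weights, expert_logits,
                     torch.tensor([3, 5]), torch.tensor([7, 9]))
loss.backward()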
Example #31
0
# Load the MNIST dataset (handwritten digit images)
# The first run downloads it (53MB), so it takes a while
mnist = fetch_mldata('MNIST original')

# サンプルデータの読み込み
x_all = mnist.data.astype(np.float32) / 255
x_data = np.vstack([x_all[0]]*10)

# SaltAndPepperNoise
x = x_data.copy()
titles = []
for i in xrange(10):
    rate = 0.1 * i
    titles.append('%3.1f' % rate)
    sap = SaltAndPepperNoise(rate=rate)
    x[i] = sap.noise(x[i])
visualize(x, '../img/noise/s&p.jpg', (8, 2), (1, 10), titles=titles)

# GaussianNoise
x = x_data.copy()
titles = []
for i in xrange(10):
    scale = 0.1 * i
    titles.append('%3.1f' % scale)
    if i == 0:
        continue
    gaus = GaussianNoise(scale=scale)
    x[i] = gaus.noise(x[i])
visualize(x, '../img/noise/gaus.jpg', (8, 2), (1, 10), titles=titles)
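
SaltAndPepperNoise and GaussianNoise are project classes defined elsewhere; a plausible numpy sketch under the interface implied by the calls above (a rate/scale constructor and a noise(x) method) might look like this:

import numpy as np

class SaltAndPepperNoise(object):
    def __init__(self, rate=0.1):
        self.rate = rate

    def noise(self, x):
        # Set a `rate` fraction of pixels to 0 (pepper) or 1 (salt) at random.
        out = x.copy()
        mask = np.random.rand(*out.shape) < self.rate
        out[mask] = np.random.randint(0, 2, size=int(mask.sum())).astype(out.dtype)
        return out

class GaussianNoise(object):
    def __init__(self, scale=0.1):
        self.scale = scale

    def noise(self, x):
        # Add zero-mean Gaussian noise and clip back into [0, 1].
        noisy = x + np.random.normal(0.0, self.scale, size=x.shape)
        return np.clip(noisy, 0.0, 1.0).astype(x.dtype)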

Example #32
0
def main(args):
  print("in main")
  print("args: ", args)

  if True: 
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)
    log = util.get_logger(args.save_dir, args.name)
    tbx = SummaryWriter(args.save_dir)
    device, args.gpu_ids = util.get_available_devices()
    log.info('Args: {}'.format(dumps(vars(args), indent=4, sort_keys=True)))
    args.batch_size *= max(1, len(args.gpu_ids))

    # Set random seed
    log.info('Using random seed {}...'.format(args.seed))
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    # Get embeddings
    log.info('Loading embeddings...')

    # CHECK IF WE NEED TO USE ALL OF THESE???? 
    word_vectors = util.torch_from_json(args.word_emb_file)

    # Get model
    log.info('Building model...')
    model = BiDAF(word_vectors=word_vectors,
                  hidden_size=args.hidden_size,
                  drop_prob=args.drop_prob)
    model = nn.DataParallel(model, args.gpu_ids)
    if args.load_path:
        log.info('Loading checkpoint from {}...'.format(args.load_path))
        model, step = util.load_model(model, args.load_path, args.gpu_ids)
    else:
        step = 0
    model = model.to(device)
    model.train()
    ema = util.EMA(model, args.ema_decay)

    # Get saver
    saver = util.CheckpointSaver(args.save_dir,
                                 max_checkpoints=args.max_checkpoints,
                                 metric_name=args.metric_name,
                                 maximize_metric=args.maximize_metric,
                                 log=log)

    # Get optimizer and scheduler
    optimizer = optim.Adadelta(model.parameters(), args.lr,
                               weight_decay=args.l2_wd)
    scheduler = sched.LambdaLR(optimizer, lambda s: 1.)  # Constant LR


    # Get data loader
    log.info('Building dataset...')
    train_dataset = SQuAD(args.train_record_file, args.use_squad_v2)
    print("train dataset!: ", train_dataset)
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   collate_fn=collate_fn)
    dev_dataset = SQuAD(args.dev_record_file, args.use_squad_v2)
    dev_loader = data.DataLoader(dev_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 collate_fn=collate_fn)


    # Train
    log.info('Training...')
    steps_till_eval = args.eval_steps
    epoch = step // len(train_dataset)
    while epoch != args.num_epochs:
        epoch += 1
        log.info('Starting epoch {}...'.format(epoch))
        with torch.enable_grad(), \
                tqdm(total=len(train_loader.dataset)) as progress_bar:
            for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:
                # Setup for forward
                cw_idxs = cw_idxs.to(device)
                qw_idxs = qw_idxs.to(device)
                batch_size = cw_idxs.size(0)
                optimizer.zero_grad()

                # Forward
                log_p1, log_p2 = model(cw_idxs, qw_idxs)
                y1, y2 = y1.to(device), y2.to(device)
                loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
                loss_val = loss.item()

                # Backward
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step(step // batch_size)
                ema(model, step // batch_size)

                # Log info
                step += batch_size
                progress_bar.update(batch_size)
                progress_bar.set_postfix(epoch=epoch,
                                         NLL=loss_val)
                tbx.add_scalar('train/NLL', loss_val, step)
                tbx.add_scalar('train/LR',
                               optimizer.param_groups[0]['lr'],
                               step)

                steps_till_eval -= batch_size
                if steps_till_eval <= 0:
                    steps_till_eval = args.eval_steps

                    # Evaluate and save checkpoint
                    log.info('Evaluating at step {}...'.format(step))
                    ema.assign(model)
                    results, pred_dict = evaluate(model, dev_loader, device,
                                                  args.dev_eval_file,
                                                  args.max_ans_len,
                                                  args.use_squad_v2)
                    saver.save(step, model, results[args.metric_name], device)
                    ema.resume(model)

                    # Log to console
                    results_str = ', '.join('{}: {:05.2f}'.format(k, v)
                                            for k, v in results.items())
                    log.info('Dev {}'.format(results_str))

                    # Log to TensorBoard
                    log.info('Visualizing in TensorBoard...')
                    for k, v in results.items():
                        tbx.add_scalar('dev/{}'.format(k), v, step)
                    util.visualize(tbx,
                                   pred_dict=pred_dict,
                                   eval_path=args.dev_eval_file,
                                   step=step,
                                   split='dev',
                                   num_visuals=args.num_visuals)
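
util.EMA above maintains an exponential moving average of the model weights, swapped in for evaluation via assign and swapped back out via resume. A minimal sketch of that pattern, assuming a PyTorch model (this mirrors the interface used above, not necessarily util's exact implementation):

class EMA(object):
    """Exponential moving average of model parameters, with assign/resume
    to swap the averaged weights in and out around evaluation."""

    def __init__(self, model, decay):
        self.decay = decay
        self.shadow = {name: p.data.clone()
                       for name, p in model.named_parameters() if p.requires_grad}
        self.backup = {}

    def __call__(self, model, step):
        # Warm up the decay so early steps are not dominated by initialization.
        decay = min(self.decay, (1.0 + step) / (10.0 + step))
        for name, p in model.named_parameters():
            if p.requires_grad:
                self.shadow[name] = (1.0 - decay) * p.data + decay * self.shadow[name]

    def assign(self, model):
        # Swap EMA weights in for evaluation, backing up the live weights.
        for name, p in model.named_parameters():
            if p.requires_grad:
                self.backup[name] = p.data
                p.data = self.shadow[name]

    def resume(self, model):
        # Restore the live training weights after evaluation.
        for name, p in model.named_parameters():
            if p.requires_grad:
                p.data = self.backup[name]
        self.backup = {}
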
def run_cnn(arch_params,
            optimization_params,
            data_params,
            filename_params,
            visual_params,
            verbose=False):
    

    #####################
    # Unpack Variables  #
    #####################



    results_file_name   = filename_params [ "results_file_name" ]                # Files saved on completion; can be used by the parse.m file
    error_file_name     = filename_params [ "error_file_name" ]
    cost_file_name      = filename_params [ "cost_file_name"  ]
    confusion_file_name = filename_params [ "confusion_file_name" ]
    network_save_name   = filename_params [ "network_save_name" ]

    dataset             = data_params [ "loc" ]
    height              = data_params [ "height" ]
    width               = data_params [ "width" ]
    batch_size          = data_params [ "batch_size" ]    
    load_batches        = data_params [ "load_batches"  ] * batch_size
    batches2train       = data_params [ "batches2train" ]
    batches2test        = data_params [ "batches2test" ]
    batches2validate    = data_params [ "batches2validate" ] 
    channels            = data_params [ "channels" ]

    mom_start                       = optimization_params [ "mom_start" ]
    mom_end                         = optimization_params [ "mom_end" ]
    mom_epoch_interval              = optimization_params [ "mom_interval" ]
    mom_type                        = optimization_params [ "mom_type" ]
    initial_learning_rate           = optimization_params [ "initial_learning_rate" ]              
    learning_rate_decay             = optimization_params [ "learning_rate_decay" ] 
    ada_grad                        = optimization_params [ "ada_grad" ]   
    fudge_factor                    = optimization_params [ "fudge_factor" ]
    l1_reg                          = optimization_params [ "l1_reg" ]
    l2_reg                          = optimization_params [ "l2_reg" ]
    rms_prop                        = optimization_params [ "rms_prop" ]
    rms_rho                         = optimization_params [ "rms_rho" ]
    rms_epsilon                     = optimization_params [ "rms_epsilon" ]

    squared_filter_length_limit     = arch_params [ "squared_filter_length_limit" ]   
    n_epochs                        = arch_params [ "n_epochs" ]
    validate_after_epochs           = arch_params [ "validate_after_epochs"  ]
    mlp_activations                 = arch_params [ "mlp_activations"  ] 
    cnn_activations                 = arch_params [ "cnn_activations" ]
    dropout                         = arch_params [ "dropout"  ]
    column_norm                     = arch_params [ "column_norm"  ]    
    dropout_rates                   = arch_params [ "dropout_rates" ]
    nkerns                          = arch_params [ "nkerns"  ]
    outs                            = arch_params [ "outs" ]
    filter_size                     = arch_params [ "filter_size" ]
    pooling_size                    = arch_params [ "pooling_size" ]
    num_nodes                       = arch_params [ "num_nodes" ]
    use_bias                        = arch_params [ "use_bias" ]
    random_seed                     = arch_params [ "random_seed" ]
    svm_flag                        = arch_params [ "svm_flag" ]

    visualize_flag          = visual_params ["visualize_flag" ]
    visualize_after_epochs  = visual_params ["visualize_after_epochs" ]
    n_visual_images         = visual_params ["n_visual_images" ] 
    display_flag            = visual_params ["display_flag" ]


    # Random seed initialization.
    rng = numpy.random.RandomState(random_seed)  


    #################
    # Data Loading  #
    #################
    print "... loading data"
    # load matlab files as dataset.
    if data_params["type"] == 'mat':
        train_data_x, train_data_y, train_data_y1 = load_data_mat(dataset, batch = 1 , type_set = 'train')             
        test_data_x, test_data_y, test_data_y1 = load_data_mat(dataset, batch = 1 , type_set = 'test')      # Load dataset for first epoch.
        valid_data_x, valid_data_y, valid_data_y1 = load_data_mat(dataset, batch = 1 , type_set = 'valid')    # Load dataset for first epoch.

        train_set_x = theano.shared(numpy.asarray(train_data_x, dtype=theano.config.floatX), borrow=True)
        train_set_y = theano.shared(numpy.asarray(train_data_y, dtype='int32'), borrow=True)
        train_set_y1 = theano.shared(numpy.asarray(train_data_y1, dtype=theano.config.floatX), borrow=True)

        test_set_x = theano.shared(numpy.asarray(test_data_x, dtype=theano.config.floatX), borrow=True)
        test_set_y = theano.shared(numpy.asarray(test_data_y, dtype='int32'), borrow=True) 
        test_set_y1 = theano.shared(numpy.asarray(test_data_y1, dtype=theano.config.floatX), borrow=True)

        valid_set_x = theano.shared(numpy.asarray(valid_data_x, dtype=theano.config.floatX), borrow=True)
        valid_set_y = theano.shared(numpy.asarray(valid_data_y, dtype='int32'), borrow=True)
        valid_set_y1 = theano.shared(numpy.asarray(valid_data_y1, dtype=theano.config.floatX), borrow=True)

        # compute number of minibatches for training, validation and testing
        n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
        n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
        n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

        multi_load = True

    # load pkl data as is shown in theano tutorials
    elif data_params["type"] == 'pkl':   

        data = load_data_pkl(dataset)
        train_set_x, train_set_y, train_set_y1 = data[0]
        valid_set_x, valid_set_y, valid_set_y1 = data[1]
        test_set_x, test_set_y, test_set_y1 = data[2]

        # compute number of minibatches for training, validation and testing
        n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
        n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
        n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

        n_train_images = train_set_x.get_value(borrow=True).shape[0]
        n_test_images = test_set_x.get_value(borrow=True).shape[0]
        n_valid_images = valid_set_x.get_value(borrow=True).shape[0]

        n_train_batches_all = n_train_images / batch_size 
        n_test_batches_all = n_test_images / batch_size 
        n_valid_batches_all = n_valid_images / batch_size

        if (n_train_batches_all < batches2train) or (n_test_batches_all < batches2test) or (n_valid_batches_all < batches2validate):        # You can't have so many batches.
            print "...  !! Dataset doens't have so many batches. "
            raise AssertionError()

        multi_load = False

    # load skdata ( its a good library that has a lot of datasets)
    elif data_params["type"] == 'skdata':

        if dataset in ('mnist', 'mnist_noise1', 'mnist_noise2', 'mnist_noise3',
                       'mnist_noise4', 'mnist_noise5', 'mnist_noise6',
                       'mnist_bg_images', 'mnist_bg_rand', 'mnist_rotated',
                       'mnist_rotated_bg'):

            print "... importing " + dataset + " from skdata"

            func = globals()['load_skdata_' + dataset]
            data = func()
            train_set_x, train_set_y, train_set_y1 = data[0]
            valid_set_x, valid_set_y, valid_set_y1 = data[1]
            test_set_x, test_set_y, test_set_y1 = data[2]

            # compute number of minibatches for training, validation and testing
            n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
            n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
            n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

            n_train_images = train_set_x.get_value(borrow=True).shape[0]
            n_test_images = test_set_x.get_value(borrow=True).shape[0]
            n_valid_images = valid_set_x.get_value(borrow=True).shape[0]

            n_train_batches_all = n_train_images / batch_size 
            n_test_batches_all = n_test_images / batch_size 
            n_valid_batches_all = n_valid_images / batch_size

            if (n_train_batches_all < batches2train) or (n_test_batches_all < batches2test) or (n_valid_batches_all < batches2validate):        # You can't have so many batches.
                print "...  !! Dataset doens't have so many batches. "
                raise AssertionError()

            multi_load = False

        elif dataset == 'cifar10':
            print "... importing cifar 10 from skdata"

            data = load_skdata_cifar10()
            train_set_x, train_set_y, train_set_y1 = data[0]
            valid_set_x, valid_set_y, valid_set_y1 = data[1]
            test_set_x, test_set_y, test_set_y1 = data[2]

            # compute number of minibatches for training, validation and testing
            n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
            n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
            n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

            multi_load = False

        elif dataset == 'caltech101':
            print "... importing caltech 101 from skdata"

            # shuffle the data
            total_images_in_dataset = 9144 
            rand_perm = numpy.random.permutation(total_images_in_dataset)  # create a constant shuffle, so that data can be loaded in batchmode with the same random shuffle

            n_train_images = total_images_in_dataset / 3
            n_test_images = total_images_in_dataset / 3
            n_valid_images = total_images_in_dataset / 3 

            n_train_batches_all = n_train_images / batch_size 
            n_test_batches_all = n_test_images / batch_size 
            n_valid_batches_all = n_valid_images / batch_size

            if (n_train_batches_all < batches2train) or (n_test_batches_all < batches2test) or (n_valid_batches_all < batches2validate):        # You can't have so many batches.
                print "...  !! Dataset doens't have so many batches. "
                raise AssertionError()

            train_data_x, train_data_y  = load_skdata_caltech101(batch_size = load_batches, rand_perm = rand_perm, batch = 1 , type_set = 'train' , height = height, width = width)             
            test_data_x, test_data_y  = load_skdata_caltech101(batch_size = load_batches, rand_perm = rand_perm, batch = 1 , type_set = 'test' , height = height, width = width)      # Load dataset for first epoch.
            valid_data_x, valid_data_y  = load_skdata_caltech101(batch_size = load_batches, rand_perm = rand_perm, batch = 1 , type_set = 'valid' , height = height, width = width)    # Load dataset for first epoch.

            train_set_x = theano.shared(train_data_x, borrow=True)
            train_set_y = theano.shared(train_data_y, borrow=True)
            
            test_set_x = theano.shared(test_data_x, borrow=True)
            test_set_y = theano.shared(test_data_y, borrow=True) 
          
            valid_set_x = theano.shared(valid_data_x, borrow=True)
            valid_set_y = theano.shared(valid_data_y, borrow=True)

            # compute number of minibatches for training, validation and testing
            n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
            n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
            n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

            multi_load = True

    # Just checking as a way to see if the intended dataset is indeed loaded.
    assert height*width*channels == train_set_x.get_value( borrow = True ).shape[1]
    assert batch_size >= n_visual_images
     
    if ada_grad is True:
        assert rms_prop is False
    elif rms_prop is True:
        assert ada_grad is False
        fudge_factor = rms_epsilon

    ######################
    # BUILD NETWORK      #
    ######################


    print '... building the network'    
    start_time = time.clock()
    # allocate symbolic variables for the data
    index = T.lscalar()         # index to a [mini]batch
    x = T.matrix('x')           # the data is presented as rasterized images
    y = T.ivector('y')          # the labels are presented as 1D vector of [int] 

    if svm_flag is True:
        y1 = T.matrix('y1')     # [-1 , 1] labels in case of SVM    

    first_layer_input = x.reshape((batch_size, channels, height, width))

    # Create first convolutional - pooling layers 
    activity = []       # to record Cnn activities 
    weights = []

    conv_layers=[]
    filt_size = filter_size[0]
    pool_size = pooling_size[0]

    if not nkerns == []: 
        conv_layers.append ( LeNetConvPoolLayer(
                                rng,
                                input = first_layer_input,
                                image_shape=(batch_size, channels , height, width),
                                filter_shape=(nkerns[0], channels , filt_size, filt_size),
                                poolsize=(pool_size, pool_size),
                                activation = cnn_activations[0],
                                verbose = verbose
                                 ) )
        activity.append ( conv_layers[-1].output )
        weights.append ( conv_layers[-1].filter_img)

        # Create the rest of the convolutional - pooling layers in a loop
        next_in_1 = ( height - filt_size + 1 ) / pool_size        
        next_in_2 = ( width - filt_size + 1 ) / pool_size
    
        for layer in xrange(len(nkerns)-1):   
            filt_size = filter_size[layer+1]
            pool_size = pooling_size[layer+1]
            conv_layers.append ( LeNetConvPoolLayer(
                                rng,
                                input=conv_layers[layer].output,        
                                image_shape=(batch_size, nkerns[layer], next_in_1, next_in_2),
                                filter_shape=(nkerns[layer+1], nkerns[layer], filt_size, filt_size),
                                poolsize=(pool_size, pool_size),
                                activation = cnn_activations[layer+1],
                                verbose = verbose
                                 ) )
            next_in_1 = ( next_in_1 - filt_size + 1 ) / pool_size        
            next_in_2 = ( next_in_2 - filt_size + 1 ) / pool_size
            weights.append ( conv_layers[-1].filter_img )
            activity.append( conv_layers[-1].output )

    # Assemble the fully connected layers
    if nkerns == []:
        fully_connected_input = first_layer_input
    else:
        fully_connected_input = conv_layers[-1].output.flatten(2)

    if len(dropout_rates) > 2 :
        layer_sizes =[]
        layer_sizes.append( nkerns[-1] * next_in_1 * next_in_2 )
        for i in xrange(len(dropout_rates)-1):
            layer_sizes.append ( num_nodes[i] )
        layer_sizes.append ( outs )
        
    elif len(dropout_rates) == 1:
        layer_sizes = [ nkerns[-1] * next_in_1 * next_in_2, outs]
    else :
        layer_sizes = [ nkerns[-1] * next_in_1 * next_in_2, num_nodes[0] , outs]

    assert len(layer_sizes) - 1 == len(dropout_rates)           # Just checking.
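    # e.g. with nkerns[-1] = 20, next_in_1 = next_in_2 = 4, num_nodes = [500] and
    # dropout_rates = [0.5, 0.5], the else-branch above gives
    # layer_sizes = [320, 500, outs], so len(layer_sizes) - 1 == len(dropout_rates).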

    """  Dropouts implemented from paper:
    Srivastava, Nitish, et al. "Dropout: A simple way to prevent neural networks
    from overfitting." The Journal of Machine Learning Research 15.1 (2014): 1929-1958.
    """

    MLPlayers = MLP( rng=rng,
                     input=fully_connected_input,
                     layer_sizes=layer_sizes,
                     dropout_rates=dropout_rates,
                     activations=mlp_activations,
                     use_bias = use_bias,
                     svm_flag = svm_flag,
                     verbose = verbose)

    # Build the expression for the categorical cross entropy function.
    if svm_flag is False:
        cost = MLPlayers.negative_log_likelihood( y )
        dropout_cost = MLPlayers.dropout_negative_log_likelihood( y )
    else :        
        cost = MLPlayers.negative_log_likelihood( y1 )
        dropout_cost = MLPlayers.dropout_negative_log_likelihood( y1 )

    # create theano functions for evaluating the graphs
    test_model = theano.function(
            inputs=[index],
            outputs=MLPlayers.errors(y),
            givens={
                x: test_set_x[index * batch_size:(index + 1) * batch_size],
                y: test_set_y[index * batch_size:(index + 1) * batch_size]})

    validate_model = theano.function(
            inputs=[index],
            outputs=MLPlayers.errors(y),
            givens={
                x: valid_set_x[index * batch_size:(index + 1) * batch_size],
                y: valid_set_y[index * batch_size:(index + 1) * batch_size]})

    prediction = theano.function(
        inputs = [index],
        outputs = MLPlayers.predicts,
        givens={
                x: test_set_x[index * batch_size: (index + 1) * batch_size]})

    nll = theano.function(
        inputs = [index],
        outputs = MLPlayers.probabilities,
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size]})



    # function to return activations of each image
    activities = theano.function (
        inputs = [index],
        outputs = activity,
        givens = {
                x: train_set_x[index * batch_size: (index + 1) * batch_size]
                 })

    # Compute cost and gradients of the model wrt parameter
    params = []
    for layer in conv_layers:
        params = params + layer.params
    params = params + MLPlayers.params

    if dropout:
        output = dropout_cost + l1_reg * MLPlayers.dropout_L1 + l2_reg * MLPlayers.dropout_L2
    else:
        output = cost + l1_reg * MLPlayers.L1 + l2_reg * MLPlayers.L2

    gradients = []
    for param in params: 
        gradient = T.grad( output ,param)
        gradients.append ( gradient )

    # TO DO: Try implementing Adadelta also. 
     
    # Compute momentum for the current epoch
    epoch = T.scalar()
    mom = ifelse(epoch <= mom_epoch_interval,
        mom_start*(1.0 - epoch/mom_epoch_interval) + mom_end*(epoch/mom_epoch_interval),
        mom_end)
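    # e.g. with mom_start = 0.5, mom_end = 0.99 and mom_epoch_interval = 100,
    # epoch 50 gives mom = 0.5 * 0.5 + 0.99 * 0.5 = 0.745; from epoch 100 onwards
    # mom stays pinned at mom_end.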

    # learning rate
    eta = theano.shared(numpy.asarray(initial_learning_rate,dtype=theano.config.floatX))
    # accumulate gradients for adagrad
     
    grad_acc = []
    for param in params:
        eps = numpy.zeros_like(param.get_value(borrow=True), dtype=theano.config.floatX)   
        grad_acc.append(theano.shared(eps, borrow=True))

    # accumulate velocities for momentum
    velocities = []
    for param in params:
        velocity = theano.shared(numpy.zeros(param.get_value(borrow=True).shape,dtype=theano.config.floatX))
        velocities.append(velocity)
     

    # create updates for each combination of stuff 
    updates = OrderedDict()
    print_flag = False
     
    for velocity, gradient, acc , param in zip(velocities, gradients, grad_acc, params):        

        if ada_grad is True:

            """ Adagrad implemented from paper:
            John Duchi, Elad Hazan, and Yoram Singer. 2011. Adaptive subgradient methods
            for online learning and stochastic optimization. JMLR
            """

            current_acc = acc + T.sqr(gradient) # Accumulates Gradient 
            updates[acc] = current_acc          # updates accumulation at timestamp

        elif rms_prop is True:

            """ Tieleman, T. and Hinton, G. (2012):
            Neural Networks for Machine Learning, Lecture 6.5 - rmsprop.
            Coursera. http://www.youtube.com/watch?v=O3sxAc4hxZU (formula @5:20)"""

            current_acc = rms_rho * acc + (1 - rms_rho) * T.sqr(gradient) 
            updates[acc] = current_acc
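            # acc is a decaying average of squared gradients, so with rms_rho = 0.9
            # the effective step below is roughly eta / sqrt(E[g^2] + rms_epsilon)
            # (fudge_factor is set to rms_epsilon earlier when rms_prop is True).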

        else:
            current_acc = 1
            fudge_factor = 0

        if mom_type == 0:               # no momentum
            # plain adaptive-gradient step; the velocity below is simply added to the parameters.
            updates[velocity] = -(eta / T.sqrt(current_acc + fudge_factor)) * gradient
        elif mom_type == 1:       # if polyak momentum    

            """ Momentum implemented from paper:  
            Polyak, Boris Teodorovich. "Some methods of speeding up the convergence of iteration methods." 
            USSR Computational Mathematics and Mathematical Physics 4.5 (1964): 1-17.

            Adapted from Sutskever, Ilya, Hinton et al. "On the importance of initialization and momentum in deep learning." 
            Proceedings of the 30th international conference on machine learning (ICML-13). 2013.
            equation (1) and equation (2)"""   

            updates[velocity] = mom * velocity - (1.-mom) * ( eta / T.sqrt(current_acc+ fudge_factor))  * gradient                             

        elif mom_type == 2:             # Nesterov accelerated gradient

            """Nesterov, Yurii. "A method of solving a convex programming problem with convergence rate O (1/k2)."
            Soviet Mathematics Doklady. Vol. 27. No. 2. 1983.
            Adapted from https://blogs.princeton.edu/imabandit/2013/04/01/acceleratedgradientdescent/ 

            Instead of using past params we use the current params as described in this link
            https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617,"""
  
            updates[velocity] = mom * velocity - (1.-mom) * ( eta / T.sqrt(current_acc + fudge_factor))  * gradient                                 
            updates[param] = mom * updates[velocity] 

        else:
            if print_flag is False:
                print_flag = True
                print "!! Unrecognized mometum type, switching to no momentum."
            updates[velocity] = -( eta / T.sqrt(current_acc+ fudge_factor) ) * gradient                                              
                        

        if mom_type != 2:
            stepped_param  = param + updates[velocity]
        else:
            stepped_param = param + updates[velocity] + updates[param]

        if param.get_value(borrow=True).ndim == 2 and column_norm is True:

            """ constrain the norms of the COLUMNs of the weight, according to
            https://github.com/BVLC/caffe/issues/109 """

            col_norms = T.sqrt(T.sum(T.sqr(stepped_param), axis=0))
            desired_norms = T.clip(col_norms, 0, T.sqrt(squared_filter_length_limit))
            scale = desired_norms / (1e-7 + col_norms)
            updates[param] = stepped_param * scale
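            # e.g. squared_filter_length_limit = 15.0 caps each column at an L2 norm
            # of sqrt(15); columns already under the cap keep a scale of ~1 thanks
            # to the 1e-7 stabilizer.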

        else:            
            updates[param] = stepped_param

     
    if svm_flag is True:
        train_model = theano.function(inputs= [index, epoch],
                outputs=output,
                updates=updates,
                givens={
                    x: train_set_x[index * batch_size:(index + 1) * batch_size],
                    y1: train_set_y1[index * batch_size:(index + 1) * batch_size]},
                on_unused_input='ignore'                    
                    )
    else:
        train_model = theano.function(inputs= [index, epoch],
                outputs=output,
                updates=updates,
                givens={
                    x: train_set_x[index * batch_size:(index + 1) * batch_size],
                    y: train_set_y[index * batch_size:(index + 1) * batch_size]},
                on_unused_input='ignore'                    
                    )

    decay_learning_rate = theano.function(
           inputs=[], 
           outputs=eta,                                               # Just updates the learning rates. 
           updates={eta: eta * learning_rate_decay}
            )

    momentum_value = theano.function ( 
                        inputs =[epoch],
                        outputs = mom,
                        )

    end_time = time.clock()

    # setting up visualization stuff...
    shuffle_batch_ind = numpy.arange(batch_size)
    numpy.random.shuffle(shuffle_batch_ind)
    visualize_ind = shuffle_batch_ind[0:n_visual_images]
    #visualize_ind = range(n_visual_images)
    main_img_visual = True

     
    # create all directories required for saving results and data.
    if visualize_flag is True:
        if not os.path.exists('../visuals'):
            os.makedirs('../visuals')                
        if not os.path.exists('../visuals/activities'):
            os.makedirs('../visuals/activities')
            for i in xrange(len(nkerns)):
                os.makedirs('../visuals/activities/layer_'+str(i))
        if not os.path.exists('../visuals/filters'):
            os.makedirs('../visuals/filters')
            for i in xrange(len(nkerns)):
                os.makedirs('../visuals/filters/layer_'+str(i))
        if not os.path.exists('../visuals/images'):
            os.makedirs('../visuals/images')
    if not os.path.exists('../results/'):
        os.makedirs ('../results')

    print "...      -> building complete, took " + str((end_time - start_time)) + " seconds" 


    ###############
    # TRAIN MODEL #
    ###############
     
    #pdb.set_trace()
    print "... training"
    start_time = time.clock()

    patience = numpy.inf  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    this_validation_loss = []
    best_validation_loss = numpy.inf
    best_iter = 0
    epoch_counter = 0
    early_termination = False
    cost_saved = []
    best_params = None
    iteration= 0

    while (epoch_counter < n_epochs) and (not early_termination):
        epoch_counter = epoch_counter + 1 
         
        for batch in xrange (batches2train):
            if verbose is True:
                print "...          -> Epoch: " + str(epoch_counter) + " Batch: " + str(batch+1) + " out of " + str(batches2train) + " batches"

            if multi_load is True:
                iteration= (epoch_counter - 1) * n_train_batches * batches2train + batch
                # Load data for this batch
                if verbose is True:
                    print "...          -> loading data for new batch"

                if data_params["type"] == 'mat':
                    train_data_x, train_data_y, train_data_y1 = load_data_mat(dataset, batch = batch + 1 , type_set = 'train')             

                elif data_params["type"] == 'skdata':                   
                    if dataset == 'caltech101':
                        train_data_x, train_data_y  = load_skdata_caltech101(batch_size = load_batches, batch = batch + 1 , type_set = 'train', rand_perm = rand_perm, height = height, width = width )

                        # Do not use svm_flag for caltech 101                        
                train_set_x.set_value(train_data_x ,borrow = True)
                train_set_y.set_value(train_data_y ,borrow = True)

                for minibatch_index in xrange(n_train_batches):
                    if verbose is True:
                        print "...                  ->    Mini Batch: " + str(minibatch_index + 1) + " out of " + str(n_train_batches)
                    cost_ij = train_model(minibatch_index, epoch_counter)
                    cost_saved = cost_saved + [cost_ij]
                    
            else:        
                iteration= (epoch_counter - 1) * n_train_batches + batch
                cost_ij = train_model(batch, epoch_counter)
                cost_saved = cost_saved +[cost_ij]
         
        if epoch_counter % validate_after_epochs == 0:
            # Load Validation Dataset here.
            validation_losses = 0.      
            if multi_load is True:
                # Load data for this batch
                 
                for batch in xrange(batches2validate):
                    if data_params["type"] == 'mat':
                        valid_data_x, valid_data_y, valid_data_y1 = load_data_mat(dataset, batch = batch + 1 , type_set = 'valid')             

                    elif data_params["type"] == 'skdata':                   
                        if dataset == 'caltech101':
          
                            valid_data_x, valid_data_y = load_skdata_caltech101(batch_size = load_batches, batch = batch + 1 , type_set = 'valid' , rand_perm = rand_perm, height = height, width = width )
                            # Do not use svm_flag for caltech 101                    
                    valid_set_x.set_value(valid_data_x,borrow = True)
                    valid_set_y.set_value(valid_data_y,borrow = True)

                    validation_losses = validation_losses + numpy.sum([validate_model(i) for i in xrange(n_valid_batches)])

                this_validation_loss = this_validation_loss + [validation_losses]

                n_val_samples = batch_size * n_valid_batches * batches2validate
                val_accuracy = float(n_val_samples - this_validation_loss[-1]) * 100 / n_val_samples
                report = "...      -> epoch " + str(epoch_counter) \
                         + ", cost: " + str(numpy.mean(cost_saved[-1 * n_train_batches:])) \
                         + ", validation accuracy: " + str(val_accuracy) + "%"
                if verbose is True:
                    report = report + ", learning_rate = " + str(eta.get_value(borrow=True)) \
                             + ", momentum = " + str(momentum_value(epoch_counter))
                if this_validation_loss[-1] < best_validation_loss:
                    report = report + " -> best thus far"
                print report
                 
            else:

                validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
                this_validation_loss = this_validation_loss + [numpy.sum(validation_losses)]

                n_val_samples = batch_size * n_valid_batches
                val_accuracy = float(n_val_samples - this_validation_loss[-1]) * 100 / n_val_samples
                report = "...      -> epoch " + str(epoch_counter) \
                         + ", cost: " + str(cost_saved[-1]) \
                         + ", validation accuracy: " + str(val_accuracy) + "%"
                if verbose is True:
                    report = report + ", learning_rate = " + str(eta.get_value(borrow=True)) \
                             + ", momentum = " + str(momentum_value(epoch_counter))
                if this_validation_loss[-1] < best_validation_loss:
                    report = report + " -> best thus far"
                print report

            #improve patience if loss improvement is good enough
            if this_validation_loss[-1] < best_validation_loss *  \
               improvement_threshold:
                patience = max(patience, iteration* patience_increase)
                best_iter = iteration


            best_validation_loss = min(best_validation_loss, this_validation_loss[-1])
        new_learning_rate = decay_learning_rate()

         
        if visualize_flag is True:
            if epoch_counter % visualize_after_epochs == 0:
                # saving down images. 
                if main_img_visual is False:
                    for i in xrange(n_visual_images):
                        curr_img = numpy.asarray(numpy.reshape(train_set_x.get_value( borrow = True )[visualize_ind[i]],[height, width, channels] ) * 255., dtype='uint8' )
                        if verbose is True:
                            cv2.imshow("Image Number " +str(i) + "_label_" + str(train_set_y.eval()[visualize_ind[i]]), curr_img)
                        cv2.imwrite("../visuals/images/image_" + str(i)+ "_label_" + str(train_set_y.eval()[visualize_ind[i]]) + ".jpg", curr_img )
                main_img_visual = True

                # visualizing activities.
                activity = activities(0)
                 
                for m in xrange(len(nkerns)):   #For each layer 
                    loc_ac = '../visuals/activities/layer_' + str(m) + "/epoch_" + str(epoch_counter) +"/"
                    if not os.path.exists(loc_ac):   
                        os.makedirs(loc_ac)
                    current_activity = activity[m]
                    for i in xrange(n_visual_images):  # for each randomly chosen image .. visualize its activity 
                        visualize(current_activity[visualize_ind[i]], loc = loc_ac, filename = 'activity_' + str(i) + "_label_" + str(train_set_y.eval()[visualize_ind[i]]) +'.jpg' , show_img = display_flag)

                # visualizing the filters.
                for m in xrange(len(nkerns)):
                    if m == 0:              # first layer outputs.
                        if channels == 3:    # if the image is color, then first layer looks at color pictures and I can visualize the filters also as color.
                            curr_image = weights[m].eval()
                            if not os.path.exists('../visuals/filters/layer_'+str(m)+'/epoch_'+str(epoch_counter)):
                                os.makedirs('../visuals/filters/layer_'+str(m)+'/epoch_'+str(epoch_counter))
                            visualize_color_filters(curr_image, loc = '../visuals/filters/layer_' + str(m) + '/' + 'epoch_' + str(epoch_counter) + '/' , filename = 'kernel_0.jpg' , show_img = display_flag)
                        else:       # visualize them as grayscale images.
                            for i in xrange(weights[m].shape.eval()[1]):
                                curr_image = weights[m].eval() [:,i,:,:]
                                if not os.path.exists('../visuals/filters/layer_'+str(m)+'/epoch_'+str(epoch_counter)):
                                    os.makedirs('../visuals/filters/layer_'+str(m)+'/epoch_'+str(epoch_counter))
                                visualize(curr_image, loc = '../visuals/filters/layer_' + str(m) + '/' + 'epoch_' + str(epoch_counter) + '/' , filename = 'kernel_' + str(i) + '.jpg' , show_img = display_flag)
                    else:
                        for i in xrange(nkerns[m-1]): 
                            curr_image = weights[m].eval()[:,i,:,:]
                            if not os.path.exists('../visuals/filters/layer_'+str(m)+'/epoch_'+str(epoch_counter)):
                                os.makedirs('../visuals/filters/layer_'+str(m)+'/epoch_'+str(epoch_counter))
                            visualize(curr_image, loc = '../visuals/filters/layer_' + str(m) + '/' + 'epoch_' + str(epoch_counter) + '/' , filename =  'kernel_'  + str(i) + '.jpg' , show_img = display_flag)
             
        if patience <= iteration:
            early_termination = True
            break
        save_network( 'network.pkl.gz',  params, arch_params, data_params )    
    end_time = time.clock()
    print "... training complete, took " + str((end_time - start_time)/ 60.) +" minutes"



    ###############
    # TEST MODEL  #
    ###############
    start_time = time.clock()
    print "... testing"
    wrong = 0
    predictions = []
    class_prob = []
    labels = []
     
    if multi_load is False:

        labels = test_set_y.eval().tolist()   
        for mini_batch in xrange(batches2test):
            #print ".. Testing batch " + str(mini_batch)
            wrong = wrong + int(test_model(mini_batch))                        
            predictions = predictions + prediction(mini_batch).tolist()
            class_prob = class_prob + nll(mini_batch).tolist()
        print "...      -> Total test accuracy : " + str(float((batch_size*n_test_batches)-wrong )*100/(batch_size*n_test_batches)) + " % out of " + str(batch_size*n_test_batches) + " samples."

    else:
         
        for batch in xrange(batches2test):
            print ".. Testing batch " + str(batch)
            # Load data for this batch
            if data_params["type"] == 'mat':
                test_data_x, test_data_y, test_data_y1 = load_data_mat(dataset, batch = batch + 1 , type_set = 'test')             

            elif data_params["type"] == 'skdata':                   
                if dataset == 'caltech101':
  
                    test_data_x, test_data_y = load_skdata_caltech101(batch_size = load_batches, batch = batch +  1 , type_set = 'test', rand_perm = rand_perm, height = height, width = width )

            test_set_x.set_value(test_data_x,borrow = True)
            test_set_y.set_value(test_data_y,borrow = True)

            labels = labels + test_set_y.eval().tolist() 
            for mini_batch in xrange(n_test_batches):
                wrong = wrong + int(test_model(mini_batch))   
                predictions = predictions + prediction(mini_batch).tolist()
                class_prob = class_prob + nll(mini_batch).tolist()
         
        print "...      -> Total test accuracy : " + str(float((batch_size*n_test_batches*batches2test)-wrong )*100/(batch_size*n_test_batches*batches2test)) + " % out of " + str(batch_size*n_test_batches*batches2test) + " samples."

    end_time = time.clock()

    correct = 0
    confusion = numpy.zeros((outs,outs), dtype = int)
    for index in xrange(len(predictions)):
        if labels[index] == predictions[index]:
            correct = correct + 1
        confusion[int(predictions[index]),int(labels[index])] = confusion[int(predictions[index]),int(labels[index])] + 1


    # Save down data 
    f = open(results_file_name, 'w')
    for i in xrange(len(predictions)):
        f.write(str(i))
        f.write("\t")
        f.write(str(labels[i]))
        f.write("\t")
        f.write(str(predictions[i]))
        f.write("\t")
        for j in xrange(outs):
            f.write(str(class_prob[i][j]))
            f.write("\t")
        f.write('\n')
    f.close()

    f = open(error_file_name,'w')
    for i in xrange(len(this_validation_loss)):
        f.write(str(this_validation_loss[i]))
        f.write("\n")
    f.close()

    f = open(cost_file_name,'w')
    for i in xrange(len(cost_saved)):
        f.write(str(cost_saved[i]))
        f.write("\n")
    f.close()

    f = open(confusion_file_name, 'w')
    f.write(str(confusion))

    f.close()
    
    
    save_network( network_save_name,  params, arch_params, data_params )
    end_time = time.clock()
    print "Testing complete, took " + str((end_time - start_time)/ 60.) + " minutes"    
    print "Confusion Matrix with accuracy : " + str(float(correct)/len(predictions)*100)
    print confusion
    print "Done"

    pdb.set_trace()

    def print_net(self, epoch, display_flag=True):
        # saving down true images.
        if self.main_img_visual is False:
            for i in xrange(self.n_visual_images):
                curr_img = numpy.asarray(
                    numpy.reshape(
                        self.train_set_x.get_value(borrow=True)[self.visualize_ind[i]],
                        [self.height, self.width, self.channels],
                    )
                    * 255.0,
                    dtype="uint8",
                )
                if self.display_flag is True:
                    cv2.imshow(
                        "Image Number " + str(i) + "_label_" + str(self.train_set_y.eval()[self.visualize_ind[i]]),
                        curr_img,
                    )
                cv2.imwrite(
                    "../visuals/images/image_"
                    + str(i)
                    + "_label_"
                    + str(self.train_set_y.eval()[self.visualize_ind[i]])
                    + ".jpg",
                    curr_img,
                )
        self.main_img_visual = True

        # visualizing activities.
        activity = self.activities(0)
        for m in xrange(len(self.nkerns)):  # For each layer
            loc_ac = "../visuals/activities/layer_" + str(m) + "/epoch_" + str(epoch) + "/"
            if not os.path.exists(loc_ac):
                os.makedirs(loc_ac)
            current_activity = activity[m]
            for i in xrange(self.n_visual_images):  # for each randomly chosen image .. visualize its activity
                util.visualize(
                    current_activity[self.visualize_ind[i]],
                    loc=loc_ac,
                    filename="activity_"
                    + str(i)
                    + "_label_"
                    + str(self.train_set_y.eval()[self.visualize_ind[i]])
                    + ".jpg",
                    show_img=display_flag,
                )

        # visualizing the filters.
        for m in xrange(len(self.nkerns)):
            curr_weights = self.weights[m].eval()
            if curr_weights.shape[1] == 3:
                # if the image is color, then first layer looks at color pictures and
                # I can visualize the filters also as color.
                curr_image = curr_weights
                if not os.path.exists("../visuals/filters/layer_" + str(m) + "/epoch_" + str(epoch)):
                    os.makedirs("../visuals/filters/layer_" + str(m) + "/epoch_" + str(epoch_counter))
                util.visualize_color_filters(
                    curr_image,
                    loc="../visuals/filters/layer_" + str(m) + "/" + "epoch_" + str(epoch) + "/",
                    filename="kernel_0.jpg",
                    show_img=self.display_flag,
                )
            else:  # visualize them as grayscale images.
                for i in xrange(curr_weights.shape[1]):
                    curr_image = curr_weights[:, i, :, :]
                    if not os.path.exists("../visuals/filters/layer_" + str(m) + "/epoch_" + str(epoch)):
                        os.makedirs("../visuals/filters/layer_" + str(m) + "/epoch_" + str(epoch))
                    util.visualize(
                        curr_image,
                        loc="../visuals/filters/layer_" + str(m) + "/" + "epoch_" + str(epoch) + "/",
                        filename="kernel_" + str(i) + ".jpg",
                        show_img=self.display_flag,
                    )
Example #35
0
        if epoch == 0 and i == 0:
            with open('../output/da/graph.dot', 'w') as o:
                o.write(computational_graph.build_computational_graph((loss, )).dump())
            with open('../output/da/graph.wo_split.dot', 'w') as o:
                g = computational_graph.build_computational_graph((loss, ), remove_split=True)
                o.write(g.dump())
    print 'train mean loss={}'.format(sum_loss / N)
    losses += [sum_loss / N]
    # Evaluation
    sum_loss = 0
    for i in xrange(0, N_test, batchsize):
        x_batch = x_test[i:i+batchsize]
        loss = da.test(x_batch)
        sum_loss += float(loss.data) * batchsize
    print 'test mean loss={}'.format(sum_loss / N_test)
    losses += [sum_loss / N_test]
    all_loss.append(losses)
    # Visualization
    h = MinMaxScaler().fit_transform(da.model.encoder.W)
    visualize(h, '../img/da/w/da_{0:04d}.jpg'.format(epoch + 1), (8, 8), (10, 10))

# Save the model
pickle.dump(da, open('../output/da/model.pkl', 'wb'))

# Save loss and accuracy
pickle.dump(all_loss, open('../output/da/loss.pkl', 'wb'))

# Visualization
visualize(h, '../img/da/da_w.jpg', (8, 8), (10, 10))
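
The visualize helper used throughout these examples is project code; a hedged matplotlib sketch consistent with how it is called above (rows of a weight matrix tiled into a rows-by-cols grid and saved as a JPEG) could be:

import numpy as np
import matplotlib
matplotlib.use('Agg')  # headless rendering so savefig works without a display
import matplotlib.pyplot as plt

def visualize(W, path, fig_size, grid, titles=None):
    # Tile each row of W (n_units x n_pixels) as a square image in a grid.
    rows, cols = grid
    side = int(np.sqrt(W.shape[1]))
    fig, axes = plt.subplots(rows, cols, figsize=fig_size)
    for idx, ax in enumerate(np.atleast_1d(axes).ravel()):
        ax.imshow(W[idx].reshape(side, side), cmap='gray', interpolation='nearest')
        ax.axis('off')
        if titles is not None:
            ax.set_title(titles[idx])
    fig.savefig(path)
    plt.close(fig)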

Example #36
0
            loss = da1.train(x_batch)
            sum_loss += float(loss.data) * batchsize
        print 'train mean loss={}'.format(sum_loss / N)
        losses += [sum_loss / N]
        # Evaluation
        sum_loss = 0
        for i in xrange(0, N_test, batchsize):
            x_batch = x_test[i:i+batchsize]
            loss = da1.test(x_batch)
            sum_loss += float(loss.data) * batchsize
        print 'test mean loss={}'.format(sum_loss / N_test)
        losses += [sum_loss / N_test]
        all_loss.append(losses)
        # Visualization
        h1 = MinMaxScaler().fit_transform(da1.model.encoder.W)
        visualize(h1, '../img/sda/da1/sda_da1_{0:04d}.jpg'.format(epoch + 1), (8, 8), (10, 10))
    pickle.dump(da1, open(da1_filename, 'wb'))
    pickle.dump(all_loss, open('../output/sda/loss_da1.pkl', 'wb'))

# Second layer
da2_filename = '../output/sda/model_da2.pkl'
try:
    da2 = pickle.load(open(da2_filename))
except IOError:
    da2 = DenoisingAutoencoder(100, 49, noise=SaltAndPepperNoise())
    h1_train = da1.encode(Variable(x_train)).data
    h1_test  = da1.encode(Variable(x_test)).data
    n_epoch = 30
    all_loss = []
    for epoch in xrange(n_epoch):
        print 'epoch', epoch