Example no. 1
    def __init__(self):

        # Detection or Classification
        self.training_type = config['training_mode']
        self.ds = Dataset(config)
        self.ds.anchors, self.ds.anchors_coords = self.ds.get_anchors()

        if (self.training_type == 'detection'):
            self.data_loader = self.load_dataloader(mode=self.training_type)
        elif (self.training_type == 'classification'):
            self.data_loader = self.load_dataloader(mode=self.training_type)
        pass
Example no. 2
def main():
    ds = Dataset(config)
    imgs, annots = ds.open_traffic_ds(config)
    dp = DataPrepper(x_data=imgs, y_data=annots)

    dp.x_data_scaled, dp.y_data_scaled = dp.rescale_data(dp.x_data, dp.y_data)
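    # Cluster the rescaled ground-truth boxes with k-means to derive k anchor priors (YOLO-style)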
    km = KMeans(k=args.k, dataset=dp.y_data_scaled)
    if args.fit_avg:
        km.fit_average(max_iterations=args.kmeans_iters)
    else:
        km.fit()
    if args.save_anchors:
        km.write_anchors(km.centroids)
parser.add_argument("--decay_step", type=int, default=1, help="learning rate decay steps")
parser.add_argument("--minimal_lr", type=float, default=1e-4, help="minimal learning rate")
parser.add_argument("--optimizer", type=str, default="adam", help="optimizer: [rmsprop | adadelta | adam | ...]")
parser.add_argument("--grad_clip", type=float, default=5.0, help="maximal gradient norm")
parser.add_argument("--epochs", type=int, default=50, help="train epochs")
parser.add_argument("--batch_size", type=int, default=20, help="batch size")
parser.add_argument("--max_to_keep", type=int, default=1, help="maximum trained model to be saved")
parser.add_argument("--no_imprv_tolerance", type=int, default=None, help="no improvement tolerance")
config = Configurations(parser.parse_args())

# os environment
os.environ['TF_CPP_MIN_LOG_LEVEL'] = config.log_level
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_idx

# if dataset is not prepared, then build it
if not os.path.exists(config.save_path) or not os.listdir(config.save_path):
    process_data(config)

print("load dataset...")
dataset = Dataset(config.train_set, config.dev_set, config.test_set, batch_size=config.batch_size, shuffle=True)

print("build model and train...")
model = BiLSTMCRFModel(config)
if config.restore:
    model.restore_last_session()
if config.train:
    model.train(dataset)
model.restore_last_session()
model.evaluate(dataset.get_data_batches("test"), name="test")
model.close_session()
Example no. 4
def train(model_name, 
        backbone, 
        train_img, 
        train_annot,
        shuffle = False, 
        input_shape = (None, None, 3),
        image_format = 'channels_last',
        label_path = None,
        verify_dataset = True,
        checkpoint_path = None,
        epochs = 1,
        batch_size = 2,
        validate = False,
        val_img = None,
        val_annot = None,
        val_shuffle = False,
        val_batch_size = 1,
        optimizer_name = 'adam',
        loss_name = 'categorical_crossentropy',
        data_augment = False,
        load_weights = None,
        resume_checkpoint = False):
    
    classes = utils.get_label(label_path)
    n_classes = len(classes[0])

    model = model_from_name.get_model(model_name)(n_classes = n_classes,
                                            backbone = backbone,
                                            input_shape = input_shape,
                                            image_format = image_format)
    optimizer = optimizer_name
    loss = loss_name
    metrics = [tf.keras.metrics.MeanIoU(num_classes=n_classes)]
    model.compile(optimizer, loss, metrics)
    model.summary()
    tf.keras.utils.plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)

    if checkpoint_path is None:
        checkpoint_path = os.path.join(os.getcwd(), "checkpoint")
    if not os.path.isdir(checkpoint_path):
        print("creating checkpoint folder: ", checkpoint_path)
        os.mkdir(checkpoint_path)
    
    if resume_checkpoint:
        last_checkpoint = find_checkpoint(checkpoint_path)
        print("Loading the weights from latest checkpoint ",
                last_checkpoint)
        model.load_weights(last_checkpoint)
    
    if verify_dataset:
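        # Sanity-check that images and annotations are consistent before training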
        assert utils.verify_dataset(train_img, train_annot)

    train_dataset = Dataset(
        train_img,
        train_annot,
        classes,
        preprocessing=utils.preprocessing
        # resize=(384, 512),
        # resample='bilinear'
    )
    train_dataloader = Dataloader(train_dataset, batch_size=batch_size, shuffle=shuffle)

    if validate:
        if verify_dataset:
            assert utils.verify_dataset(val_img, val_annot)
        valid_dataset = Dataset(
            val_img,
            val_annot,
            classes,
            preprocessing=utils.preprocessing
            # resize=(384, 512),
            # resample='bilinear'
        )
        valid_dataloader = Dataloader(valid_dataset, batch_size=val_batch_size, shuffle=val_shuffle)
    
    output_checkpoint = os.path.join(checkpoint_path, "model-{epoch:04d}.h5")
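    # Save a checkpoint after every epoch and log training metrics to TensorBoard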
    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(output_checkpoint),
        tf.keras.callbacks.TensorBoard()
    ]

    if validate:
        history = model.fit(
            train_dataloader,
            epochs = epochs,
            callbacks = callbacks,
            steps_per_epoch=len(train_dataloader),
            validation_data = valid_dataloader,
            validation_steps = len(valid_dataloader),
            use_multiprocessing = False
        )
    else:
        history = model.fit(
            train_dataloader,
            epochs = epochs,
            callbacks = callbacks,
            steps_per_epoch=len(train_dataloader),
            use_multiprocessing = True
        )
Example no. 5
    def __init__(self):
        self.ds = Dataset(config)
        self.ds.anchors, self.ds.anchors_coords = self.ds.get_anchors()
        pass
Example no. 6
class Tester:
    def __init__(self):
        self.ds = Dataset(config)
        self.ds.anchors, self.ds.anchors_coords = self.ds.get_anchors()
        pass

    def load_dataloader(self):
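        # Build a DataLoader over the VOC test split (batch size 1, one image per step)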
        root_loc = config['home_dirs'][1] if config['use_colab'] else config[
            'home_dirs'][0]
        num_workers = 1 if config['use_gpu'] else config['num_workers']
        dataset = VOCDetection(root=root_loc + config['test_data_loc'])
        data_loader = torch.utils.data.DataLoader(
            dataset,
            1,
            shuffle=config['shuffle_data'],
            num_workers=num_workers,
            collate_fn=detection_collate,
            drop_last=True)
        return data_loader

    def open_model(self, model_loc, device):
        print("-- Opening Model --")
        root_loc = config['home_dirs'][1] if config['use_colab'] else config[
            'home_dirs'][0]
        yolo = YOLOv2(config=config,
                      mode='test',
                      anchor_boxes=self.ds.anchors,
                      device=device)
        yolo.load_state_dict(
            torch.load(root_loc + model_loc, map_location=torch.device('cpu')))
        yolo.eval()
        if (config['use_gpu'] and torch.cuda.is_available()):
            yolo = yolo.to(device)
        print("-- Successfully Loaded Model --")
        return yolo

    def training_optimizer(self, optimizer_name, model):
        if (optimizer_name == 'adam'):
            optimizer = torch.optim.Adam(params=model.parameters(),
                                         lr=config['d_learning_rate'],
                                         weight_decay=config['d_weight_decay'])
            return optimizer
        elif (optimizer_name == 'sgd'):
            optimizer = torch.optim.SGD(params=model.parameters(),
                                        lr=config['d_learning_rate'],
                                        momentum=config['d_momentum'],
                                        weight_decay=config['d_weight_decay'])
            return optimizer

    def load_checkpoint(self, checkpoint_loc):
        root_loc = config['home_dirs'][1] if config['use_colab'] else config[
            'home_dirs'][0]
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        yolo = YOLOv2(config=config,
                      mode='test',
                      anchor_boxes=self.ds.anchors,
                      device=device)
        if (config['use_gpu'] and torch.cuda.is_available()):
            yolo = yolo.to(device)
        optimizer = self.training_optimizer(config['optimizer_type'], yolo)

        checkpoint = torch.load(root_loc + checkpoint_loc + 'checkpoint.pt',
                                map_location=device)
        yolo.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        epoch = checkpoint['epoch']
        total_loss = checkpoint['total_loss']
        conf_loss = checkpoint['conf_loss']
        localization_loss = checkpoint['bbox_loss']
        cls_loss = checkpoint['cls_loss']
        yolo.eval()
        return yolo, optimizer, epoch, total_loss, conf_loss, localization_loss, cls_loss

    def test_single_example(self, dataloader):
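        # Run a single batch through the model and visualize its predicted boxes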
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        if (config['warmstart']):
            # Load latest checkpoint
            yolo, optimizer, curr_epoch, total_loss, conf_loss, localization_loss, cls_loss = self.load_checkpoint(
                config['checkpoints_loc'])
        else:
            yolo = self.open_model(config['model_loc'], device)
        criterion = YOLO_SSE(config=config, device=device)

        def testing_loop(dataloader):
            for idx, (images, targets) in enumerate(dataloader):
                targets = self.ds.generate_gt_data(targets)
                targets = torch.from_numpy(targets).float()
                if (config['use_gpu'] and torch.cuda.is_available()):
                    images = images.to(device)
                    targets = targets.to(device)
                # Get predictions on image from the model
                bbox_preds, cls_preds, conf_preds, preds = yolo(
                    images, targets)
                images = images.squeeze(0)
                images = images.cpu().numpy()
                print(images.shape)
                self.examine_predictions(images, bbox_preds)
                break

            pass

        testing_loop(dataloader=dataloader)
        pass

    def test(self, dataloader):
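        # Run the whole test set, collecting ground truth and predictions (the mAP evaluation below is currently commented out)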
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        if (config['warmstart']):
            # Load latest checkpoint
            yolo, optimizer, curr_epoch, total_loss, conf_loss, localization_loss, cls_loss = self.load_checkpoint(
                config['checkpoints_loc'])
        else:
            yolo = self.open_model(config['model_loc'], device)
        criterion = YOLO_SSE(config=config, device=device)

        def testing_loop(dataloader):
            predictions = []
            gt = []
            for idx, (images, targets) in enumerate(dataloader):
                gt.append(targets)
                targets = self.ds.generate_gt_data(targets)
                targets = torch.from_numpy(targets).float()
                if (config['use_gpu'] and torch.cuda.is_available()):
                    images = images.to(device)
                    targets = targets.to(device)
                # Get predictions on image from the model
                bbox_preds, cls_preds, conf_preds, preds = yolo(
                    images, targets)
                predictions.append(preds)

                images = images.squeeze(0)
            return gt, predictions

        gt, predictions = testing_loop(dataloader=dataloader)

        # -- Evaluation --
        # print("--- Evaluation Results ---")
        # e = Evaluator(config,gt,predictions)
        # print(len(gt))
        # print(len(predictions))
        # e.mAP(gt,predictions)
        pass

    def unnormalize_img(self, img):
        img = img[:, :, ::-1].transpose(1, 2, 0)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img *= 255.0
        img = np.ascontiguousarray(img, dtype=np.uint8)
        return img

    def examine_predictions(self, img=None, bbox_preds=None, show_grid=True):
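        # Undo normalization, then overlay the feature-map grid (when show_grid is set) and the predicted boxes on the image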
        img = self.unnormalize_img(img)

        x = np.floor(config['img_size'] / config['num_grid_cells'])
        y = np.floor(config['img_size'] / config['num_grid_cells'])
        move_x = x
        move_y = y

        fig, ax = plt.subplots(1)
        ax.imshow(img)

        # Shows the Feature map grid size
        if (show_grid):
            for b in range(1):
                for grid_row in range(config['img_size'] //
                                      config['grid_stride']):
                    plt.plot([0, config['img_size']], [move_y, move_y],
                             color='y',
                             marker='.')
                    for grid_col in range(config['img_size'] //
                                          config['grid_stride']):
                        plt.plot([move_x, move_x], [0, config['img_size']],
                                 color='y',
                                 marker='.')
                        move_x += x
                    move_x = x
                    move_y += y
        # Now add boxes
        for b in range(1):
            for idx, bbox in enumerate(bbox_preds):
                rect = patches.Rectangle((bbox[0] * x, bbox[1] * y),
                                         (bbox[2] - bbox[0]) * x,
                                         (bbox[3] - bbox[1]) * y,
                                         linewidth=2,
                                         edgecolor='r',
                                         facecolor='none')
                ax.add_patch(rect)
        #plt.savefig(config['save_plots_loc']+'predictions_im2.png')

    def examine_predictions2(self, img, bbox_preds):
        img = self.unnormalize_img(img)

        x = np.floor(config['img_size'] / config['num_grid_cells'])
        y = np.floor(config['img_size'] / config['num_grid_cells'])
        move_x = x
        move_y = y

        fig, ax = plt.subplots(1)
        ax.imshow(img)
        for b in range(1):
            for grid_row in range(config['img_size'] // config['grid_stride']):
                plt.plot([0, config['img_size']], [move_y, move_y],
                         color='y',
                         marker='.')
                for grid_col in range(config['img_size'] //
                                      config['grid_stride']):
                    plt.plot([move_x, move_x], [0, config['img_size']],
                             color='y',
                             marker='.')

                    # Draw Anchors
                    for anchor in range(len(self.ds.anchors)):
                        # Draw Predicitions
                        # bbox = bxbybwbh
                        bbox = bbox_preds[b][grid_row + grid_col][anchor]

                        rect = patches.Rectangle((bbox[0] * x, bbox[1] * y),
                                                 bbox[2],
                                                 bbox[3],
                                                 linewidth=2,
                                                 edgecolor='r',
                                                 facecolor='none')
                        ax.add_patch(rect)
                    move_x += x
                move_x = x
                move_y += y

        plt.savefig(config['save_plots_loc'] + 'predictions_im.png')
Example no. 7
parser.add_argument("--optimizer", type=str, default="lazyadam", help="optimizer: [rmsprop | adadelta | adam | ...]")
parser.add_argument("--grad_clip", type=float, default=5.0, help="maximal gradient norm")
parser.add_argument("--epochs", type=int, default=50, help="train epochs")
parser.add_argument("--batch_size", type=int, default=32, help="batch size")
parser.add_argument("--emb_drop_rate", type=float, default=0.2, help="dropout rate for embeddings")
parser.add_argument("--rnn_drop_rate", type=float, default=0.5, help="dropout rate for embeddings")
parser.add_argument("--max_to_keep", type=int, default=1, help="maximum trained model to be saved")
parser.add_argument("--model_name", type=str, default="datnetf_model", help="model name")
parser.add_argument("--no_imprv_tolerance", type=int, default=None, help="no improvement tolerance")
config = parser.parse_args()

# os environment
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_idx

print("load dataset...")
src_datasets, tgt_datasets, vocab = process_transfer(config)
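# train_rate is treated as an absolute count when > 1, otherwise as a fraction of the training data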
train_rate = int(config.train_rate) if float(config.train_rate) > 1.0 else float(config.train_rate)
src_dataset = Dataset(src_datasets, batch_size=config.batch_size, shuffle=True)
tgt_dataset = Dataset(tgt_datasets, batch_size=config.batch_size, train_rate=train_rate, shuffle=True)

print("build model...")
model = DATNetFModel(config, vocab)
if config.restore_model:
    model.restore_last_session()
if config.train:
    model.train(src_dataset, tgt_dataset)
model.restore_last_session()
model.evaluate_data(tgt_dataset.test_batches(), name="target_test", resource="target")
model.close_session()
Example no. 8
parser.add_argument("--no_imprv_tolerance",
                    type=int,
                    default=None,
                    help="no improvement tolerance")
config = parser.parse_args()

# os environment
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_idx

print("load dataset...")
src_datasets, tgt_datasets, vocab = process_transfer(config)
train_rate = int(
    config.train_rate) if float(config.train_rate) > 1.0 else float(
        config.train_rate)
src_dataset = Dataset(src_datasets, batch_size=config.batch_size, shuffle=True)
tgt_dataset = Dataset(tgt_datasets,
                      batch_size=config.batch_size,
                      train_rate=train_rate,
                      shuffle=True)

print("build model and train...")
model = DATNetPModel(config, vocab)
if config.restore_model:
    model.restore_last_session()
if config.train:
    model.train(src_dataset, tgt_dataset)
model.restore_last_session()
model.evaluate_data(tgt_dataset.test_batches(),
                    "target_test",
                    resource="target")
Example no. 9
                    help="maximum trained model to be saved")
parser.add_argument("--no_imprv_tolerance",
                    type=int,
                    default=None,
                    help="no improvement tolerance")
config = parser.parse_args()

# os environment
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_idx

print("load dataset...")
datasets, vocab = process_base(config)
train_rate = int(
    config.train_rate) if float(config.train_rate) > 1.0 else float(
        config.train_rate)
dataset = Dataset(datasets,
                  batch_size=config.batch_size,
                  train_rate=train_rate,
                  shuffle=True)

print("build model and train...")
model = BaseModel(config, vocab)
if config.restore_model:
    model.restore_last_session()
if config.train:
    model.train(dataset)
model.restore_last_session()
model.evaluate_data(dataset.test_batches(), name="test")
model.close_session()
Example no. 10
# os environment
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_idx

# if dataset is not prepared, then build it
if not os.path.exists(config.save_path) or not os.listdir(config.save_path):
    process_transfer(config)

print("load dataset...")
train_ratio = int(
    config.train_ratio) if float(config.train_ratio) > 1.0 else float(
        config.train_ratio)
src_dataset = Dataset(config.src_train_set,
                      config.src_dev_set,
                      config.src_test_set,
                      batch_size=config.batch_size,
                      shuffle=True)
tgt_dataset = Dataset(config.tgt_train_set,
                      config.tgt_dev_set,
                      config.tgt_test_set,
                      batch_size=config.batch_size,
                      train_rate=train_ratio,
                      shuffle=True)

print("build model and train...")
model = DATNetPModel(config)
if config.restore_model:
    model.restore_last_session()
if config.train:
    model.train(src_dataset, tgt_dataset)
Example no. 11
class Trainer:
    def __init__(self):

        # Detection or Classification
        self.training_type = config['training_mode']
        self.ds = Dataset(config)
        self.ds.anchors, self.ds.anchors_coords = self.ds.get_anchors()

        if (self.training_type == 'detection'):
            self.data_loader = self.load_dataloader(mode=self.training_type)
        elif (self.training_type == 'classification'):
            self.data_loader = self.load_dataloader(mode=self.training_type)
        pass

    def load_dataloader(self, mode):
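        # Build the training DataLoader for either the detection (VOC) or classification dataset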
        if (mode == 'detection'):
            root_loc = config['home_dirs'][1] if config[
                'use_colab'] else config['home_dirs'][0]
            num_workers = 1 if config['use_gpu'] else config['num_workers']
            dataset = VOCDetection(root=root_loc + config['train_data_loc'])
            data_loader = torch.utils.data.DataLoader(
                dataset,
                config['batch_size'],
                shuffle=config['shuffle_data'],
                num_workers=num_workers,
                collate_fn=detection_collate,
                drop_last=True)
            return data_loader
        elif (mode == 'classification'):
            root_loc = config['home_dirs'][1] if config[
                'use_colab'] else config['home_dirs'][0]
            num_workers = 1 if config['use_gpu'] else config['num_workers']
            dataset = ClassificationDataset(root=root_loc +
                                            config['train_data_loc'])
            data_loader = torch.utils.data.DataLoader(
                dataset,
                config['batch_size'],
                shuffle=config['shuffle_data'],
                num_workers=num_workers,
                collate_fn=classification_collate,
                drop_last=True)

            return data_loader

    def classification_train(self, train_set):

        darknet19 = DarkNet19(config)
        optimizer = torch.optim.SGD(params=darknet19.parameters(),
                                    lr=config['d_learning_rate'],
                                    momentum=config['d_momentum'],
                                    weight_decay=config['d_weight_decay'])

        def train_loop(num_epochs, train_set):
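            # NOTE: the forward pass and loss computation are not implemented yet; this loop only zeroes and steps the optimizer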
            total_loss, conf_loss, localization_loss, cls_loss = 0, 0, 0, 0
            print('=============== TRAINING STARTED =====================')
            for epoch in range(config['d_num_epochs']):
                for i, (images, targets) in enumerate(train_set):
                    optimizer.zero_grad()

                    optimizer.step()
                    pass

        #train_loop(config['c_num_epochs'],train_set)
        pass

    def show_information(self, device):
        input_size = [config['img_size'], config['img_size']]
        training_info = {
            "Input size : ": input_size,
            "Batch size : ": config['batch_size'],
            "Learning Rate : ": config['d_learning_rate'],
            "Epochs : ": config['d_num_epochs'],
            "Device : ": device,
            "Pre-trained : ": config['use_pretrained']
        }
        for k, v in training_info.items():
            print(k, v)
        pass

    def training_optimizer(self, optimizer_name, model):
        if (optimizer_name == 'adam'):
            optimizer = torch.optim.Adam(params=model.parameters(),
                                         lr=config['d_learning_rate'],
                                         eps=config['optimizer_eps'],
                                         weight_decay=config['d_weight_decay'])
            return optimizer
        elif (optimizer_name == 'sgd'):
            optimizer = torch.optim.SGD(params=model.parameters(),
                                        lr=config['d_learning_rate'],
                                        momentum=config['d_momentum'],
                                        weight_decay=config['d_weight_decay'])
            return optimizer
        elif (optimizer_name == 'adagrad'):
            optimizer = torch.optim.Adagrad(params=model.parameters(),
                                            lr=config['d_learning_rate'],
                                            lr_decay=0,
                                            weight_decay=0,
                                            initial_accumulator_value=0,
                                            eps=1e-10)
            return optimizer

    def detection_train(self, train_set):
        # -- Initializing primary vars --
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        yolo = None
        optimizer = None
        curr_epoch = 0
        total_loss, conf_loss, localization_loss, cls_loss = 0, 0, 0, 0

        # -- Getting Checkpointed Model or training from scratch? --
        if (config['warmstart']):
            # Load latest checkpoint
            yolo, optimizer, curr_epoch, total_loss, conf_loss, localization_loss, cls_loss = self.load_checkpoint(
                config['checkpoints_loc'])
        else:
            # Create YOLOv2 model with fresh weights
            yolo = YOLOv2(config,
                          mode='train',
                          anchor_boxes=self.ds.anchors,
                          device=device)
            if (config['use_gpu'] and torch.cuda.is_available()):
                yolo = yolo.to(device)
            optimizer = self.training_optimizer(config['optimizer_type'], yolo)
            if (config['use_pretrained']):
                root_loc = config['home_dirs'][1] if config[
                    'use_colab'] else config['home_dirs'][0]
                # Loading backbone Darknet19 pretrained weights
                yolo.load_from_npz(root_loc +
                                   config['c_pretrained_weights_loc'])

        # -- YOLO Loss Fn for YOLOv2
        criterion = YOLO_SSE(config=config,
                             anchor_boxes=self.ds.anchors,
                             device=device)

        # -- Display Training Parameters --
        self.show_information(device)

        def train_loop(curr_epoch, num_epochs, train_set):
            print('=============== TRAINING STARTED =====================')
            for epoch in range(curr_epoch,
                               curr_epoch + config['d_num_epochs']):
                epoch_total_loss, epoch_conf_loss, epoch_bbox_loss, epoch_cls_loss = 0, 0, 0, 0

                # Forward pass thru each example in model
                for i, (images, targets) in tqdm(enumerate(train_set),
                                                 total=len(train_set)):
                    # Zero out all gradients
                    optimizer.zero_grad()

                    # Transform ground truth data
                    targets = self.ds.generate_gt_data(targets)
                    targets = torch.from_numpy(targets).float()

                    if (config['use_gpu'] and torch.cuda.is_available()):
                        images = images.to(device)
                        targets = targets.to(device)

                    # Get predictions on image from the model
                    bxbybwbh_preds, cls_preds, conf_preds = yolo(
                        images, targets)

                    # Compare predictions and calculate the loss from the loss function
                    total_loss, conf_loss, localization_loss, cls_loss = criterion(
                        targets, bxbybwbh_preds, conf_preds, cls_preds)
                    print(
                        "[Epoch {0}/{1}]: Total Loss: {2:.2f} | Conf Loss: {3:.2f} | BBox Loss: {4:.2f} | Cls Loss: {5:.2f}"
                        .format(epoch + 1, config['d_num_epochs'], total_loss,
                                conf_loss, localization_loss, cls_loss))

                    epoch_total_loss += total_loss
                    epoch_conf_loss += conf_loss
                    epoch_bbox_loss += localization_loss
                    epoch_cls_loss += cls_loss

                    # back prop
                    total_loss.backward()
                    if (config['clip_grad']):
                        torch.nn.utils.clip_grad_norm_(
                            yolo.parameters(),
                            max_norm=config['clip_grad_max_norm'])
                    optimizer.step()
                    #self.examine_predictions(images[0].data.numpy(),targets.data.numpy())
                print(
                    "=========================================================================================================="
                )
                print(
                    "[Epoch {0}/{1}]: Total Loss: {2:.2f} | Conf Loss: {3:.2f} | BBox Loss: {4:.2f} | Cls Loss: {5:.2f}"
                    .format(epoch + 1, config['d_num_epochs'],
                            epoch_total_loss, epoch_conf_loss, epoch_bbox_loss,
                            epoch_cls_loss))
                # -- Save Model --
                #self.save_model(yolo,epoch)

                # self.checkpoint_model(checkpoint_info={
                #     "model_state_dict" : yolo.state_dict(),
                #     "optimizer_state_dict" : optimizer.state_dict(),
                #     "epoch" : epoch,
                #     "total_loss" : total_loss,
                #     "conf_loss" : conf_loss,
                #     "bbox_loss" : localization_loss,
                #     "cls_loss" : cls_loss
                # })

                #break
                pass

        train_loop(curr_epoch, config['d_num_epochs'], train_set)
        pass

    def load_checkpoint(self, checkpoint_loc):
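        # Restore model and optimizer state from checkpoint.pt and resume in train mode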
        root_loc = config['home_dirs'][1] if config['use_colab'] else config[
            'home_dirs'][0]
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        yolo = YOLOv2(config=config,
                      mode='train',
                      anchor_boxes=self.ds.anchors,
                      device=device)
        if (config['use_gpu'] and torch.cuda.is_available()):
            yolo = yolo.to(device)
        optimizer = self.training_optimizer(config['optimizer_type'], yolo)

        checkpoint = torch.load(root_loc + checkpoint_loc + 'checkpoint.pt',
                                map_location=device)
        yolo.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        epoch = checkpoint['epoch']
        total_loss = checkpoint['total_loss']
        conf_loss = checkpoint['conf_loss']
        localization_loss = checkpoint['bbox_loss']
        cls_loss = checkpoint['cls_loss']
        yolo.train()
        return yolo, optimizer, epoch, total_loss, conf_loss, localization_loss, cls_loss

    def checkpoint_model(self, checkpoint_info):
        chck_pt_loc = config['checkpoints_loc']
        root_loc = config['home_dirs'][1] if config['use_colab'] else config[
            'home_dirs'][0]
        save_loc = root_loc + chck_pt_loc
        model_name = "checkpoint.pt"
        torch.save(checkpoint_info, save_loc + model_name)

    def save_model(self, model, num_epoch=0):
        chck_pt_loc = config['checkpoints_loc']
        root_loc = config['home_dirs'][1] if config['use_colab'] else config[
            'home_dirs'][0]
        save_loc = root_loc + chck_pt_loc
        model_name = "epoch_{}.pt".format(num_epoch)
        torch.save(model.state_dict(), save_loc + model_name)
        pass

    def unnormalize_img(self, img):
        mean = config['ds_mean']
        std = config['ds_std']

        img = img[:, :, :].transpose(1, 2, 0)

        img *= 255.0

        # # Multiply by std
        # img[...,0] *= std[0]
        # img[...,1] *= std[1]
        # img[...,2] *= std[2]

        # # Add data by the mean
        # img[...,0] += mean[0]
        # img[...,1] += mean[1]
        # img[...,2] += mean[2]

        img = np.ascontiguousarray(img, dtype=np.uint8)
        return img

    def examine_predictions(self, img, bxbybwbh_preds):
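        # Undo normalization, then overlay the feature-map grid and the predicted box for every grid cell and anchor, and save the figure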
        img = self.unnormalize_img(img)

        x = np.floor(config['img_size'] / config['num_grid_cells'])
        y = np.floor(config['img_size'] / config['num_grid_cells'])
        move_x = x
        move_y = y

        fig, ax = plt.subplots(1)
        ax.imshow(img)
        for b in range(config['batch_size']):
            for grid_row in range(config['img_size'] // config['grid_stride']):
                plt.plot([0, config['img_size']], [move_y, move_y],
                         color='y',
                         marker='.')
                for grid_col in range(config['img_size'] //
                                      config['grid_stride']):
                    plt.plot([move_x, move_x], [0, config['img_size']],
                             color='y',
                             marker='.')
                    move_x += x
                move_x = x
                move_y += y

        for b in range(config['batch_size']):
            for grid_cell in range(169):
                # Draw Anchors
                for anchor in range(5):
                    # Draw Predicitions
                    bbox = bxbybwbh_preds[b, grid_cell, anchor]

                    rect = patches.Rectangle((bbox[0], bbox[1]),
                                             bbox[2] - bbox[0],
                                             bbox[3] - bbox[1],
                                             linewidth=2,
                                             edgecolor='r',
                                             facecolor='none')
                    ax.add_patch(rect)
        plt.savefig(config['save_plots_loc'] + 'training_predictions_im.png')
        sys.exit()
        pass
Example no. 12
                    default=[5, 10],
                    help="fold range of unlabeled set")
config = Configurations(parser.parse_args())

# os environment
os.environ['TF_CPP_MIN_LOG_LEVEL'] = config.log_level
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_idx

# if dataset is not prepared, then build it
if not os.path.exists(config.save_path) or not os.listdir(config.save_path):
    process_data(config)

print("load dataset...")
label_dataset = Dataset(config.train_set,
                        config.dev_set,
                        config.test_set,
                        batch_size=config.batch_size,
                        fold=config.labeled_range,
                        shuffle=True)
partial_dataset = Dataset(config.train_set_p,
                          None,
                          None,
                          batch_size=config.batch_size,
                          fold=config.partial_range,
                          shuffle=True)
unlabeled_dataset = Dataset(config.train_set_u,
                            None,
                            None,
                            batch_size=config.batch_size,
                            fold=config.unlabeled_range,
                            shuffle=True)