Example #1
def publish(id):
    if not id:
        return server_error('Missing capital id')

    try:
        entity = capital.get(id)
    except Exception:
        return not_found_error('Capital record not found')

    try:
        payload = request.get_json()
        # Topic paths look like "projects/<project>/topics/<topic>";
        # fall back to the default project when none is given.
        part = payload['topic'].split('/')
        topicName = part[-1]
        if len(part) > 1:
            projectName = part[1]
        else:
            projectName = 'hackathon-team-003'
        utility.log_info(topicName)
        client = pubsub.Client(project=projectName)
        topic = client.topic(topicName)
        if topic.exists():
            text = json.dumps(entity)
            encoded = text.encode('utf-8')
            messageid = topic.publish(encoded)
            return jsonify({'messageId': int(messageid)}), 200
        else:
            return not_found_error('Topic does not exist')
    except Exception:
        return server_error('Unexpected error')
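The server_error and not_found_error helpers are not part of the snippet; a minimal sketch of what they plausibly look like as Flask JSON error responses (the names come from the calls above, but the payload shape and status codes are assumptions):

from flask import jsonify

def server_error(message):
    # Hypothetical helper: wrap the message in a JSON body with HTTP 500.
    return jsonify({'error': message}), 500

def not_found_error(message):
    # Hypothetical helper: same JSON shape with HTTP 404.
    return jsonify({'error': message}), 404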
Example #2
def pubsub_receive():
    """Dump a received Pub/Sub push message to the log."""

    data = {}
    try:
        obj = request.get_json()
        utility.log_info(json.dumps(obj))

        # Push payloads carry the message body base64-encoded;
        # decode to text before logging and returning it.
        data = base64.b64decode(obj['message']['data']).decode('utf-8')
        utility.log_info(data)

    except Exception:
        # Swallow exceptions and still return 200 so Pub/Sub
        # does not keep retrying a malformed push.
        logging.exception('Oops!')

    return jsonify(data), 200
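For reference, a Pub/Sub push request body has roughly the shape sketched below (all field values here are illustrative); this is what pubsub_receive unpacks via obj['message']['data']:

import base64

# Illustrative push envelope; the IDs and subscription path are made up.
envelope = {
    'message': {
        'data': base64.b64encode(b'{"city": "Ottawa"}').decode('utf-8'),
        'messageId': '1234567890',
    },
    'subscription': 'projects/hackathon-team-003/subscriptions/demo',
}
print(base64.b64decode(envelope['message']['data']).decode('utf-8'))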
Example #3
def run_main(config):
    train_loss_total_avg = 0.0
    train_transform = transforms.Compose([
        CenterCrop2D((200, 200)),
        ElasticTransform(alpha_range=(28.0, 30.0),
                         sigma_range=(3.5, 4.0),
                         p=0.3),
        RandomAffine(degrees=4.6, scale=(0.98, 1.02), translate=(0.03, 0.03)),
        RandomTensorChannelShift((-0.10, 0.10)),
        ToTensor(),
        NormalizeInstance(),
    ])

    val_transform = transforms.Compose([
        CenterCrop2D((200, 200)),
        ToTensor(),
        NormalizeInstance(),
    ])

    # Here we assume that the SC GM Challenge data is inside the folder
    # "data" and it was previously resampled.
    gmdataset_train = SCGMChallenge2DTrain(root_dir="data",
                                           subj_ids=range(1, 9),
                                           transform=train_transform,
                                           slice_filter_fn=SliceFilter())

    # The same "data" folder is assumed here, already resampled;
    # subjects 9-10 are held out for validation.
    gmdataset_val = SCGMChallenge2DTrain(root_dir="data",
                                         subj_ids=range(9, 11),
                                         transform=val_transform)

    train_loader = DataLoader(gmdataset_train,
                              batch_size=16,
                              shuffle=True,
                              pin_memory=True,
                              collate_fn=mt_collate,
                              num_workers=1)

    val_loader = DataLoader(gmdataset_val,
                            batch_size=16,
                            shuffle=True,
                            pin_memory=True,
                            collate_fn=mt_collate,
                            num_workers=1)

    utility.create_log_file(config)
    utility.log_info(
        config, "{0}\nStarting experiment {1}\n{0}\n".format(
            50 * "=", utility.get_experiment_name(config)))
    model = Unet(drop_rate=0.4, bn_momentum=0.1, config=config)

    if config['operation_mode'].lower() in ("retrain", "inference"):
        print("Using a trained model...")
        model.load_state_dict(torch.load(config['trained_model']))
    elif config["operation_mode"].lower() == "visualize":
        print("Visualizing weights...")
        if cuda:
            model.load_state_dict(torch.load(config['trained_model']))
        else:
            model.load_state_dict(
                torch.load(config['trained_model'], map_location='cpu'))
        v.visualize_model(model, config)
        return

    if cuda:
        model.cuda()

    num_epochs = config["num_epochs"]
    initial_lr = config["lr"]

    optimizer = optim.Adam(model.parameters(), lr=initial_lr)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs)

    betas = torch.linspace(3.0, 8.0, num_epochs)
    best_dice = 0
    writer = SummaryWriter(log_dir=utility.get_experiment_dir(config))
    for epoch in tqdm(range(1, num_epochs + 1)):
        start_time = time.time()

        if config['operation_mode'].lower() != "inference":
            # scheduler.step() before optimizer.step() follows the
            # pre-1.1 PyTorch convention this code was written against.
            scheduler.step()

            lr = scheduler.get_lr()[0]
            model.beta = betas[epoch - 1]  # for ternary net, set beta
            writer.add_scalar('learning_rate', lr, epoch)

            model.train()
            train_loss_total = 0.0
            num_steps = 0
            for i, batch in enumerate(train_loader):
                input_samples, gt_samples = batch["input"], batch["gt"]
                if cuda:
                    var_input = input_samples.cuda()
                    var_gt = gt_samples.cuda()
                else:
                    var_input = input_samples
                    var_gt = gt_samples
                preds = model(var_input)

                loss = dice_loss(preds, var_gt)
                train_loss_total += loss.item()

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                num_steps += 1

                if epoch % 5 == 0:
                    grid_img = vutils.make_grid(input_samples,
                                                normalize=True,
                                                scale_each=True)
                    writer.add_image('Input', grid_img, epoch)

                    grid_img = vutils.make_grid(preds.data.cpu(),
                                                normalize=True,
                                                scale_each=True)
                    writer.add_image('Predictions', grid_img, epoch)

                    grid_img = vutils.make_grid(gt_samples,
                                                normalize=True,
                                                scale_each=True)
                    writer.add_image('Ground Truth', grid_img, epoch)

        if config['operation_mode'].lower() != "inference":
            train_loss_total_avg = train_loss_total / num_steps

        model.eval()
        val_loss_total = 0.0
        num_steps = 0

        metric_fns = [
            dice_score, hausdorff_score, precision_score, recall_score,
            specificity_score, intersection_over_union, accuracy_score
        ]

        metric_mgr = MetricManager(metric_fns)

        for i, batch in enumerate(val_loader):
            input_samples, gt_samples = batch["input"], batch["gt"]

            with torch.no_grad():
                if cuda:
                    var_input = input_samples.cuda()
                    var_gt = gt_samples.cuda()
                else:
                    var_input = input_samples
                    var_gt = gt_samples

                preds = model(var_input)
                loss = dice_loss(preds, var_gt)
                val_loss_total += loss.item()

            # Metrics computation
            gt_npy = gt_samples.numpy().astype(np.uint8)
            gt_npy = gt_npy.squeeze(axis=1)

            preds = preds.data.cpu().numpy()
            preds = threshold_predictions(preds)
            preds = preds.astype(np.uint8)
            preds = preds.squeeze(axis=1)

            metric_mgr(preds, gt_npy)

            num_steps += 1
        metrics_dict = metric_mgr.get_results()
        metric_mgr.reset()

        writer.add_scalars('metrics', metrics_dict, epoch)

        val_loss_total_avg = val_loss_total / num_steps

        if config['operation_mode'].lower() != "inference":
            writer.add_scalars('losses', {
                'val_loss': val_loss_total_avg,
                'train_loss': train_loss_total_avg
            }, epoch)

        end_time = time.time()
        total_time = end_time - start_time
        log_str = "Epoch {} took {:.2f} seconds dice_score={}.".format(
            epoch, total_time, metrics_dict["dice_score"])
        utility.log_info(config, log_str)
        tqdm.write(log_str)
        if metrics_dict["dice_score"] > best_dice:
            best_dice = metrics_dict["dice_score"]
            utility.save_model(model=model, config=config)
    if config['operation_mode'].lower() != "inference":
        utility.save_model(model=model, config=config)
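dice_loss and threshold_predictions are imported from elsewhere in the project; a plausible minimal sketch of both, assuming a soft Dice loss over sigmoid outputs and a fixed 0.5 cut-off (the real implementations may differ):

import numpy as np
import torch

def dice_loss(preds, targets, eps=1e-7):
    # Soft Dice loss: 1 - 2|P*G| / (|P| + |G|); assumes preds in [0, 1].
    preds = preds.reshape(preds.size(0), -1)
    targets = targets.reshape(targets.size(0), -1)
    intersection = (preds * targets).sum(dim=1)
    denom = preds.sum(dim=1) + targets.sum(dim=1)
    return (1.0 - (2.0 * intersection + eps) / (denom + eps)).mean()

def threshold_predictions(preds, thr=0.5):
    # Binarize soft predictions at a fixed threshold.
    return (preds > thr).astype(np.float32)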
Example #4
def run_main(config):
    dataset_base_path = "./data/"
    target_path = natsorted(glob(dataset_base_path + 'mask/*.png'))
    image_paths = natsorted(glob(dataset_base_path + 'img/*.png'))
    target_val_path = natsorted(glob(dataset_base_path + 'val_mask/*.png'))
    image_val_path = natsorted(glob(dataset_base_path + 'val_img/*.png'))

    nih_dataset_train = EMdataset(image_paths=image_paths,
                                  target_paths=target_path)
    nih_dataset_val = EMdataset(image_paths=image_val_path,
                                target_paths=target_val_path)

    train_loader = DataLoader(nih_dataset_train,
                              batch_size=16,
                              shuffle=True,
                              num_workers=1)
    val_loader = DataLoader(nih_dataset_val,
                            batch_size=16,
                            shuffle=True,
                            num_workers=1)
    model = m.Unet(drop_rate=0.4, bn_momentum=0.1, config=config)
    if config['operation_mode'].lower() in ("retrain", "inference"):
        print("Using a trained model...")
        model.load_state_dict(torch.load(config['trained_model']))
    elif config["operation_mode"].lower() == "visualize":
        print("Using a trained model...")
        if cuda:
            model.load_state_dict(torch.load(config['trained_model']))
        else:
            model.load_state_dict(
                torch.load(config['trained_model'], map_location='cpu'))
        v.visualize_model(model, config)
        return

    if cuda:
        model.cuda()
        print('GPU enabled')

    num_epochs = config["num_epochs"]
    initial_lr = config["lr"]
    experiment_path = config["log_output_dir"] + config['experiment_name']
    output_image_dir = experiment_path + "/figs/"

    betas = torch.linspace(3.0, 8.0, num_epochs)

    optimizer = optim.Adam(model.parameters(), lr=initial_lr)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs)

    writer = SummaryWriter(log_dir=utility.get_experiment_dir(config))
    best_score = 0
    for epoch in tqdm(range(1, num_epochs + 1)):
        start_time = time.time()

        scheduler.step()

        lr = scheduler.get_lr()[0]
        model.beta = betas[epoch - 1]  # for ternary net, set beta
        writer.add_scalar('learning_rate', lr, epoch)

        model.train()
        train_loss_total = 0.0
        num_steps = 0
        capture = True
        for i, batch in enumerate(train_loader):
            input_samples, gt_samples = batch[0], batch[1]

            if cuda:
                var_input = input_samples.cuda()
                var_gt = gt_samples.cuda()
            else:
                var_input = input_samples
                var_gt = gt_samples
            preds = model(var_input)
            # Cast the ground truth to float before computing the loss.
            var_gt = var_gt.float()
            loss = dice_loss(preds, var_gt)
            train_loss_total += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            num_steps += 1
            if capture:  # save one example figure per epoch
                capture = False
                input_samples, gt_samples = get_samples(
                    image_val_path, target_val_path, 4)
                if cuda:
                    input_samples = input_samples.cuda()
                preds = model(input_samples)
                input_samples = input_samples.data.cpu().numpy()
                preds = preds.data.cpu().numpy()
                save_image(input_samples[0][0], gt_samples[0][0], preds[0][0],
                           epoch, 0, output_image_dir)

        train_loss_total_avg = train_loss_total / num_steps

        model.eval()
        val_loss_total = 0.0
        num_steps = 0

        metric_fns = [
            dice_score, hausdorff_score, precision_score, recall_score,
            specificity_score, intersection_over_union, accuracy_score
        ]

        metric_mgr = MetricManager(metric_fns)

        for i, batch in enumerate(val_loader):
            input_samples, gt_samples = batch[0], batch[1]

            with torch.no_grad():
                if cuda:
                    var_input = input_samples.cuda()
                    var_gt = gt_samples.cuda(non_blocking=True)
                else:
                    var_input = input_samples
                    var_gt = gt_samples

                preds = model(var_input)
                loss = dice_loss(preds, var_gt)
                val_loss_total += loss.item()

            gt_npy = gt_samples.data.cpu().numpy()
            gt_npy = gt_npy.squeeze(axis=1)

            preds = preds.data.cpu().numpy()
            preds = threshold_predictions(preds)
            preds = preds.squeeze(axis=1)

            metric_mgr(preds, gt_npy)

            num_steps += 1

        metrics_dict = metric_mgr.get_results()
        metric_mgr.reset()

        writer.add_scalars('metrics', metrics_dict, epoch)

        val_loss_total_avg = val_loss_total / num_steps

        writer.add_scalars('losses', {
            'val_loss': val_loss_total_avg,
            'train_loss': train_loss_total_avg
        }, epoch)

        end_time = time.time()
        total_time = end_time - start_time
        msg = "Epoch {} took {:.2f} seconds dice_score={}. precision={} iou={} loss_train={} val_loss={}".format(
            epoch, total_time, metrics_dict["dice_score"],
            metrics_dict["precision_score"],
            metrics_dict["intersection_over_union"], train_loss_total_avg,
            val_loss_total_avg)
        utility.log_info(config, msg)
        tqdm.write(msg)

        if metrics_dict["dice_score"] > best_score:
            best_score = metrics_dict["dice_score"]
            utility.save_model(model=model, config=config)

    if config['operation_mode'].lower() != "inference":
        utility.save_model(model=model, config=config)
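get_samples is another project helper not shown here; a rough sketch of what the call above implies (load the first n validation image/mask pairs as (n, 1, H, W) float tensors; the real helper may sample randomly or apply the dataset transforms):

import numpy as np
import torch
from PIL import Image

def get_samples(image_paths, target_paths, n):
    # Hypothetical loader: read n grayscale image/mask pairs,
    # scale to [0, 1], and stack into (n, 1, H, W) tensors.
    images, masks = [], []
    for img_path, mask_path in list(zip(image_paths, target_paths))[:n]:
        img = np.asarray(Image.open(img_path).convert('L'), dtype=np.float32) / 255.0
        mask = np.asarray(Image.open(mask_path).convert('L'), dtype=np.float32) / 255.0
        images.append(torch.from_numpy(img).unsqueeze(0))
        masks.append(torch.from_numpy(mask).unsqueeze(0))
    return torch.stack(images), torch.stack(masks)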
Example #5
def publish_capital(self, city_id, topicname):
    city = self.fetch_capital(city_id)
    if city is not None:
        ps = mypubsub.PubSub()
        utility.log_info('TOPIC = ' + topicname)
        return ps.publish(topicname, city)
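mypubsub.PubSub is a project-local wrapper that is not included; a rough sketch of the interface the call above implies, written against the google-cloud-pubsub v1 client (the class shape and default project are assumptions):

import json
from google.cloud import pubsub_v1

class PubSub:
    # Hypothetical wrapper matching ps.publish(topicname, city) above.
    def __init__(self, project='hackathon-team-003'):
        self.project = project
        self.publisher = pubsub_v1.PublisherClient()

    def publish(self, topicname, payload):
        # Serialize the entity, publish it, and return the message ID.
        topic_path = self.publisher.topic_path(self.project, topicname)
        future = self.publisher.publish(topic_path,
                                        json.dumps(payload).encode('utf-8'))
        return future.result()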
Example #6
def run_main(config):
    # NOTE: these transforms are built but never passed to EMdataset below.
    train_transform = transforms.Compose([
        CenterCrop2D((200, 200)),
        ElasticTransform(alpha_range=(28.0, 30.0),
                         sigma_range=(3.5, 4.0),
                         p=0.3),
        RandomAffine(degrees=4.6, scale=(0.98, 1.02), translate=(0.03, 0.03)),
        RandomTensorChannelShift((-0.10, 0.10)),
        ToTensor(),
        NormalizeInstance(),
    ])

    val_transform = transforms.Compose([
        CenterCrop2D((200, 200)),
        ToTensor(),
        NormalizeInstance(),
    ])
    dataset_base_path = "/export/tmp/hemmat/datasets/em_challenge/"
    target_path = natsort.natsorted(glob.glob(dataset_base_path +
                                              'mask/*.PNG'))
    image_paths = natsort.natsorted(glob.glob(dataset_base_path +
                                              'data/*.PNG'))
    target_val_path = natsort.natsorted(
        glob.glob(dataset_base_path + 'val_mask/*.PNG'))
    image_val_path = natsort.natsorted(
        glob.glob(dataset_base_path + 'val_img/*.PNG'))

    gmdataset_train = EMdataset(image_paths=image_paths,
                                target_paths=target_path)
    gmdataset_val = EMdataset(image_paths=image_val_path,
                              target_paths=target_val_path)
    train_loader = DataLoader(gmdataset_train,
                              batch_size=5,
                              shuffle=True,
                              num_workers=1)
    val_loader = DataLoader(gmdataset_val,
                            batch_size=4,
                            shuffle=True,
                            num_workers=1)

    utility.create_log_file(config)
    utility.log_info(
        config, "{0}\nStarting experiment {1}\n{0}\n".format(
            50 * "=", utility.get_experiment_name(config)))
    model = m.Unet(drop_rate=0.4, bn_momentum=0.1, config=config)
    if config['operation_mode'].lower() in ("retrain", "inference"):
        print("Using a trained model...")
        model.load_state_dict(torch.load(config['trained_model']))
    elif config["operation_mode"].lower() == "visualize":
        print("Using a trained model...")
        if cuda:
            model.load_state_dict(torch.load(config['trained_model']))
        else:
            model.load_state_dict(
                torch.load(config['trained_model'], map_location='cpu'))
        mv.visualize_model(model, config)
        return

    if cuda:
        model.cuda()

    num_epochs = config["num_epochs"]
    initial_lr = config["lr"]
    experiment_path = config["log_output_dir"] + config['experiment_name']
    output_image_dir = experiment_path + "/figs/"

    betas = torch.linspace(3.0, 8.0, num_epochs)
    optimizer = optim.Adam(model.parameters(), lr=initial_lr)
    # Drop the learning rate 10x at five evenly spaced milestones.
    lr_milestones = list(range(0, int(num_epochs), int(num_epochs) // 5))[1:]
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=lr_milestones,
                                               gamma=0.1)

    writer = SummaryWriter(log_dir=utility.get_experiment_dir(config))
    best_dice = 0
    for epoch in tqdm(range(1, num_epochs + 1)):
        start_time = time.time()

        scheduler.step()

        lr = scheduler.get_lr()[0]
        model.beta = betas[epoch - 1]  # for ternary net, set beta
        writer.add_scalar('learning_rate', lr, epoch)

        model.train()
        train_loss_total = 0.0
        num_steps = 0
        capture = True
        for i, batch in enumerate(train_loader):
            input_samples, gt_samples, idx = batch[0], batch[1], batch[2]

            if cuda:
                var_input = input_samples.cuda()
                var_gt = gt_samples.cuda(non_blocking=True).float()
            else:
                var_input = input_samples
                var_gt = gt_samples.float()
            preds = model(var_input)

            loss = calc_loss(preds, var_gt)
            train_loss_total += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            num_steps += 1
            if epoch % 5 == 0 and capture:
                capture = False
                input_samples, gt_samples = get_samples(
                    image_val_path, target_val_path, 4)
                if cuda:
                    input_samples = input_samples.cuda()
                preds = model(input_samples)
                input_samples = input_samples.data.cpu().numpy()
                preds = preds.data.cpu().numpy()
                save_image(input_samples[0][0], gt_samples[0][0], preds[0][0],
                           epoch, 0, output_image_dir)

        train_loss_total_avg = train_loss_total / num_steps

        model.eval()  # switch to eval mode for the validation pass
        val_loss_total = 0.0
        num_steps = 0

        metric_fns = [
            dice_score, hausdorff_score, precision_score, recall_score,
            specificity_score, intersection_over_union, accuracy_score,
            rand_index_score
        ]

        metric_mgr = MetricManager(metric_fns)

        for i, batch in enumerate(val_loader):
            input_samples, gt_samples, idx = batch[0], batch[1], batch[2]
            with torch.no_grad():
                if cuda:
                    var_input = input_samples.cuda()
                    var_gt = gt_samples.cuda(non_blocking=True).float()
                else:
                    var_input = input_samples
                    var_gt = gt_samples.float()
                preds = model(var_input)
                loss = dice_loss(preds, var_gt)
                val_loss_total += loss.item()
            # Metrics computation
            gt_npy = gt_samples.numpy().astype(np.uint8)
            gt_npy = gt_npy.squeeze(axis=1)

            preds = preds.data.cpu().numpy()
            preds = threshold_predictions(preds)
            preds = preds.astype(np.uint8)
            preds = preds.squeeze(axis=1)
            metric_mgr(preds, gt_npy)
            num_steps += 1

        metrics_dict = metric_mgr.get_results()
        metric_mgr.reset()

        writer.add_scalars('metrics', metrics_dict, epoch)

        val_loss_total_avg = val_loss_total / num_steps

        writer.add_scalars('losses', {
            'val_loss': val_loss_total_avg,
            'train_loss': train_loss_total_avg
        }, epoch)

        end_time = time.time()
        total_time = end_time - start_time
        log_str = "Epoch {} took {:.2f} seconds, train_loss={} dice_score={} rand_index_score={} lr={}.".format(
            epoch, total_time, train_loss_total_avg,
            metrics_dict["dice_score"], metrics_dict["rand_index_score"],
            get_lr(optimizer))
        utility.log_info(config, log_str)
        tqdm.write(log_str)

        if metrics_dict["dice_score"] > best_dice:
            best_dice = metrics_dict["dice_score"]
            utility.save_model(model=model, config=config)
    if config['operation_mode'].lower() != "inference":
        utility.save_model(model=model, config=config)
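get_lr, used in the log line above, is a small helper defined elsewhere; a likely sketch, assuming a single parameter group on the optimizer:

def get_lr(optimizer):
    # Read the current learning rate from the first parameter group.
    for param_group in optimizer.param_groups:
        return param_group['lr']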