Example 1
def register():
    user_id = str(uuid.uuid4())
    liked_chains = split_ids(request.form.get('liked_chains'))
    disliked_chains = split_ids(request.form.get('disliked_chains'))

    users_db[user_id] = {'liked_chains': liked_chains,
                         'disliked_chains': disliked_chains}

    return jsonify({'user_id': user_id})
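The registration handler above assumes a small split_ids helper that turns an optional comma-separated string of chain ids into a list. A minimal sketch of what such a helper could look like (the separator and empty-input handling are assumptions, not the project's actual code):

def split_ids(raw):
    # Hypothetical helper: turn "12,7,93" into ['12', '7', '93'];
    # an empty or missing value yields an empty list.
    if not raw:
        return []
    return [token.strip() for token in raw.split(',') if token.strip()]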
Example 2
def recommend():
    user_id = request.args['user_id']
    user_location = LatLong.from_dict(request.args)
    query_tags = split_ids(request.args.get('tags'))

    cur_user = users_db[user_id]
    predictions = get_predictions_for(cur_user)

    available_places = []
    for place in places_db:
        chain_id = place['chain_id']
        if (chain_id in cur_user['liked_chains'] or
            chain_id in cur_user['disliked_chains']):
            continue

        # If there are no reviews for this chain
        if chain_id not in predictions:
            continue

        place_location = LatLong.from_dict(place['geometry']['location'])
        if lat_long_dist_in_km(user_location, place_location) > MAX_DIST_IN_KM:
            continue

        place = place.copy()
        place['tags'] = get_tags(place)
        place['prediction'] = predictions[chain_id]

        available_places.append(place)

    print("available", [place["chain_id"] for place in available_places])
    print("user", cur_user)

    if not available_places:
        return jsonify({'error': 'No such places'})

    for index in range(0, len(available_places), HERE_API_BATCH_SIZE):
        places_batch = available_places[index:index + HERE_API_BATCH_SIZE]
        calc_travel_time(user_location, places_batch)

    best_place = min(available_places,
                     key=lambda item: get_sort_key(item, query_tags))

    users_db[user_id]["liked_chains"].append(best_place["chain_id"])  # hack for different places on each call

    photos = best_place.get("photos", [])
    if len(photos) == 0:
        photo_url = None
    else:
        photo_url = GOOGLE_PHOTO_URL.format(reference=photos[0]["photo_reference"])

    return jsonify({
        'name': best_place['name'],
        'chain_id': best_place['chain_id'],
        'lat': best_place['geometry']['location']['lat'],
        'lng': best_place['geometry']['location']['lng'],
        'travel_time_mins': best_place['travel_time'] / SECONDS_PER_MIN,
        'photo_url': photo_url,
    })
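The recommendation handler filters places by great-circle distance via lat_long_dist_in_km. A plausible sketch of that helper, assuming LatLong simply exposes lat and lng attributes in degrees, is the standard haversine formula (an illustration, not the project's actual implementation):

import math

EARTH_RADIUS_KM = 6371.0

def lat_long_dist_in_km(a, b):
    # Haversine distance in kilometres between two LatLong-like points
    # whose lat/lng attributes are assumed to be in degrees.
    lat1, lng1 = math.radians(a.lat), math.radians(a.lng)
    lat2, lng2 = math.radians(b.lat), math.radians(b.lng)
    dlat, dlng = lat2 - lat1, lng2 - lng1
    h = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlng / 2) ** 2
    return 2 * EARTH_RADIUS_KM * math.asin(math.sqrt(h))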
Example 3
def create_train_test_directory(split_size, total_samples):
    """
    For tomorrows' Charles - you need to do some reworking
    as you need to have a way to create a suitable method for 
    seperating train and test files. Make pairs right now
    does not store identity info and returns a cached result. 
    We may need to bundle two files into the cache train and test. 
    """
    identity_df = get_identities()
    unique_ids = get_unique_ids(identity_df)
    train_ids, test_ids = split_ids(unique_ids, split_size)
    print(len(train_ids), len(test_ids))
    print(identity_df[identity_df.identity_num == 2880])
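In this example split_ids takes the list of unique identity ids and a split_size and returns (train_ids, test_ids). A minimal sketch under the assumption that split_size is the fraction of identities assigned to training (the real helper may shuffle or stratify differently):

import random

def split_ids(unique_ids, split_size, seed=42):
    # Hypothetical: shuffle the ids and cut them into train/test groups,
    # with split_size interpreted as the training fraction.
    ids = list(unique_ids)
    random.Random(seed).shuffle(ids)
    cut = int(len(ids) * split_size)
    return ids[:cut], ids[cut:]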
Example 4
def main():
    print('settings: ', args)
    setup()
    rnd_state = np.random.RandomState(args.seed)

    print("Loading data ... ... ...")
    dataset = TUDataset('./data/%s/' % args.dataset,
                        name=args.dataset,
                        use_node_attr=args.use_cont_node_attr)
    train_ids, test_ids = split_ids(rnd_state.permutation(len(dataset)),
                                    folds=args.n_folds)
    print("Data loaded !!!")

    acc_folds = []
    for fold_id in range(args.n_folds):
        loaders = []
        for split in ['train', 'test']:
            gdata = dataset[torch.from_numpy(
                (train_ids
                 if split.find('train') >= 0 else test_ids)[fold_id])]
            loader = DataLoader(gdata,
                                batch_size=args.batch_size,
                                shuffle=split.find('train') >= 0,
                                num_workers=args.threads,
                                collate_fn=collate_batch)
            loaders.append(loader)

        print('\nFOLD {}, train {}, test {}'.format(fold_id,
                                                    len(loaders[0].dataset),
                                                    len(loaders[1].dataset)))

        model = GCN(in_features=loaders[0].dataset.num_features,
                    out_features=loaders[0].dataset.num_classes,
                    n_hidden=args.n_hidden,
                    filters=args.filters,
                    dropout=args.dropout).to(args.device)
        print('\nInitialize model')
        print(model)
        train_params = list(
            filter(lambda p: p.requires_grad, model.parameters()))
        print('N trainable parameters:',
              np.sum([p.numel() for p in train_params]))

        optimizer = optim.Adam(train_params,
                               lr=args.lr,
                               weight_decay=args.wd,
                               betas=(0.5, 0.999))
        scheduler = lr_scheduler.MultiStepLR(optimizer,
                                             args.lr_decay_steps,
                                             gamma=0.1)
        for epoch in range(args.epochs):
            train(loaders[0],
                  model=model,
                  epoch=epoch,
                  optimizer=optimizer,
                  scheduler=scheduler)
            acc = test(loaders[1], model=model, epoch=epoch)
        acc_folds.append(acc)

    print(acc_folds)
    print('{}-fold cross validation avg acc (+- std): {} ({})'.format(
        args.n_folds, np.mean(acc_folds), np.std(acc_folds)))
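Here split_ids is called with a permuted index array and a folds argument, and the result is indexed per fold as train_ids[fold_id] / test_ids[fold_id]. A sketch consistent with that usage (the original helper may size or stratify the folds differently):

import numpy as np

def split_ids(ids, folds=10):
    # Split a permuted index array into `folds` disjoint test folds;
    # for each fold, the training indices are everything not in that fold.
    ids = np.asarray(ids)
    test_ids = np.array_split(ids, folds)
    train_ids = [np.setdiff1d(ids, test) for test in test_ids]
    return train_ids, test_ids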
Example 5
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    dir_img = 'data/train/'  # folder with the training images
    dir_mask = 'data/train_masks/'  # folder with the corresponding masks
    dir_checkpoint = 'checkpoints/'  # folder where trained checkpoints are saved

    ids = get_ids(dir_img)  # the last 4 characters of each file name are digits and serve as the image id

    # Produces a list of tuples [(id1,0),(id1,1),(id2,0),(id2,1),...,(idn,0),(idn,1)].
    # When the generators are reset later, the trailing 0/1 is passed as the pos argument
    # to get_square in utils.py: pos=0 takes the left part of the image, pos=1 the right part,
    # so the number of images is doubled.
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)  # split the data into training and validation sets

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])  # size of the training set

    optimizer = optim.SGD(
        net.parameters(),  # define the optimizer
        lr=lr,
        momentum=0.9,
        weight_decay=0.0005)

    criterion = nn.BCELoss()  # loss function

    for epoch in range(epochs):  # start training
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()  # switch to training mode

        # reset the generators
        # Apply the same preprocessing (scaling, cropping, transposing, normalisation) to the input images
        # in dir_img and the masks in dir_mask, then pair them and return (imgs_normalized, masks)
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)  # input image data
            true_masks = np.array([i[1] for i in b])  # ground-truth mask data

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)  # feed the images through the network; masks_pred is a grayscale prediction
            masks_probs_flat = masks_pred.view(-1)  # flatten the prediction

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)  # compute the loss between prediction and target
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss /
                                                 i))  # average loss over the epoch

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
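The comments in this example describe split_ids as pairing every image id with a position flag (0 for the left part, 1 for the right part), doubling the effective dataset. A sketch matching that description (the upstream repository's helper may differ in detail):

def split_ids(ids, n=2):
    # Yield (id, pos) tuples so each image id appears once per sub-square;
    # pos is later passed to get_square to select the left (0) or right (1) part.
    return ((img_id, pos) for img_id in ids for pos in range(n))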
Example 6
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    # dir_img = 'data/train/'
    # dir_mask = 'data/train_masks/'
    dir_img = 'E:/git/dataset/tgs-salt-identification-challenge/train/images/'
    dir_mask = 'E:/git/dataset/tgs-salt-identification-challenge/train/masks/'
    # dir_img = 'E:/git/dataset/tgs-salt-identification-challenge/train/my_images/'
    # dir_mask = 'E:/git/dataset/tgs-salt-identification-challenge/train/my_masks/'
    dir_checkpoint = 'checkpoints/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            # true_masks = np.array([i[1] for i in b])#np.rot90(m)
            true_masks = np.array([i[1].T / 65535 for i in b])  #np.rot90(m)

            # show_batch_image(true_masks)
            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            # show_batch_image(imgs)

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
Example 7
def train_net(
        net,
        epochs=5,
        batch_size=1,
        lr=0.1,
        #val_percent=0.1,
        save_cp=True,
        gpu=False,
        img_scale=0.5):

    img_train = '/home/lixiaoxing/github/Pytorch-UNet/data/DRIVE/AV_groundTruth/training/images_jpg/'
    mask_train = '/home/lixiaoxing/github/Pytorch-UNet/data/DRIVE/AV_groundTruth/training/vessel/'
    img_val = '/home/lixiaoxing/github/Pytorch-UNet/data/DRIVE/AV_groundTruth/training/val_jpg/'
    mask_val = '/home/lixiaoxing/github/Pytorch-UNet/data/DRIVE/AV_groundTruth/training/val_vessel/'
    dir_checkpoint = 'checkpoints_drive3_adam/'
    if os.path.exists(dir_checkpoint) is False:
        os.makedirs(dir_checkpoint)

    ids_train = get_ids(img_train)
    data_train = split_ids(ids_train)
    data_train = list(data_train)
    ids_val = get_ids(img_val)
    data_val = split_ids(ids_val)
    data_val = list(data_val)

    #iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(data_train), len(data_val),
               str(save_cp), str(gpu)))

    N_train = len(data_train)

    #optimizer = optim.SGD(net.parameters(),
    #                      lr=lr,
    #                      momentum=0.9,
    #                      weight_decay=0.0005)

    optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=1e-5)

    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           factor=0.1,
                                                           mode='min',
                                                           patience=3,
                                                           verbose=True)
    #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)
    criterion = nn.BCELoss()
    #criterion = DiceCoeff()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks_y(data_train, img_train, mask_train,
                                     img_scale)
        val = get_imgs_and_masks_y(data_val, img_val, mask_val, img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            #print(masks_pred.shape, true_masks.shape)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            #print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train, loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))

        scheduler.step(val_dice)
Example 8
def train_net(args,
              net,
              epochs,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    global best_dice, best_loss
    dir_img = '/home/mori/Programming/Net_Pruning/unetdataset_patchImg/img/'
    dir_mask = '/home/mori/Programming/Net_Pruning/unetdataset_patchImg/graylabel/'

    ids = get_ids(dir_img)  # get file name (without .png)
    print("ids:{}".format(ids))

    ids = split_ids(ids)  # resampling?
    print("ids:{}".format(ids))
    iddataset = split_train_val(ids, val_percent)  # shuffle and split the dataset by the given ratio

    ###### count parameters  ############
    paras = sum([p.data.nelement() for p in net.parameters()])

    print('''
    Starting training:
        Epochs: {}
        Parameters: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
        Deepsupervision: {}
    '''.format(epochs, paras, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu),
               str(args.deepsupervision)))

    N_train = len(iddataset['train'])
    print("N_train:{}".format(N_train))
    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        New_lr = adjust_learning_rate(optimizer, epoch, epochs)
        print(' lr: {}'.format(New_lr))
        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        best_iou = 0
        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):  # split into batches manually
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            true_masks_flat = true_masks.view(-1)
            true_masks_flat = true_masks_flat / 255  # normalise

            output = net(imgs)
            masks_pred = F.sigmoid(output)

            if args.deepsupervision:
                #### unet++ with deepsupervision
                loss = 0
                for mp in masks_pred:
                    masks_probs_flat = mp.view(-1)
                    loss += criterion(masks_probs_flat, true_masks_flat)
                loss /= len(masks_pred)
                epoch_loss += loss.item()
            else:
                masks_probs_flat = masks_pred.view(-1)
                loss = criterion(masks_probs_flat, true_masks_flat)
                epoch_loss += loss.item()

                ## todo: adjust iou
                iou = iou_score(output, true_masks / 255)

            ######## record the best iou
            if iou > best_iou:
                best_iou = iou

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            newloss = loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))
        print('Best iou: {}'.format(best_iou))
        val_dice = eval_net(net, val, gpu)
        print('Validation Dice Coeff: {}'.format(val_dice))

        writer.add_scalar('train_loss', epoch_loss / i, (epoch + 1))
        writer.add_scalar('val_dice', val_dice, (epoch + 1))
        writer.add_scalar('best iou', best_iou, (epoch + 1))

        if save_cp:
            #torch.save(net.state_dict(),dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            #print('Checkpoint {} saved !'.format(epoch + 1))
            dice_best = val_dice > best_dice
            loss_best = epoch_loss / i < best_loss
            best_dice = max(val_dice, best_dice)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': net.state_dict(),
                    'best_dice': best_dice,
                    'best_loss': best_loss,
                }, dice_best, loss_best)

    print('Best dice: ', best_dice)
Example 9
        # true_mask = torch.from_numpy(true_mask)

        mask_pred = net(img.cuda())[0]
        mask_pred = mask_pred.data.cpu().numpy()
        mask_pred = (mask_pred > 0.5).astype(int)
        #
        # print('mask_pred.shape.zhaojin', mask_pred.shape)
        # print('true_mask.shape.zhaojin', true_mask.shape)

        tot += dice_cofe(mask_pred, true_mask)
    return tot / (i + 1)


if __name__ == "__main__":

    dir_img = '/home/zhaojin/data/TacomaBridge/segdata/train/img'
    dir_mask = '/home/zhaojin/data/TacomaBridge/segdata/train/mask'
    dir_checkpoint = '/home/zhaojin/data/TacomaBridge/segdata/train/checkpoint/logloss_softmax/'

    net = UNet(n_channels=1, n_classes=4)
    net = net.cuda()
    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, 0.1)

    val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask, 0.5)
    if 1:
        val_dice = eval_net(net, val)
        print('Validation Dice Coeff: {}'.format(val_dice))
Example 10
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    dir_img = '../dataset/train/images/'
    dir_mask = '../dataset/train/masks/'
    dir_checkpoint = 'checkpoints/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print(('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu))))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print(('Starting epoch {}/{}.'.format(epoch + 1, epochs)))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask, img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask, img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)): # b[batch_id][0/1]: a batch of image(0)+mask(1)
            #print(('b[0]',b[0][0].shape,b[0][1].shape))
            #imgs = []
            #for img_msk in b:
            #    imgs.append(img_msk[0])
            #print(len(imgs))
            #imgs = np.array(imgs) # Wrong: not all images are of the same shape
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print(('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train, loss.item())))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print(('Epoch finished ! Loss: {}'.format(epoch_loss / i)))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print(('Validation Dice Coeff: {}'.format(val_dice)))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'cropped_CP{}.pth'.format(epoch + 1))
            print(('Checkpoint {} saved !'.format(epoch + 1)))
Example 11
def fit(net,
        tf_writer,
        epochs=5,
        batch_size=1,
        lr=0.0001,
        val_percent=0.05,
        save_cp=True,
        gpu=False,
        img_scale=1,
        l2=1e-8,
        mom=0.9,
        n_classes=4,
        loss_function='bce',
        alpha_non_zero=1,
        resize_in=500):

    # dir_png = "data/caddata_line_v2_1_mini/png"
    # dir_mask = "data/caddata_line_v2_1_mini/mask"
    dir_png_train = os.path.join(args.data, 'train', 'png')
    dir_mask_train = os.path.join(args.data, 'train', 'mask')
    dir_png_val = os.path.join(args.data, 'val', 'png')
    dir_mask_val = os.path.join(args.data, 'val', 'mask')
    # dir_mask = "data/mini/mask"

    dir_checkpoint = os.path.join(args.log, 'checkpoints')
    if not os.path.isdir(dir_checkpoint):
        os.mkdir(dir_checkpoint)

    # train
    ids_train = get_ids(dir_png_train)
    ids_train = split_ids(ids_train, n=1)
    l_ids_train = list(ids_train)
    # val
    ids_val = get_ids(dir_png_val)
    ids_val = split_ids(ids_val, n=1)
    l_ids_val = list(ids_val)

    # iddataset = split_train_val(ids, val_percent)
    iddataset = {'train': l_ids_train, 'val': l_ids_val}

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    if loss_function == 'bce':
        criterion = nn.BCELoss()
    elif loss_function == 'mse':
        criterion = nn.MSELoss()
    # criterion = nn.CrossEntropyLoss()

    with open(os.path.join(args.log, 'log.txt'), 'w+') as f_log:
        for epoch in range(epochs):
            # print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
            # optimizer = optim.SGD(net.parameters(),
            #                       lr=lr * (args.lr_decay ** epoch),
            #                       momentum=mom,
            #                       weight_decay=l2)
            optimizer = optim.Adam(net.parameters(),
                                   lr=lr * (args.lr_decay**epoch),
                                   weight_decay=l2)
            # optimizer = optim.RMSprop(net.parameters(),
            #                           lr=lr * (args.lr_decay ** epoch),
            #                           weight_decay=l2)
            print("Current lr = {}".format(lr * (args.lr_decay**epoch)))
            f_log.write("Current lr = {}".format(lr * (args.lr_decay**epoch)) +
                        '\n')

            net.train()

            # shuffle training set
            random.shuffle(l_ids_train)
            iddataset = {'train': l_ids_train, 'val': l_ids_val}
            # reset the generators
            train = get_imgs_and_masks(iddataset['train'], dir_png_train,
                                       dir_mask_train, img_scale)
            val = get_imgs_and_masks(iddataset['val'], dir_png_val,
                                     dir_mask_val, img_scale)

            epoch_loss = 0
            epoch_tot = 0
            epoch_acc = 0
            epoch_acc_all = 0

            i_out = 0

            for i, b in enumerate(batch(train, batch_size)):
                imgs = np.array([j[0] for j in b]).astype(np.uint8)
                true_masks = np.array([j[1] for j in b]).astype(np.uint8)

                imgs_2 = np.zeros(
                    (imgs.shape[0], N_CHANNELS, resize_in, resize_in))
                true_masks_2 = np.zeros(
                    (true_masks.shape[0], resize_in, resize_in, n_classes))

                ## data augmentation
                for j in range(imgs.shape[0]):
                    img = imgs[j, :, :, :]
                    mask = true_masks[j, :, :, :]
                    # print(np.unique(mask))

                    pil_img = Image.fromarray(img, 'RGB')
                    pil_mask = Image.fromarray(mask, 'RGB')

                    ##debug##
                    # pil_img.show()
                    # pil_mask.show()
                    # print(np.unique(mask))

                    seed = np.random.randint(124521346)

                    # Resize Crop ratio: img
                    random.seed(seed)
                    pil_img = torchvision.transforms.RandomResizedCrop(
                        size=(resize_in),
                        scale=(0.8, 1.0),
                        interpolation=Image.NEAREST)(pil_img)
                    # Resize Crop ratio: true_masks
                    random.seed(seed)
                    pil_mask = torchvision.transforms.RandomResizedCrop(
                        size=(resize_in),
                        scale=(0.8, 1.0),
                        interpolation=Image.NEAREST)(pil_mask)
                    """
                    # rotate seed
                    random_degree = randrange(360)
                    # rotate: img
                    pil_img = torchvision.transforms.functional.rotate(pil_img, angle=random_degree)
                    # rotate: true_masks
                    pil_mask = torchvision.transforms.functional.rotate(pil_mask, angle=random_degree)
                    """

                    # color: img
                    # color: true_masks
                    arr_img = rgb_pil_to_bw_norm_arr(pil_img)
                    # print(np.unique(arr_img))

                    imgs_2[j, N_CHANNELS - 1, :, :] = arr_img
                    # print(np.unique(imgs_2))

                    arr_mask = np.array(pil_mask)
                    arr_mask_2 = np.zeros((resize_in, resize_in, n_classes))
                    for in_c in range(n_classes):
                        in_1, in_2 = np.where(arr_mask[:, :, 0] == (in_c + 1))
                        arr_mask_2[in_1, in_2,
                                   in_c] = arr_mask[in_1, in_2,
                                                    0].astype(bool).astype(
                                                        np.float32)
                    true_masks_2[j, :, :, :] = arr_mask_2
                    # true_masks_2.astype(np.float32)
                    # print("======")
                    # print(np.unique(true_masks_2))

                ## To TorchTensor
                # imgs:
                # imgs = torch.from_numpy(imgs_2.astype(np.float32).transpose(0,3,1,2))
                imgs = torch.from_numpy(imgs_2.astype(np.float32))
                # true_masks:
                # true_masks = torch.from_numpy(true_masks_2.transpose(0,3,1,2))
                true_masks = torch.from_numpy(true_masks_2)

                # imgs = torch.from_numpy(imgs)
                # true_masks = torch.from_numpy(true_masks)
                # true_masks = np.transpose(true_masks, (0, 3, 1, 2))

                # assert imgs.size()[1] == N_CHANNELS
                # assert true_masks.size()[1] == n_classes
                # assert true_masks.size()[2] == imgs.size()[2]
                # assert true_masks.size()[3] == imgs.size()[3]

                if gpu:
                    imgs = imgs.cuda()
                    true_masks = true_masks.cuda()

                masks_pred = net(imgs)

                # view(-1)
                masks_probs_flat = masks_pred.view(-1)
                if gpu:
                    true_masks_flat = true_masks.view(-1)
                else:
                    true_masks_flat = true_masks.reshape(-1)
                true_masks_flat = true_masks_flat.float()

                # loss
                loss = criterion(masks_probs_flat, true_masks_flat)
                in_nonzero = torch.nonzero(true_masks_flat)
                loss_nonzero = criterion(masks_probs_flat[in_nonzero],
                                         true_masks_flat[in_nonzero])
                if in_nonzero.size(0) != 0:
                    loss = loss + alpha_non_zero * loss_nonzero
                epoch_loss += loss.item()

                true_masks_flat_bin = true_masks_flat.unsqueeze(0)
                masks_probs_flat_bin = (masks_probs_flat >
                                        0.5).float().unsqueeze(0)
                this_dice = dice_coeff(masks_probs_flat_bin,
                                       true_masks_flat_bin).item()
                epoch_tot += this_dice

                # e = np.array(masks_probs_flat_bin.cpu())
                # f = np.array(true_masks_flat_bin.cpu())
                acc_train = iou(np.array(true_masks_flat_bin.cpu()),
                                np.array(masks_probs_flat_bin.cpu()))
                acc_train_all = iou_all(np.array(true_masks_flat_bin.cpu()),
                                        np.array(masks_probs_flat_bin.cpu()))
                epoch_acc += acc_train
                epoch_acc_all += acc_train_all

                if i % print_interval == print_interval - 1:
                    print(
                        '{0} / {1} steps. --- loss: {2:.6f}, IoU_train_nz: {3:.4f}, IoU_train_all: {4:.4f}, dice: {5:.4f}'
                        .format(i, int(N_train / batch_size),
                                epoch_loss / (i + 1), epoch_acc / (i + 1),
                                epoch_acc_all / (i + 1), epoch_tot / (i + 1)))
                    f_log.write(
                        '{0} / {1} steps. --- loss: {2:.6f}, IoU_train_nz: {3:.4f}, IoU_train_all: {4:.4f}, dice: {5:.4f}'
                        .format(i, int(N_train / batch_size), epoch_loss /
                                (i + 1), epoch_acc / (i + 1), epoch_acc_all /
                                (i + 1), epoch_tot / (i + 1)) + '\n')

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                i_out = i

            print(
                'Epoch {} finished ! Loss: {}, IoU: {}, IoU_all: {}, dice: {}'.
                format(epoch, epoch_loss / (i_out + 1),
                       epoch_acc / (i_out + 1), epoch_acc_all / (i_out + 1),
                       epoch_tot / (i + 1)))
            f_log.write(
                'Epoch finished ! Loss: {}, IoU: {}, IoU_all: {}, dice: {}'.
                format(epoch_loss / (i_out + 1), epoch_acc /
                       (i_out + 1), epoch_acc_all / (i_out + 1), epoch_tot /
                       (i + 1)) + '\n')
            tf_writer.add_scalar('data/train_loss', epoch_loss / (i_out + 1),
                                 epoch)
            tf_writer.add_scalar('data/train_iou', epoch_acc / (i_out + 1),
                                 epoch)
            tf_writer.add_scalar('data/train_iou_all',
                                 epoch_acc_all / (i_out + 1), epoch)
            tf_writer.add_scalar('data/train_dice', epoch_tot / (i_out + 1),
                                 epoch)

            ## Evaluate
            net.eval()
            tot_val = 0
            epoch_loss_val = 0
            epoch_acc_val = 0
            epoch_acc_val_all = 0
            for i_val, b_val in enumerate(batch(val, batch_size)):
                # imgs_val = np.array([j[0] for j in b_val]).astype(np.float32)
                # true_masks_val = np.array([j[1] for j in b_val])
                #
                # imgs_val = torch.from_numpy(imgs_val)
                # true_masks_val = torch.from_numpy(true_masks_val)
                # true_masks_val = np.transpose(true_masks_val, (0, 3, 1, 2))

                ####
                imgs_val = np.array([j[0] for j in b_val]).astype(np.uint8)
                true_masks_val = np.array([j[1]
                                           for j in b_val]).astype(np.uint8)

                imgs_2 = np.zeros(
                    (imgs_val.shape[0], N_CHANNELS, resize_in, resize_in))
                true_masks_2 = np.zeros(
                    (true_masks_val.shape[0], resize_in, resize_in, n_classes))

                ## data augmentation
                if imgs_val.shape[0] == batch_size:
                    for j in range(imgs_val.shape[0]):
                        img = imgs_val[j, :, :, :]
                        mask = true_masks_val[j, :, :, :]
                        # print(np.unique(mask))

                        pil_img = Image.fromarray(img, 'RGB')
                        pil_mask = Image.fromarray(mask, 'RGB')

                        # resize
                        pil_img = torchvision.transforms.Resize(
                            size=(resize_in),
                            interpolation=Image.NEAREST)(pil_img)
                        pil_mask = torchvision.transforms.Resize(
                            size=(resize_in),
                            interpolation=Image.NEAREST)(pil_mask)

                        # upload img and mask to imgs_2 and mask_2
                        arr_img = rgb_pil_to_bw_norm_arr(pil_img)
                        imgs_2[j, N_CHANNELS - 1, :, :] = arr_img

                        arr_mask = np.array(pil_mask)
                        arr_mask_2 = np.zeros(
                            (resize_in, resize_in, n_classes))
                        for in_c in range(n_classes):
                            in_1, in_2 = np.where(arr_mask[:, :,
                                                           0] == (in_c + 1))
                            arr_mask_2[in_1, in_2,
                                       in_c] = arr_mask[in_1, in_2,
                                                        0].astype(bool).astype(
                                                            np.float32)
                        true_masks_2[j, :, :, :] = arr_mask_2
                        # true_masks_2.astype(np.float32)
                        # print("======")
                        # print(np.unique(true_masks_2))

                    ## To TorchTensor
                    # imgs:
                    imgs_val = torch.from_numpy(imgs_2.astype(np.float32))
                    # true_masks:
                    true_masks_val = torch.from_numpy(true_masks_2)

                    if gpu:
                        imgs_val = imgs_val.cuda()
                        true_masks_val = true_masks_val.cuda()

                    masks_pred_val = net(imgs_val)
                    masks_probs_flat_val = masks_pred_val.view(-1)

                    if gpu:
                        true_masks_flat_val = true_masks_val.view(-1)
                    else:
                        true_masks_flat_val = true_masks_val.reshape(-1)
                    true_masks_flat_val = true_masks_flat_val.float()

                    true_masks_flat_bin_val = true_masks_flat_val.unsqueeze(0)
                    masks_probs_flat_bin_val = (masks_probs_flat_val >
                                                0.5).float().unsqueeze(0)
                    dice_val = dice_coeff(masks_probs_flat_bin_val,
                                          true_masks_flat_bin_val).item()

                    acc_val = iou(np.array(true_masks_flat_bin_val.cpu()),
                                  np.array(masks_probs_flat_bin_val.cpu()))
                    acc_val_all = iou_all(
                        np.array(true_masks_flat_bin_val.cpu()),
                        np.array(masks_probs_flat_bin_val.cpu()))
                    epoch_acc_val += acc_val
                    epoch_acc_val_all += acc_val_all

                    tot_val += dice_val

                    loss_val = criterion(masks_probs_flat_val,
                                         true_masks_flat_val)
                    in_nonzero = torch.nonzero(true_masks_flat_val)
                    loss_val_nonzero = criterion(
                        masks_probs_flat_val[in_nonzero],
                        true_masks_flat_val[in_nonzero])
                    if in_nonzero.size(0) != 0:
                        loss_val = loss_val + alpha_non_zero * loss_val_nonzero

            epoch_loss_val = loss_val / (i_val + 1)
            epoch_dice_val = tot_val / (i_val + 1)
            epoch_acc_val = epoch_acc_val / (i_val + 1)
            epoch_acc_val_all = epoch_acc_val_all / (i_val + 1)

            # val_dice = eval_net(net, val, gpu)
            print(
                '* Val: Loss: {0:.6f}, IoU: {1:.3f}, IoU_all: {2:.3f}, Dice: {3:.3f}'
                .format(epoch_loss_val, epoch_acc_val, epoch_acc_val_all,
                        epoch_dice_val))
            f_log.write(
                '* Val: Loss: {0:.6f}, IoU: {1:.3f}, IoU_all: {2:.3f}, Dice: {3:.3f}'
                .format(epoch_loss_val, epoch_acc_val, epoch_acc_val_all,
                        epoch_dice_val) + '\n')
            tf_writer.add_scalar('data/val_loss', epoch_loss_val, epoch)
            tf_writer.add_scalar('data/val_iou', epoch_acc_val, epoch)
            tf_writer.add_scalar('data/val_iou_all', epoch_acc_val_all, epoch)
            tf_writer.add_scalar('data/val_dice', epoch_dice_val, epoch)

            if save_cp and (epoch % save_interval == save_interval - 1):
                torch.save(net.state_dict(),
                           dir_checkpoint + '/CP{}.pth'.format(epoch + 1))
                print('Checkpoint {} saved !'.format(epoch + 1))
                f_log.write('Checkpoint {} saved !'.format(epoch + 1) + '\n')
Example 12
	def __init__(self,
	             args,
	             data_dir,  # folder with txt files
	             rnd_state=None,
	             use_cont_node_attr=False,
	             # use or not additional float valued node attributes available in some datasets
	             folds=10):

		self.data_dir = data_dir
		self.rnd_state = np.random.RandomState() if rnd_state is None else rnd_state
		self.use_cont_node_attr = use_cont_node_attr
		files = os.listdir(self.data_dir)
		data = {}
		nodes, graphs = self.read_graph_nodes_relations(
			list(filter(lambda f: f.find('graph_indicator') >= 0, files))[0])

		data['adj_list'] = self.read_graph_adj(list(filter(lambda f: f.find('_A') >= 0, files))[0], nodes, graphs)

		node_labels_file = list(filter(lambda f: f.find('node_labels') >= 0, files))
		if len(node_labels_file) == 1:
			data['features'] = self.read_node_features(node_labels_file[0], nodes, graphs, fn=lambda s: int(s.strip()))
		else:
			data['features'] = None

		data['targets'] = np.array(
			self.parse_txt_file(
				list(filter(lambda f: f.find('graph_labels') >= 0 or f.find('graph_attributes') >= 0, files))[0],
				line_parse_fn=lambda s: int(float(s.strip()))))

		if self.use_cont_node_attr:
			data['attr'] = self.read_node_features(list(filter(lambda f: f.find('node_attributes') >= 0, files))[0],
			                                       nodes, graphs,
			                                       fn=lambda s: np.array(list(map(float, s.strip().split(',')))))

		features, n_edges, degrees = [], [], []
		for sample_id, adj in enumerate(data['adj_list']):
			N = len(adj)  # number of nodes
			if data['features'] is not None:
				assert N == len(data['features'][sample_id]), (N, len(data['features'][sample_id]))
			if not np.allclose(adj, adj.T):
				print(sample_id, 'not symmetric')
			n = np.sum(adj)  # total sum of edges
			assert n % 2 == 0, n
			n_edges.append(int(n / 2))  # undirected edges, so need to divide by 2
			degrees.extend(list(np.sum(adj, 1)))
			if data['features'] is not None:
				features.append(np.array(data['features'][sample_id]))

		# Create features over graphs as one-hot vectors for each node
		if data['features'] is not None:
			features_all = np.concatenate(features)
			features_min = features_all.min()
			num_features = int(features_all.max() - features_min + 1)  # number of possible values

		max_degree = np.max(degrees)
		features_onehot = []
		for sample_id, adj in enumerate(data['adj_list']):
			N = adj.shape[0]
			if data['features'] is not None:
				x = data['features'][sample_id]
				feature_onehot = np.zeros((len(x), num_features))
				for node, value in enumerate(x):
					feature_onehot[node, value - features_min] = 1
			else:
				feature_onehot = np.empty((N, 0))
			if self.use_cont_node_attr:
				if args.dataset in ['COLORS-3', 'TRIANGLES']:
					# first column corresponds to node attention and shouldn't be used as node features
					feature_attr = np.array(data['attr'][sample_id])[:, 1:]
				else:
					feature_attr = np.array(data['attr'][sample_id])
			else:
				feature_attr = np.empty((N, 0))
			if args.degree:
				degree_onehot = np.zeros((N, max_degree + 1))
				degree_onehot[np.arange(N), np.sum(adj, 1).astype(np.int32)] = 1
			else:
				degree_onehot = np.empty((N, 0))

			node_features = np.concatenate((feature_onehot, feature_attr, degree_onehot), axis=1)
			if node_features.shape[1] == 0:
				# dummy features for datasets without node labels/attributes
				# node degree features can be used instead
				node_features = np.ones((N, 1))
			features_onehot.append(node_features)

		num_features = features_onehot[0].shape[1]

		shapes = [len(adj) for adj in data['adj_list']]
		labels = data['targets']  # graph class labels
		labels -= np.min(labels)  # to start from 0

		classes = np.unique(labels)
		num_classes = len(classes)

		if not np.all(np.diff(classes) == 1):
			print('making labels sequential, otherwise pytorch might crash')
			labels_new = np.zeros(labels.shape, dtype=labels.dtype) - 1
			for lbl in range(num_classes):
				labels_new[labels == classes[lbl]] = lbl
			labels = labels_new
			classes = np.unique(labels)
			assert len(np.unique(labels)) == num_classes, np.unique(labels)

		def stats(x):
			return (np.mean(x), np.std(x), np.min(x), np.max(x))

		print('N nodes avg/std/min/max: \t%.2f/%.2f/%d/%d' % stats(shapes))
		print('N edges avg/std/min/max: \t%.2f/%.2f/%d/%d' % stats(n_edges))
		print('Node degree avg/std/min/max: \t%.2f/%.2f/%d/%d' % stats(degrees))
		print('Node features dim: \t\t%d' % num_features)
		print('N classes: \t\t\t%d' % num_classes)
		print('Classes: \t\t\t%s' % str(classes))
		for lbl in classes:
			print('Class %d: \t\t\t%d samples' % (lbl, np.sum(labels == lbl)))

		if data['features'] is not None:
			for u in np.unique(features_all):
				print('feature {}, count {}/{}'.format(u, np.count_nonzero(features_all == u), len(features_all)))

		N_graphs = len(labels)  # number of samples (graphs) in data
		assert N_graphs == len(data['adj_list']) == len(features_onehot), 'invalid data'

		# Create train/test sets first
		train_ids, test_ids = split_ids(args, rnd_state.permutation(N_graphs), folds=folds)

		# Create train sets
		splits = []
		for fold in range(len(train_ids)):
			splits.append({'train': train_ids[fold],
			               'test': test_ids[fold]})

		data['features_onehot'] = features_onehot
		data['targets'] = labels
		data['splits'] = splits
		data['N_nodes_max'] = np.max(shapes)  # max number of nodes
		data['num_features'] = num_features
		data['num_classes'] = num_classes

		self.data = data
Example 13
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.5,
              save_cp=True,
              gpu=True,
              img_scale=0.5):

    dir_img = '/home/buiduchanh/WorkSpace/Unet/Pytorch-UNet/data/data_rust/train_few_images/'
    dir_mask = '/home/buiduchanh/WorkSpace/Unet/Pytorch-UNet/data/data_rust/train_masks_few_images/'
    # dir_img = '/home/buiduchanh/WorkSpace/Unet/Pytorch-UNet/data/data_rust/train_images/'
    # dir_mask = '/home/buiduchanh/WorkSpace/Unet/Pytorch-UNet/data/data_rust/train_masks/'
    dir_checkpoint = 'checkpoints/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    #optimizer = optim.SGD(net.parameters(),
    #                      lr=lr,
    #                      momentum=0.9,
    #                      weight_decay=0.0005)
    optimizer = optim.Adam(net.parameters(),
                           lr = lr,
                           betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask, img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask, img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train, loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch {} finished ! Loss: {}'.format(epoch, epoch_loss / i))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
Example 14
def train_prune_net(net,
                    epochs=5,
                    batch_size=1,
                    lr=0.1,
                    val_percent=0.05,
                    save_cp=True,
                    gpu=False,
                    img_scale=0.5,
                    num_prune_iterations=100):

    dir_img = 'data/train/'
    dir_mask = 'data/train_masks/'
    dir_checkpoint = 'checkpoints/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)
        epoch_loss = 0

        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))

        net.eval()
        sub = list(enumerate(val))[:3]
        for i, b in sub:
            img = b[0]
            true_mask = b[1]

            img = torch.from_numpy(img).unsqueeze(0)
            true_mask = torch.from_numpy(true_mask).unsqueeze(0)

            if gpu:
                img = img.cuda()
                true_mask = true_mask.cuda()

            mask_pred = net(img)[0]
            mask_pred = (mask_pred > 0.5).float()

            val_dice = dice_coeff(mask_pred, true_mask).item()
            print('Validation Dice Coeff at batch {}: {}'.format(
                i + 1, val_dice))

        net.train()
        for i, b in enumerate(batch(train, batch_size)):
            if i > 2:
                break
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            for j in range(num_prune_iterations):
                if j == num_prune_iterations - 1:
                    net.prune(verbose=True)
                else:
                    net.prune(verbose=False)

        summary(net, (3, 640, 640))

        for i, b in sub:
            img = b[0]
            true_mask = b[1]

            img = torch.from_numpy(img).unsqueeze(0)
            true_mask = torch.from_numpy(true_mask).unsqueeze(0)

            if gpu:
                img = img.cuda()
                true_mask = true_mask.cuda()

            mask_pred = net(img)[0]
            mask_pred = (mask_pred > 0.5).float()

            val_dice = dice_coeff(mask_pred, true_mask).item()
            print('Validation Dice Coeff at batch {}: {}'.format(
                i + 1, val_dice))

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        # val_dice = eval_net(net, val, gpu)
        # print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
Example 15
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)
    criterion = nn.BCELoss()

    dir_img = '../all/reduced/'
    dir_mask = '../all/masks/'
    dir_checkpoint = 'checkpoints/'

    # fetches ids in directory, without .jpg extension
    ids = get_ids(dir_img)

    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        # create a generator wrapped in try: yield each value of enumerate(batch(train, batch_size)),
        # or move enumerate() into the try block somehow.

        epoch_loss = 0
        exception_amount = 0

        for i, b in enumerate(batch(train, batch_size)):
            try:
                imgs = np.array([i[0] for i in b]).astype(np.float32)
                true_masks = np.array([i[1] for i in b])

                imgs = torch.from_numpy(imgs)
                true_masks = torch.from_numpy(true_masks)

                if gpu:
                    imgs = imgs.cuda()
                    true_masks = true_masks.cuda()

                masks_pred = net(imgs)
                masks_probs = F.sigmoid(masks_pred)
                masks_probs_flat = masks_probs.view(-1)

                true_masks_flat = true_masks.view(-1)

                loss = criterion(masks_probs_flat, true_masks_flat)
                epoch_loss += loss.item()

                print('{0:.4f} --- loss: {1:.6f}'.format(
                    i * batch_size / N_train, loss.item()))

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            except KeyboardInterrupt:
                torch.save(net.state_dict(), 'INTERRUPTED.pth')
                print('Saved interrupt')
                try:
                    sys.exit(0)
                except SystemExit:
                    os._exit(0)

            except Exception as err:
                print(err)
                exception_amount += 1
                if exception_amount > 20:
                    print("Too many failed batches; things aren't going well.")

        print('Epoch finished ! Loss: {}'.format(epoch_loss / (i + 1)))

        val_dice = eval_net(net, val, gpu)
        print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
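Every example here iterates over batch(train, batch_size), a helper that groups the (image, mask) pairs yielded by get_imgs_and_masks into fixed-size lists. A minimal sketch of what such a helper could look like; the project's real version may differ, for instance in how it treats the final partial batch:

def batch(iterable, batch_size):
    """Yield successive lists of up to batch_size items from an iterable."""
    chunk = []
    for item in iterable:
        chunk.append(item)
        if len(chunk) == batch_size:
            yield chunk
            chunk = []
    if chunk:
        yield chunk  # emit the final, possibly smaller, batch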
Ejemplo n.º 16
0
            max_degree = 14
        else:
            raise NotImplementedError(
                'max_degree value should be specified in advance. '
                'Try running without --torch_geom (-g) and look at dataset statistics printed out by our code.'
            )

    if args.degree:
        transforms.append(T.OneHotDegree(max_degree=max_degree, cat=False))

    dataset = TUDataset('./data/%s/' % args.dataset,
                        name=args.dataset,
                        use_node_attr=args.use_cont_node_attr,
                        transform=T.Compose(transforms))
    train_ids, test_ids = split_ids(args,
                                    rnd_state.permutation(len(dataset)),
                                    folds=n_folds)

else:
    datareader = DataReader(args=args,
                            data_dir='./data/%s/' % args.dataset,
                            rnd_state=rnd_state,
                            folds=n_folds,
                            use_cont_node_attr=args.use_cont_node_attr)

acc_folds = []

for fold_id in range(n_folds):
    torch.cuda.empty_cache()
    loaders = []
    for split in ['train', 'test']:
Ejemplo n.º 17
0
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_height=512,
              img_scale=0.5):

    #dir_img = 'carvana-image-masking-challenge/train/'
    #dir_mask = 'carvana-image-masking-challenge/train_masks/'

    dir_img = '/root/ctw/train_images_preprocess_other/'
    dir_mask = '/root/ctw/train_images_mask_preprocess_other/'


    #dir_img = '/root/ctw/val_images_preprocess_test/'
    #dir_mask = '/root/ctw/val_images_mask_preprocess_test/'
    dir_checkpoint = 'checkpoints/'

    ids = list(get_ids(dir_img))
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)


    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.Adam(net.parameters(),lr=lr)

   # optimizer = optim.SGD(net.parameters(),
   #                       lr=lr,
   #                       momentum=0.92,
   #                       weight_decay=0.0005)

    criterion = nn.BCELoss()
    #criterion = nn.MSELoss()
 
    #import scipy.misc
    iteration = 0
    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask, img_height, img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask, img_height, img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            #print(i, len(b))
            """
            for j in b:
                #print(j[0].shape, j[1].shape)
                #print(j[1])
                #scipy.misc.toimage(j[0], cmin=0.0, cmax=1.0).save('%s_outfile.jpg'%count)
                #scipy.misc.toimage(j[1], cmin=0.0, cmax=1.0).save('%s_outmask.jpg'%count)
                count += 1
            """
            iteration += 1 
            try:            
                imgs = np.array([i[0] for i in b]).astype(np.float32)
                true_masks = np.array([i[1] for i in b])
              
    #            print("\nImgs :  \n{}".format(np.unique(imgs)))
    #            print("\ntrue mask \n {} ".format(np.unique(true_masks)))
            #print('%s'%(datetime.datetime.now()), '{0:.4f}'.format(i * batch_size))
            except Exception as e:
                print(e)  # print the exception instance, not the Exception class
                continue

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()
            if iteration % 100 == 0:
                print('iter %s' % iteration, '%s' % datetime.datetime.now(),
                      '{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                         loss.item()))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / (i + 1)))

        if 1:
            val_dice = eval_net(net, val, gpu)
            val_iou = val_dice / (2 - val_dice)
            print('Validation Dice Coeff: {}'.format(val_dice))
            print('Validation iouScore : {}'.format(val_iou))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
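The conversion val_iou = val_dice / (2 - val_dice) used above is exact for any pair of sets: with intersection I and set sizes |A| and |B|, Dice = 2I / (|A| + |B|) and IoU = I / (|A| + |B| - I), which rearranges to IoU = Dice / (2 - Dice). A small sanity check on arbitrary toy masks:

import torch

a = torch.tensor([1., 1., 0., 1., 0.])
b = torch.tensor([1., 0., 0., 1., 1.])

inter = (a * b).sum()
dice = 2 * inter / (a.sum() + b.sum())
iou = inter / (a.sum() + b.sum() - inter)

print(dice.item(), iou.item(), (dice / (2 - dice)).item())  # the last two values agree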
Ejemplo n.º 18
0
def train_net(
        net,
        epochs=5,
        batch_size=1,
        lr=0.1,
        val_percent=0.05,  # train : validation = 0.95 : 0.05
        save_cp=True,
        gpu=False,
        img_scale=0.5):

    dir_img = opt_train.dir_img
    dir_mask = opt_train.dir_mask
    dir_checkpoint = opt_train.dir_checkpoint

    # Get the list of image ids (file names without extension)
    ids = get_ids(dir_img)
    # Build (name, index) tuples from the ids.
    # e.g. with n = 2 and an image called bobo.jpg this yields (bobo, 0) and (bobo, 1):
    # index 0 crops the width to the left part of the image, index 1 to the right part.
    ids = split_ids(ids)
    # Shuffle the dataset and split it into train and validation sets by val_percent
    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        GPU: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(gpu)))

    # training set size
    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    # binary cross-entropy loss
    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))

        # reset the generators
        # rebuild the train and validation generators every epoch
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        # reset the epoch loss accumulator
        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            # One batch of image tensors and the corresponding ground-truth masks.
            # Index 0 gives the left crop ([384, 384, 3]); index 1 gives the right crop ([384, 190, 3]).
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            # convert to torch tensors
            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            # move the training data to the GPU
            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            # predicted masks from the network, shape [10, 1, 384, 384]
            masks_pred = net(imgs)
            # apply sigmoid (F.sigmoid is deprecated)
            masks_probs = torch.sigmoid(masks_pred)
            masks_probs_flat = masks_probs.view(-1)

            true_masks_flat = true_masks.view(-1)
            # binary cross-entropy loss
            loss = criterion(masks_probs_flat, true_masks_flat)
            # accumulate the batch losses to compute the epoch mean loss
            epoch_loss += loss.item()

            # print the progress within the epoch and the current batch loss
            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            # zero the optimizer gradients
            optimizer.zero_grad()
            # backpropagate
            loss.backward()
            # update the parameters
            optimizer.step()

        # End of the epoch: report the mean loss over its batches
        print('Epoch finished ! Loss: {}'.format(epoch_loss / (i + 1)))

        # Evaluate on the validation set after every epoch
        if True:
            # Dice coefficient: measures the similarity (overlap) of two sets
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        # save a checkpoint
        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
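The comments in this example describe what the data helpers are expected to do: get_ids lists image names without their extension, and split_ids pairs each name with a sub-image index so that one file can produce several crops. A minimal sketch consistent with that description, assuming 4-character extensions and a default of two crops per image (the upstream utils may differ in details):

import os

def get_ids(dir_img):
    """Yield file names in dir_img without their 4-character extension (e.g. '.jpg')."""
    return (f[:-4] for f in os.listdir(dir_img))

def split_ids(ids, n=2):
    """Yield (id, i) for every id and i in range(n); each i selects a different crop."""
    return ((idx, i) for idx in ids for i in range(n))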
Ejemplo n.º 19
0
def train_net(net,
              epochs=20,
              batch_size=1,
              lr=0.1,
              lrd=0.99,
              val_percent=0.05,
              save_cp=True,
              gpu=True,
              img_scale=0.5,
              imagepath='',
              maskpath='',
              cpsavepath=''):

    dir_img = imagepath
    dir_mask = maskpath
    dir_checkpoint = cpsavepath
    classweight = [1, 2, 3, 2]

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    logname = cpsavepath + '/' + 'losslog.txt'

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    # classweight = [1,4,8,4]
    criterion = BCELoss_weight(classweight)

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        with open(logname, "a") as f:
            f.write('Starting epoch {}/{}.'.format(epoch + 1, epochs) + "\n")
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0

        lr = lr * lrd
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        print('lr', lr)
        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            true_masks = np.transpose(true_masks, axes=[0, 3, 1, 2])
            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            # print('masks_pred.shape',masks_pred.shape)
            # print('true_masks.shape', true_masks.shape)
            masks_probs_flat = masks_pred

            true_masks_flat = true_masks
            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            printinfo = '{0:.4f} --- loss: {1:.6f}'.format(
                i * batch_size / N_train, loss.item())
            print(printinfo)

            with open(logname, "a") as f:
                f.write(printinfo + "\n")

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / (i + 1)))
        with open(logname, "a") as f:
            f.write('Epoch finished ! Loss: {}'.format(epoch_loss / (i + 1)) + "\n")
        if 1:
            val_dice = eval_net(net, val)
            print('Validation Dice Coeff: {}'.format(val_dice))
            with open(logname, "a") as f:
                f.write('Validation Dice Coeff: {}'.format(val_dice) + "\n")

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
            with open(logname, "a") as f:
                f.write('Checkpoint {} saved !'.format(epoch + 1) + "\n")
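This variant swaps nn.BCELoss for BCELoss_weight(classweight), a per-class weighted binary cross-entropy defined elsewhere in that project. A rough sketch of one way such a loss could look, assuming the network emits per-class probability maps of shape [batch, n_classes, H, W] and classweight holds one weight per class; the weighting scheme is a guess from the call site, not the project's implementation:

import torch
import torch.nn as nn
import torch.nn.functional as F

class BCELoss_weight(nn.Module):
    """Binary cross-entropy with a fixed weight per output channel (class)."""

    def __init__(self, class_weights):
        super().__init__()
        self.register_buffer('w', torch.tensor(class_weights, dtype=torch.float32))

    def forward(self, probs, targets):
        # probs and targets: [batch, n_classes, H, W]; probs already passed through sigmoid,
        # targets expected as float tensors in [0, 1]
        bce = F.binary_cross_entropy(probs, targets, reduction='none')
        return (bce * self.w.view(1, -1, 1, 1)).mean()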
Ejemplo n.º 20
0
def train_net(net,
              epochs=30,
              batch_size=6,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

#    dir_img = 'E:/A_paper_thesis/paper5/tensorflow_deeplabv3plus_scrapingData/dataset/Scraping_Data2/train_db'
    dir_img = 'data/train_db/'
    dir_mask = 'data/GT_bw/'
    dir_checkpoint = 'checkpoint0919/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()
    

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask, img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask, img_scale)

        epoch_loss = 0
        epoch_iou = 0
        epoch_xor=0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()
            
            print('step:', i)

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))
            

            
#            # mean iou
#            intersect = sum(masks_probs_flat*true_masks_flat)
#            union = sum(masks_probs_flat+true_masks_flat)
#            iou = (intersect+0.001)/(union-intersect+0.001)
#            epoch_iou +=iou
            
            # mean iou
            smooth = 1e-6  # smoothing term to avoid a 0/0 division
            intersect = sum(masks_probs_flat*true_masks_flat)
            union = sum(masks_probs_flat+true_masks_flat)-intersect
            iou = (intersect+smooth)/(union+smooth)
            epoch_iou +=iou
            
            # calculate xor
            # xor equation: xor = (union(output, ground truth) - intersection(output, ground truth)) / ground truth
            # xor = (union - intersect) / ground truth
            xor = (union - intersect) / sum(true_masks_flat)
            epoch_xor += xor
            


            print('mean IoU: {:.4f}'.format(iou))
#            print('mean IoU1: {:.4f}'.format(iou1))
            print('mean xor: {:.4f}'.format(xor))
            
            # end of mean iou

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! epoch_Loss: {:.6f}'.format(epoch_loss / (i + 1)))
        print('epoch_iou: {:.4f}'.format(epoch_iou / (i + 1)))
        print('epoch_xor: {:.4f}'.format(epoch_xor / (i + 1)))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('epoch_Validation Dice Coeff: {:.4f}'.format(val_dice))
            # TODO: also report the mean IoU of the evaluation here (it can be derived from val_dice)
          

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
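The inline IoU and xor computations above work on the flattened prediction and ground-truth tensors; factored into small functions under the same assumptions (flattened float tensors, soft predictions in [0, 1]) they read more clearly. The names below are illustrative only:

import torch

def soft_iou(pred_flat, target_flat, smooth=1e-6):
    """Smoothed intersection-over-union on flattened (soft) masks."""
    intersect = (pred_flat * target_flat).sum()
    union = (pred_flat + target_flat).sum() - intersect
    return (intersect + smooth) / (union + smooth)

def xor_ratio(pred_flat, target_flat):
    """(union - intersection) / |ground truth|, the 'xor' measure used in the loop above."""
    intersect = (pred_flat * target_flat).sum()
    union = (pred_flat + target_flat).sum() - intersect
    return (union - intersect) / target_flat.sum()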
Ejemplo n.º 21
0
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.2,
              save_cp=True,
              gpu=False,
              img_scale=0.5):
    path = [['data/ori1/', 'data/gt1/'],
            ['data/original1/', 'data/ground_truth1/'],
            ['data/Original/', 'data/Ground_Truth/']]
    dir_img = path[0][0]
    dir_mask = path[0][1]
    dir_checkpoint = 'sdgcheck/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.7,
                          weight_decay=0.005)
    '''
    optimizer = optim.Adam(net.parameters(),
                      lr=lr,

                      weight_decay=0.0005)
    '''
    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0
        x = 0
        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])
            '''
            ori=np.transpose(imgs[0], axes=[1, 2, 0])   
            scipy.misc.imsave("ori/ori_"+str(x)+'.jpg', ori)
            
            gt = np.stack((true_masks[0],)*3, axis=-1)
            
            #gt=np.transpose(true_masks[0], axes=[1, 2, 0])
            scipy.misc.imsave("gt/gt_"+str(x)+'.jpg', gt)
            '''
            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            x += 1
            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / (i + 1)))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
Ejemplo n.º 22
0
def train_net(args, net, val_percent=0.05, save_cp=True):

    dir_img = os.path.join(args.dataset_folder, 'data/train/')
    dir_mask = os.path.join(args.dataset_folder, 'data/train_masks/')
    dir_checkpoint = os.path.join(args.dataset_folder, 'checkpoints/')
    if not os.path.exists(dir_checkpoint):
        os.makedirs(dir_checkpoint)

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
    '''.format(args.epochs, args.batch_size, args.lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(args.epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, args.epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   args.img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 args.img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, args.batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            # if gpu:
            imgs = imgs.cuda()
            true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(
                i * args.batch_size / N_train, loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / (i + 1)))

        if 1:
            val_dice = eval_net(net, val)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
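This version drives everything from an args namespace instead of keyword arguments. A minimal argparse setup that would provide the fields used above (dataset_folder, epochs, batch_size, lr, img_scale); the flag names are assumptions derived from the attribute names, not that project's actual CLI:

import argparse

def get_args():
    parser = argparse.ArgumentParser(description='Train U-Net from a dataset folder')
    parser.add_argument('--dataset_folder', type=str, required=True,
                        help='folder containing data/train/ and data/train_masks/')
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--lr', type=float, default=0.1)
    parser.add_argument('--img_scale', type=float, default=0.5)
    return parser.parse_args()

# Typical usage:
#   args = get_args()
#   train_net(args, net)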
Ejemplo n.º 23
0
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    dir_img = 'data/train/'
    dir_mask = 'data/train_masks/'
    dir_checkpoint = 'checkpoints/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()
    best_dice = 0.0

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])
            true_masks = true_masks / true_masks.max()

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs = torch.sigmoid(masks_pred)  # F.sigmoid is deprecated
            masks_probs_flat = masks_probs.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / (i + 1)))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))
            with open('acc.log', 'a+') as w:
                w.write('epoch {}, acc:{}\n'.format(epoch, val_dice))

        if save_cp and val_dice > best_dice:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
            best_dice = val_dice
Ejemplo n.º 24
0
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=1e-3,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    dir_img = '/home/xyj/data/spacenet/vegas/images_rgb_1300/'
    dir_mask = '/home/xyj/test/Pytorch-UNet/data/train_mask_point/'
    dir_checkpoint = 'checkpoints_point/'

    if not os.path.exists(dir_checkpoint):
        os.mkdir(dir_checkpoint)

    # ids = get_ids(dir_img)  # generator of file names in the train folder, minus the last 4 characters (e.g. '.jpg')
    with open('train_list.txt', 'r') as f:
        lines = f.readlines()
        ids = (i.strip('\n')[:-4] for i in lines)

    ids = split_ids(
        ids)  # generator of (id, i) tuples, id in ids and i in range(n); effectively multiplies the training images by n

    iddataset = split_train_val(
        ids, val_percent
    )  # split by validation percentage; a dict = {"train": [...], "val": [...]}

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    #     optimizer = optim.SGD(net.parameters(),
    #                           lr=lr,
    #                           momentum=0.9,
    #                           weight_decay=0.0005)
    optimizer = optim.Adam(net.parameters(),
                           lr=lr,
                           betas=(0.9, 0.999),
                           eps=1e-3)
    #     scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=40,gamma = 0.3)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)
        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] // 200 for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()


#             scheduler.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / (i + 1)))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
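A note on the mask preprocessing above: the integer division in np.array([i[1] // 200 for i in b]) is a compact way to binarize 8-bit masks, since 255 // 200 == 1 and 0 // 200 == 0. A one-line check on a toy array:

import numpy as np

mask = np.array([[0, 255], [255, 0]], dtype=np.uint8)
print(mask // 200)  # [[0 1]
                    #  [1 0]]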
Ejemplo n.º 25
0
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.2,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    dir_img = '/media/workspace/DATASET/Eyes/STARE/stare-images'
    dir_mask = '/media/workspace/DATASET/Eyes/STARE/labels'
    dir_checkpoint = 'checkpoints/'

    ids = get_ids(dir_img)
    ids = split_ids(ids, 4)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([w[0] for w in b]).astype(np.float32)
            true_masks = np.array([w[1] for w in b])
            true_masks[true_masks == 255] = 1

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / (i + 1)))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            if not os.path.exists(dir_checkpoint):
                os.mkdir(dir_checkpoint)
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
Ejemplo n.º 26
0
                "prune_channels": args.prune_channels,
                "gpu": args.gpu,
                "load": args.load,
                "channel_txt": args.channel_txt,
                "scale": args.scale,
                "lr": args.lr,
                "iters": args.iters,
                "epochs": args.epochs
            },
            indent=4,
            sort_keys=True)))

    # Dataset
    if not os.path.exists(splitfile):  # Our constant datasplit
        ids = get_ids(dir_img)  # [file1, file2]
        ids = split_ids(ids)  # [(file1, 0), (file1, 1), (file2, 0), ...]
        iddataset = split_train_val(ids, 0.2, splitfile)
        log.info("New split dataset")

    else:
        with open(splitfile) as f:
            iddataset = json.load(f)
        log.info("Load split dataset")

    train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                               args.scale)
    val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask, args.scale)

    # Model Initialization
    net = UNet(n_channels=3, n_classes=1, f_channels=args.channel_txt)
    log.info("Built model using {}...".format(args.channel_txt))
Ejemplo n.º 27
0
def train_net(net,
              train_dir=None,
              groundtruth_dir=None,
              weight_dir=None,
              weight_name='DeepInsthink',
              val_percent=0.05,
              epochs=5,
              batch_size=1,
              lr=0.1,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    dir_img = train_dir
    dir_mask = groundtruth_dir
    dir_checkpoint = weight_dir

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    max_DSC = 0
    max_ep_checkpoint = 0
    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0
        batchN = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print('ep: {3:.0f} batch [{0:.0f}/{1:.0f}] - loss: {2:.6f}'.format(
                i + 1, N_train / batch_size, loss.item(), epoch + 1))
            #if (i % 5==0):
            #    val_dice = eval_net(net, val, gpu)
            #    print('Validation Dice Coeff: {}'.format(val_dice))
        val_dice = eval_net(net, val, gpu)
        print('Epoch {0:} -- Loss: {1:} -- Validation DSC: {2:}'.format(
            epoch, epoch_loss / (i + 1), val_dice))
        if (val_dice >= max_DSC):
            max_DSC = val_dice
            max_ep_checkpoint = epoch + 1
        if save_cp:
            torch.save(
                net.state_dict(),
                dir_checkpoint + weight_name + '-{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
    print('Maximum checkpoint is ' + weight_name +
          '-{0:} with {1:} DSC'.format(max_ep_checkpoint, max_DSC))
Ejemplo n.º 28
0
from model import get_model
from utils import get_data_ids, get_labels, split_ids, save_preds, save_numpy
from keras.callbacks import ModelCheckpoint
# DataGenerator is used below but not imported in the original snippet;
# the module name here is an assumption about where it lives in that project.
from data_generator import DataGenerator

# Parameters
params = {'dim': (299, 299),
          'batch_size': 16,
          'n_classes': 28,
          'n_channels': 3,
          'shuffle': True,
          'augment': True,
          'dir_path': '/tmp/human_atlas/training_data/'}

# Datasets
training_ids, testing_ids = get_data_ids()
training_ids, validation_ids = split_ids(training_ids, 0.80)
labels = get_labels()

# Generators
training_generator = DataGenerator(training_ids, labels, **params)
params['batch_size'] = 128
validation_generator = DataGenerator(validation_ids, labels, **params)

# Design model
model = get_model(params, True)
print(model.summary())

# Train
checkpointer = ModelCheckpoint(filepath='model.h5', verbose=2,
                               save_best_only=True, save_weights_only=False)
model.fit_generator(generator=training_generator,
def train_net(net,
              epochs=50,
              batch_size=1,
              lr=0.1,
              val_percent=0.1,
              save_cp=True,
              gpu=True,
              img_scale=[513, 513]):

    #dir_img = '/home/lixiaoxing/data/DRIVE/train/'
    #dir_mask = '/home/lixiaoxing/data/DRIVE/trainannot/'
    dir_img = '/home/lixiaoxing/github/Pytorch-UNet/data/DRIVE/AV_groundTruth/training/images_jpg/'
    dir_mask = '/home/lixiaoxing/github/Pytorch-UNet/data/DRIVE/AV_groundTruth/training/vessel/'
    dir_checkpoint = 'checkpoints/'
    if os.path.exists(dir_checkpoint) is False:
        os.makedirs(dir_checkpoint)

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)
        #print(train)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):

            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            # up features
            #print('**********************up**************************')
            #up_feature = net.extract_features(imgs)
            #print(up_feature.shape)
            #ff = net._blocks[38]._depthwise_conv

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)
            #print(true_masks_flat.shape)
            #print(masks_probs_flat.shape)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            #print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train, loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / (i + 1)))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
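Nearly every example above reports eval_net(net, val, gpu) as the validation Dice score, but eval_net itself never appears in these snippets. A minimal sketch in the spirit of the inline validation loop from Ejemplo n.º 14, assuming val yields (image, mask) numpy pairs and the network outputs a single-channel probability map; the 0.5 threshold and the averaging are assumptions:

import numpy as np
import torch

def eval_net(net, dataset, gpu=False):
    """Mean Dice coefficient of thresholded predictions over a validation set."""
    net.eval()
    total, n = 0.0, 0
    with torch.no_grad():
        for img, true_mask in dataset:
            img = torch.from_numpy(np.asarray(img, dtype=np.float32)).unsqueeze(0)
            true_mask = torch.from_numpy(np.asarray(true_mask, dtype=np.float32)).unsqueeze(0)
            if gpu:
                img, true_mask = img.cuda(), true_mask.cuda()
            pred = (net(img)[0] > 0.5).float()
            inter = (pred * true_mask).sum()
            dice = (2 * inter + 1e-6) / (pred.sum() + true_mask.sum() + 1e-6)
            total += dice.item()
            n += 1
    net.train()
    return total / max(n, 1)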