Example #1
def predict():
    """Predict whether a text message is spam or ham."""
    my_prediction = None  # avoid a NameError when the request is not POST
    if request.method == 'POST':
        message = [request.form['message']]  # the model expects a list of texts
        CreateModelObj = CreateModel()
        my_prediction = CreateModelObj.predictSpam(message)
    return render_template('result.html', prediction=my_prediction)
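For context, a minimal sketch of the Flask wiring this handler assumes; the route path and module name are assumptions, and only request, render_template, and CreateModel.predictSpam come from the snippet:

from flask import Flask, request, render_template
from model import CreateModel  # hypothetical module path

app = Flask(__name__)
# register the handler above; the URL rule is an assumption
app.add_url_rule('/predict', view_func=predict, methods=['GET', 'POST'])

if __name__ == '__main__':
    app.run(debug=True)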
Example #2
def main():
    testing_entropy = 0
   
    args = parse_args()    
    
    if not os.path.exists(args.save):
        os.makedirs(args.save)
        
    args.init_weights = root_base+'/snapshots/' + args.model_name
    
    model = CreateModel(args)

    model.eval()
    model.cuda()
    targetloader = CreateTrgDataLoader(args)
    
    for index, batch in tqdm.tqdm(enumerate(targetloader)):
        image, _, name, _ = batch
        output = model(Variable(image).cuda())
        output = nn.functional.softmax(output, dim=1)
        testing_entropy += self_entropy(roll_axis(output.cpu().data[0].numpy()))
        output = nn.functional.upsample(output, (1024, 2048), mode='bilinear', align_corners=True).cpu().data[0].numpy()
        #output = np.multiply(output,priors)
        output = output.transpose(1,2,0)
        output_nomask = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
        output_col = colorize_mask(output_nomask)
        output_nomask = Image.fromarray(output_nomask)    
        name = name[0].split('/')[-1]
        output_nomask.save('%s/%s' % (args.save, name))
        output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0])) 
        
    mIou_ = compute_mIoU(args.gt_dir, args.save, args.devkit_dir, '')    
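self_entropy and roll_axis are not shown in the snippet. A minimal sketch of what they plausibly compute, assuming roll_axis moves the class channel last and self_entropy is the mean per-pixel Shannon entropy of the softmax map; both implementations are assumptions, not the author's code:

import numpy as np

def roll_axis(prob):
    # (C, H, W) -> (H, W, C): move the class channel last
    return np.moveaxis(prob, 0, -1)

def self_entropy(prob, eps=1e-30):
    # mean Shannon entropy of the per-pixel class distribution
    return float(np.mean(-np.sum(prob * np.log(prob + eps), axis=-1)))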
Example #3
def main():
    opt = TestOptions()
    args = opt.initialize()    
    
    if not os.path.exists(args.save):
        os.makedirs(args.save)
        
    model = CreateModel(args)

    model.eval()
    model.cuda()
    targetloader = CreateTrgDataLoader(args)
    
    for index, batch in enumerate(targetloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, _, name = batch
        output = model(Variable(image).cuda())
        output = nn.functional.upsample(output, (1024, 2048), mode='bilinear', align_corners=True).cpu().data[0].numpy()
        output = output.transpose(1,2,0)
        output_nomask = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
        output_col = colorize_mask(output_nomask)
        output_nomask = Image.fromarray(output_nomask)    
        name = name[0].split('/')[-1]
        output_nomask.save('%s/%s' % (args.save, name))
        output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0])) 
        
    compute_mIoU(args.gt_dir, args.save, args.devkit_dir, args.restore_from)    
Example #4
def CalcSAD(input_png_path, GT_png_path):
    img_input = imread(input_png_path)
    img_GT = imread(GT_png_path)

    # reshape for loop
    img_input = np.reshape(img_input, (-1, 1, 1, 3))
    img_GT = np.reshape(img_GT, (-1, 1, 1, 3))


    MODEL = CreateModel()
    MODEL.load_weights('weights_lecun_normal_Adamax.hdf5')
    img_result = MODEL.predict(img_input, batch_size=256**3)

    img_result = im2uint8(img_result)
    img_GT = im2uint8(img_GT)
    absdiff = np.abs(img_result.astype(np.int16) - img_GT.astype(np.int16))  # cast first: uint8 subtraction wraps around
    SAD = np.sum(absdiff, axis=1)

    maxSAD = np.max(SAD)
    avgSAD = np.mean(SAD)

    return maxSAD, avgSAD
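A short usage sketch for CalcSAD; the file paths are placeholders and assume the weights file from the snippet is on disk:

if __name__ == '__main__':
    max_sad, avg_sad = CalcSAD('lut_result.png', 'lut_ground_truth.png')
    print('maxSAD = %s, avgSAD = %s' % (max_sad, avg_sad))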
Example #5
    def __init__(self,args):    
        _t = {'iter time' : Timer()}
        model_name = args.source + '_to_' + args.target

        if not os.path.exists(args.snapshot_dir):
            os.makedirs(args.snapshot_dir)   
            os.makedirs(os.path.join(args.snapshot_dir, 'logs'))

        model, optimizer = CreateModel(args)
        self.args = args
        self.model_name = model_name
        self.model = model
        self.optimizer = optimizer
Example #6
def main():
    opt = TestOptions()
    args = opt.initialize()

    if not os.path.exists(args.save):
        os.makedirs(args.save)
    maxiou = 0
    root = args.restore_from

    for i in range(1, 51):
        #args.restore_from = './snapshots/gta2city_vgg/gta5_{0:d}'.format(i*2000)
        args.restore_from = root + '_{0:d}'.format(i * 2000)
        model = CreateModel(args)
        print(args.restore_from)

        model.eval()
        model.cuda()
        targetloader = CreateTrgDataLoader(args)

        for index, batch in enumerate(targetloader):
            if index % 100 == 0:
                print('%d processed' % index)
            image, _, name = batch
            output1, output2 = model(Variable(image).cuda())
            output = output1 + output2
            output = nn.functional.softmax(output, dim=1)
            output = nn.functional.upsample(
                output, (1024, 2048), mode='bilinear',
                align_corners=True).cpu().data[0].numpy()
            output = output.transpose(1, 2, 0)
            output_nomask = np.asarray(np.argmax(output, axis=2),
                                       dtype=np.uint8)
            output_col = colorize_mask(output_nomask)
            output_nomask = Image.fromarray(output_nomask)
            name = name[0].split('/')[-1]
            output_nomask.save('%s/%s' % (args.save, name))
            output_col.save('%s/%s_color.png' %
                            (args.save, name.split('.')[0]))

        iou = compute_mIoU(args.gt_dir, args.save, args.devkit_dir,
                           args.restore_from)
        #print(save_path)
        if iou > maxiou:
            maxiou = iou
            best_model = args.restore_from
        print('The best model is {:s}, the best IOU is {:f}'.format(
            best_model, maxiou))
Example #7
def main():
    opt = TestOptions()
    args = opt.initialize()
    if args.target == 'deepglobe':
        size = 612
    elif args.target == 'deepglobe224':
        size = 224
    elif args.target in ('worldview', 'worldviewFI', 'worldviewGR', 'worldviewFIc', 'worldviewc'):
        size = 512
    elif args.target in ('sentinel', 'sentinelFI', 'sentinelGR', 'sentinelc'):
        size = 224
    elif args.target in ('pleiades', 'pleiadesFI', 'pleiadesGR'):
        size = 448

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    model = CreateModel(args)

    model.eval()
    model.cuda()
    targetloader = CreateTrgDataLoader(args)

    for index, batch in enumerate(targetloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, _, name = batch
        output = model(Variable(image).cuda())
        output = nn.functional.softmax(output, dim=1)
        output = nn.functional.upsample(
            output, (size, size), mode='bilinear',
            align_corners=True).cpu().data[0].numpy()
        output = output.transpose(1, 2, 0)
        output_nomask = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
        output_col = colorize_mask(output_nomask)
        output_nomask = Image.fromarray(output_nomask)
        name = name[0].split('/')[-1]
        if args.target == 'deepglobe' or args.target == 'deepglobe224':
            output_nomask.save('%s/%s.png' %
                               (args.save, os.path.splitext(name)[0]))
        else:
            output_nomask.save('%s/%s.png' % (args.save, name))
        output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))

    compute_mIoU(args.gt_dir, args.save, args.target, args.devkit_dir,
                 args.restore_from)
Example #8
# callbacks
callbacks = [
    ModelCheckpoint(filepath=os.path.join(hdf5_dir, 'weights{epoch:08d}.hdf5'),
                    monitor='maxSAD',
                    mode='min',
                    verbose=0,
                    save_weights_only=True,
                    save_best_only=False),
    CSVLogger(f"./experiments/{conf['name']}/log.csv"),
    TensorBoard(f"./experiments/{conf['name']}/Logs"),
    # tf.keras.callbacks.EarlyStopping(patience=2),
]
# =============================================================================
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    MODEL = CreateModel(KI=conf['parameter']['kernel_initializer'],
                        nfs=conf['nfs'])
    MODEL.compile(loss=MeanSquaredError(),
                  optimizer=optimizer,
                  metrics=[myAcc, maxSAD, avgSAD])

if conf['restore_from']:
    print(f"restore from conf['restore_from']")
    MODEL.load_weights(conf['restore_from'])

npzfile = np.load(conf['dataset']['XY_npz'])
X = npzfile['arr_0']
Y = npzfile['arr_1']
del npzfile

MODEL.evaluate(X, Y, batch_size=256**3)
MODEL.fit(X,
Example #9
from model import CreateModel
from data import LoadData
from keras.callbacks import ModelCheckpoint
import tensorflowjs as tfjs
import matplotlib.pyplot as plt

model = CreateModel()
model.compile(optimizer='sgd',
              loss='binary_crossentropy',
              metrics=['binary_accuracy'])
training_data, validation_data = LoadData()
#
## train model
#save_model = ModelCheckpoint('model.h5', save_best_only=True, save_weights_only=True, verbose=1)
#print(f'data generator length: {len(training_data)}')
#model.fit_generator(
#        training_data,
#        validation_data=validation_data,
#        epochs=2,
#        callbacks=[save_model])
#
## fine tuning
#for layer in base_model.layers[:-5]:
#  layer.trainable = False
#model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['binary_accuracy'])
#model.fit_generator(
#        training_data,
#        validation_data=validation_data,
#        epochs=2,
#        callbacks=[save_model])
## save model
Example #10
def main():
    opt = TestOptions()
    args = opt.initialize()

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    model = CreateModel(args)

    model.eval()
    model.cuda()
    targetloader = CreateTrgDataLoader(args)
    id_to_trainid = {2: 0, 1: 128, 0: 255}
    for index, batch in enumerate(targetloader):
        if index % 10 == 0:
            print('%d processed' % index)
        image, _, name = batch
        _, output, _, _ = model(Variable(image).cuda())  #[1,3,129,129]
        #import pdb;pdb.set_trace()
        output = nn.functional.softmax(output, dim=1)
        output = nn.functional.upsample(
            output, (1634, 1634), mode='bilinear',
            align_corners=True).cpu().data[0].numpy()
        output = output.transpose(1, 2, 0)  #(1634,1634,3)
        '''
        output_crop = output[14:526, 626:1138, 0:2]
        np.save("/extracephonline/medai_data2/zhengdzhang/eyes/qikan/cai/output_crop_1.npy", output_crop)
        #crop_img = Image.fromarray(output_crop)
        #crop_img.save("/extracephonline/medai_data2/zhengdzhang/eyes/qikan/cai/crop_img.png")
        '''
        output_nomask = np.asarray(np.argmax(output, axis=2),
                                   dtype=np.uint8)  #(1644,1634) unique:[0,1,2]
        cup_mask = get_bool(output_nomask, 0)
        disc_mask = get_bool(output_nomask, 0) + get_bool(output_nomask, 1)
        disc_mask = disc_mask.astype(np.uint8)
        cup_mask = cup_mask.astype(np.uint8)
        for i in range(5):
            disc_mask = scipy.signal.medfilt2d(disc_mask, 19)
            cup_mask = scipy.signal.medfilt2d(cup_mask, 19)
        disc_mask = morphology.binary_erosion(disc_mask,
                                              morphology.diamond(7)).astype(
                                                  np.uint8)  # return 0,1
        cup_mask = morphology.binary_erosion(cup_mask,
                                             morphology.diamond(7)).astype(
                                                 np.uint8)  # return 0,1
        disc_mask = get_largest_fillhole(disc_mask)
        cup_mask = get_largest_fillhole(cup_mask)

        disc_mask = morphology.binary_dilation(disc_mask,
                                               morphology.diamond(7)).astype(
                                                   np.uint8)  # return 0,1
        cup_mask = morphology.binary_dilation(cup_mask,
                                              morphology.diamond(7)).astype(
                                                  np.uint8)  # return 0,1

        disc_mask = get_largest_fillhole(disc_mask).astype(
            np.uint8)  # return 0,1
        cup_mask = get_largest_fillhole(cup_mask).astype(np.uint8)
        output_nomask = disc_mask + cup_mask

        output_col = np.ones(output_nomask.shape, dtype=np.float32)
        for k, v in id_to_trainid.items():
            output_col[output_nomask == k] = v
        output_col = Image.fromarray(output_col.astype(np.uint8))
        output_nomask = Image.fromarray(output_nomask)
        name = name[0].split('.')[0] + '.png'
        output_nomask.save('%s/%s' % (args.save, name))
        output_col.save('%s/color_%s' % (args.save, name))

    disc_dice, cup_dice = calculate_dice(args.gt_dir, args.save,
                                         args.devkit_dir)
    print('===> disc_dice:' + str(round(disc_dice, 3)) + '\t' + 'cup_dice:' +
          str(round(cup_dice, 3)))
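get_largest_fillhole is not defined in the snippet. A common implementation for this kind of optic disc/cup post-processing keeps the largest connected component and fills its holes; the sketch below is an assumption, not the author's code:

import numpy as np
from scipy import ndimage
from skimage import measure

def get_largest_fillhole(binary):
    # keep only the largest connected component, then fill its holes
    labels = measure.label(binary)
    regions = measure.regionprops(labels)
    if regions:
        largest = max(regions, key=lambda r: r.area)
        binary = (labels == largest.label)
    return ndimage.binary_fill_holes(np.asarray(binary).astype(int))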
Example #11
def main():
    #load parameters######################################################################
    opt = TrainOptions()
    args = opt.initialize()

    #create timer##########################################################################
    _t = {'iter time' : Timer()}

    #create logs##########################################################################
    model_name = args.source + '_to_' + args.target
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)   
        os.makedirs(os.path.join(args.snapshot_dir, 'logs'))
    opt.print_options(args)

    #Load data#############################################################################
    sourceloader, targetloader = CreateSrcDataLoader(args), CreateTrgDataLoader(args)
    targetloader_iter, sourceloader_iter = iter(targetloader), iter(sourceloader)

    #Load deeplab+ optimizer###############################################################
    model, optimizer = CreateModel(args)

    #Load Discriminator + optimizer#######################################################
    model_D, optimizer_D = CreateDiscriminator(args)
    



    #start training###############################################################################################
    start_iter = 0

    #restore checkpoint##############################################################################
    if args.restore_from is not None:
        start_iter = int(args.restore_from.rsplit('/', 1)[1].rsplit('_')[1])
        #start_iter = int(args.restore_from.rsplit('/', 1)[1].rsplit('_')[2])

    #Tensorboard####################################################################################    
    train_writer = tensorboardX.SummaryWriter(os.path.join(args.snapshot_dir, "logs", model_name))
    
    bce_loss = torch.nn.BCEWithLogitsLoss()
    
    cudnn.enabled = True
    cudnn.benchmark = True
    model.train()
    model.cuda()
    model_D.train()
    model_D.cuda()
    loss = ['loss_seg_src', 'loss_seg_trg', 'loss_D_trg_fake', 'loss_D_src_real', 'loss_D_trg_real']
    _t['iter time'].tic()
    for i in range(start_iter, args.num_steps):
        
        model.adjust_learning_rate(args, optimizer, i)
        model_D.adjust_learning_rate(args, optimizer_D, i)
        
        optimizer.zero_grad()
        optimizer_D.zero_grad()
        for param in model_D.parameters():
            param.requires_grad = False 
            
        src_img, src_lbl, _, _ = next(sourceloader_iter)
        src_img, src_lbl = Variable(src_img).cuda(), Variable(src_lbl.long()).cuda()
        src_seg_score = model(src_img, lbl=src_lbl) # M(S),Ys
        loss_seg_src = model.loss   # Lseg(M(S),Ys)
        loss_seg_src.backward()
        


        if args.data_label_folder_target is not None:
            trg_img, trg_lbl, _, _ = next(targetloader_iter)
            trg_img, trg_lbl = Variable(trg_img).cuda(), Variable(trg_lbl.long()).cuda()
            trg_seg_score = model(trg_img, lbl=trg_lbl) 
            loss_seg_trg = model.loss
        else:
            trg_img, _, name = next(targetloader_iter)
            trg_img = Variable(trg_img).cuda()
            trg_seg_score = model(trg_img) # M(T)
            loss_seg_trg = 0
            

        		
        outD_trg = model_D(F.softmax(trg_seg_score), 0)
        loss_D_trg_fake = model_D.loss

        loss_trg = args.lambda_adv_target * loss_D_trg_fake + loss_seg_trg
        loss_trg.backward()
        
		
        for param in model_D.parameters():
            param.requires_grad = True
        
        src_seg_score, trg_seg_score = src_seg_score.detach(), trg_seg_score.detach()
        
        outD_src = model_D(F.softmax(src_seg_score), 0) # D(M(S), source)
        loss_D_src_real = model_D.loss / 2
        loss_D_src_real.backward()
		

        outD_trg = model_D(F.softmax(trg_seg_score), 1) # D(M(T), target)
        loss_D_trg_real = model_D.loss / 2
        loss_D_trg_real.backward()       
       
        
        optimizer.step()
        optimizer_D.step()
        
        
        for m in loss:
            train_writer.add_scalar(m, eval(m), i+1)
            
        if (i+1) % args.save_pred_every == 0:
            print('taking snapshot ...')
            torch.save(model.state_dict(), os.path.join(args.snapshot_dir, '%s_' %(args.source) +str(i+1)+'.pth' ))
            torch.save(model_D.state_dict(), os.path.join(args.snapshot_dir, '%s_' %(args.source) +str(i+1)+'_D.pth' ))   
            
        if (i+1) % args.print_freq == 0:
            _t['iter time'].toc(average=False)
            print('[it %d][src seg loss %.4f][lr %.4f][%.2fs]' % \
                    (i + 1, loss_seg_src.data, optimizer.param_groups[0]['lr']*10000, _t['iter time'].diff))
            if i + 1 > args.num_steps_stop:
                print ('finish training')
                break
            _t['iter time'].tic()
Example #12
                                                      mode='min',
                                                      verbose=0,
                                                      save_best_only=True,
                                                      save_weights_only=True)
csv_logger = tf.keras.callbacks.CSVLogger(
    f'./Logs/{NOW}_{kernel_initializer[ki]}_{Optimizers[opt]}.csv')
my_callbacks = [
    model_checkpoint,
    csv_logger,
    # tf.keras.callbacks.EarlyStopping(patience=2),
    # tf.keras.callbacks.TensorBoard(log_dir=f'./Logs/{NOW}_{kernel_initializer[ki]}_{Optimizers[opt]}'),
]
# =============================================================================
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    MODEL = CreateModel(KI=kernel_initializer[ki], nfs=nfs)
    MODEL.compile(loss=MeanSquaredError(),
                  optimizer=optimizer,
                  metrics=[myAcc, maxSAD, avgSAD])

if os.path.isfile(HDF5):
    print(f"restore from {HDF5}")
    MODEL.load_weights(HDF5)

MODEL.evaluate(X, Y, batch_size=256**3)
MODEL.fit(X, Y, epochs=SUB_EPOCH, batch_size=256**2, callbacks=my_callbacks)

MODEL.evaluate(X, Y, batch_size=256**3)
MODEL.load_weights(HDF5)
MODEL.evaluate(X, Y, batch_size=256**3)
Example #13
def main():

    opt = TrainOptions()
    args = opt.initialize()

    _t = {'iter time': Timer()}

    model_name = args.source + '_to_' + args.target
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
        os.makedirs(os.path.join(args.snapshot_dir, 'logs'))
    opt.print_options(args)

    sourceloader, targetloader = CreateSrcDataLoader(
        args), CreateTrgDataLoader(args)
    targetloader_iter, sourceloader_iter = iter(targetloader), iter(
        sourceloader)

    model, optimizer = CreateModel(args)
    model_D1, optimizer_D1 = CreateDiscriminator(args, 1)
    model_D2, optimizer_D2 = CreateDiscriminator(args, 2)
    start_iter = 0
    if args.restore_from is not None:
        start_iter = int(args.restore_from.rsplit('/', 1)[1].rsplit('_')[1])

    #train_writer = tensorboardX.SummaryWriter(os.path.join(args.snapshot_dir, "logs", model_name))

    bce_loss = torch.nn.BCEWithLogitsLoss()
    interp_target = nn.Upsample(size=(1024, 1024),
                                mode='bilinear',
                                align_corners=True)
    interp_source = nn.Upsample(size=(1024, 1024),
                                mode='bilinear',
                                align_corners=True)
    cudnn.enabled = True
    cudnn.benchmark = True
    model.train()
    model.cuda()
    model_D1.train()
    model_D1.cuda()
    model_D2.train()
    model_D2.cuda()
    weight_loss = WeightedBCEWithLogitsLoss()
    loss = [
        'loss_seg_src', 'loss_seg_trg', 'loss_D_trg_fake', 'loss_D_src_real',
        'loss_D_trg_real'
    ]
    _t['iter time'].tic()

    load_selected_samples = args.load_selected_samples
    if load_selected_samples is None:
        total_num = args.total_number
    else:
        clean_ids = [i_id.strip() for i_id in open(load_selected_samples)]
        total_num = len(clean_ids)
    remember_rate = args.remember_rate
    loss_list, name_list, dice_list = [], [], []
    print('total_num:', total_num)
    predict_sum_disc = 0
    predict_sum_cup = 0
    noise_sum_disc = 0
    noise_sum_cup = 0
    threshold = 0.4
    for i in range(start_iter, start_iter + total_num):

        model.adjust_learning_rate(args, optimizer, i)
        model_D1.adjust_learning_rate(args, optimizer_D1, i)
        model_D2.adjust_learning_rate(args, optimizer_D2, i)
        optimizer.zero_grad()
        optimizer_D1.zero_grad()
        optimizer_D2.zero_grad()

        ##train G
        for param in model_D1.parameters():
            param.requires_grad = False
        for param in model_D2.parameters():
            param.requires_grad = False

        #import pdb;pdb.set_trace()
        try:
            src_img, src_lbl, weight_map, name = next(sourceloader_iter)
        except StopIteration:
            sourceloader_iter = iter(sourceloader)
            src_img, src_lbl, weight_map, name = next(sourceloader_iter)
        src_img, src_lbl, weight_map = Variable(src_img).cuda(), Variable(
            src_lbl.long()).cuda(), Variable(weight_map.long()).cuda()
        src_seg_score1, src_seg_score2, src_seg_score3, src_seg_score4 = model(
            src_img, lbl=src_lbl, weight=None)
        loss_seg_src = model.loss
        loss_seg_src.backward()
        loss_list.append(loss_seg_src.item())  # store floats so np.argsort works on the list later
        name_list.append(name)
        print(i, name)
        #import pdb;pdb.set_trace()
        output = nn.functional.softmax(src_seg_score2, dim=1)
        output = nn.functional.upsample(
            output, (2056, 2124), mode='bilinear',
            align_corners=True).cpu().data[0].numpy()
        output = output.transpose(1, 2, 0)  # (1634,1634,3)
        output_mask = np.asarray(np.argmax(output, axis=2),
                                 dtype=np.uint8)  # (1644,1634) unique:[0,1,2]
        predict_disc_dice, predict_cup_dice = calculate_dice(
            args.data_dir, output_mask, name)
        print('===>predict disc_dice:' + str(round(predict_disc_dice, 3)) +
              '\t' + 'cup_dice:' + str(round(predict_cup_dice, 3)))
        #import pdb;pdb.set_trace()
        label = torch.unsqueeze(src_lbl, 0)
        label = nn.functional.upsample(
            label.float(),
            size=(2056, 2124),
            mode='bilinear',
            align_corners=True).cpu().data[0].numpy().squeeze()
        noise_disc_dice, noise_cup_dice = calculate_dice(
            args.data_dir, label, name)
        print('===>noise-included disc_dice:' +
              str(round(noise_disc_dice, 3)) + '\t' + 'cup_dice:' +
              str(round(noise_cup_dice, 3)))
        predict_sum_disc += predict_disc_dice
        predict_sum_cup += predict_cup_dice
        noise_sum_disc += noise_disc_dice
        noise_sum_cup += noise_cup_dice

        #import pdb;pdb.set_trace()
        if load_selected_samples is not None:
            if (2 - predict_disc_dice - predict_cup_dice) > threshold:
                src_seg_score1, src_seg_score2, src_seg_score3, src_seg_score4 = model(
                    src_img, lbl=src_lbl, weight=weight_map)
                weightloss = 0.05 * model.loss
                print('weightloss:', weightloss)
                weightloss.backward()

        if args.data_label_folder_target is not None:
            trg_img, trg_lbl, _, _ = next(targetloader_iter)
            trg_img, trg_lbl = Variable(trg_img).cuda(), Variable(
                trg_lbl.long()).cuda()
            trg_seg_score1, trg_seg_score2, trg_seg_score3, trg_seg_score4 = model(
                trg_img, lbl=trg_lbl)
            loss_seg_trg = model.loss
        else:
            trg_img, _, _ = next(targetloader_iter)
            trg_img = Variable(trg_img).cuda()
            trg_seg_score1, trg_seg_score2, trg_seg_score3, trg_seg_score4 = model(
                trg_img)
            loss_seg_trg = 0
        outD1_trg = model_D1(F.softmax(trg_seg_score1), 0)
        outD2_trg = model_D2(F.softmax(trg_seg_score2), 0)
        #import pdb;pdb.set_trace()
        outD1_trg = interp_target(outD1_trg)
        outD2_trg = interp_target(outD2_trg)

        if i > 9001:
            #import pdb;pdb.set_trace()
            weight_map1 = prob_2_entropy(F.softmax(trg_seg_score1))
            weight_map2 = prob_2_entropy(F.softmax(trg_seg_score2))
            loss_D1_trg_fake = weight_loss(
                outD1_trg,
                Variable(torch.FloatTensor(
                    outD1_trg.data.size()).fill_(0)).cuda(), weight_map1, 0.3,
                1)
            loss_D2_trg_fake = weight_loss(
                outD2_trg,
                Variable(torch.FloatTensor(
                    outD2_trg.data.size()).fill_(0)).cuda(), weight_map2, 0.3,
                1)
        else:
            loss_D1_trg_fake = model_D1.loss
            loss_D2_trg_fake = model_D2.loss
        #loss_D_trg_fake = model_D1.loss*0.2 + model_D2.loss
        loss_D_trg_fake = loss_D1_trg_fake * 0.2 + loss_D2_trg_fake
        loss_trg = args.lambda_adv_target * loss_D_trg_fake + loss_seg_trg
        loss_trg.backward()

        ###train D
        for param in model_D1.parameters():
            param.requires_grad = True
        for param in model_D2.parameters():
            param.requires_grad = True

        src_seg_score1, src_seg_score2, src_seg_score3, src_seg_score4, trg_seg_score1, trg_seg_score2, trg_seg_score3, trg_seg_score4 = src_seg_score1.detach(
        ), src_seg_score2.detach(), src_seg_score3.detach(
        ), src_seg_score4.detach(), trg_seg_score1.detach(
        ), trg_seg_score2.detach(), trg_seg_score3.detach(
        ), trg_seg_score4.detach()

        outD1_src = model_D1(F.softmax(src_seg_score1), 0)
        outD2_src = model_D2(F.softmax(src_seg_score2), 0)

        loss_D1_src_real = model_D1.loss / 2
        loss_D1_src_real.backward()
        loss_D2_src_real = model_D2.loss / 2
        loss_D2_src_real.backward()
        loss_D_src_real = loss_D1_src_real + loss_D2_src_real

        outD1_trg = model_D1(F.softmax(trg_seg_score1), 1)
        outD2_trg = model_D2(F.softmax(trg_seg_score2), 1)

        outD1_trg = interp_target(outD1_trg)
        outD2_trg = interp_target(outD2_trg)
        if i > 9001:
            weight_map1 = prob_2_entropy(F.softmax(trg_seg_score1))
            weight_map2 = prob_2_entropy(F.softmax(trg_seg_score2))
            loss_D1_trg_real = weight_loss(
                outD1_trg,
                Variable(torch.FloatTensor(
                    outD1_trg.data.size()).fill_(1)).cuda(), weight_map1, 0.3,
                1) / 2
            loss_D2_trg_real = weight_loss(
                outD2_trg,
                Variable(torch.FloatTensor(
                    outD2_trg.data.size()).fill_(1)).cuda(), weight_map2, 0.3,
                1) / 2

        else:
            loss_D1_trg_real = model_D1.loss / 2
            loss_D2_trg_real = model_D2.loss / 2

        loss_D1_trg_real.backward()
        loss_D2_trg_real.backward()
        loss_D_trg_real = loss_D1_trg_real + loss_D2_trg_real

        optimizer.step()
        optimizer_D1.step()
        optimizer_D2.step()

        # for m in loss:
        #     train_writer.add_scalar(m, eval(m), i+1)

        if (i + 1 == start_iter +
                total_num) and load_selected_samples is not None:
            print('taking snapshot ', args.snapshot_dir,
                  args.source + '_' + str(total_num))
            torch.save(
                model.state_dict(),
                os.path.join(args.snapshot_dir,
                             '%s_' % (args.source) + str(total_num) + '.pth'))
            torch.save(
                model_D1.state_dict(),
                os.path.join(
                    args.snapshot_dir,
                    '%s_' % (args.source) + str(total_num) + '_D1.pth'))
            torch.save(
                model_D2.state_dict(),
                os.path.join(
                    args.snapshot_dir,
                    '%s_' % (args.source) + str(total_num) + '_D2.pth'))
        if (i + 1) % args.print_freq == 0:
            _t['iter time'].toc(average=False)
            print ('[it %d][src seg loss %.4f][trg loss %.4f][trg seg loss %.4f][lr %.4f][%.2fs]' % \
                    (i + 1, loss_seg_src.data,loss_trg.data, loss_seg_trg.data,optimizer.param_groups[0]['lr']*10000, _t['iter time'].diff))
            if i + 1 > args.num_steps_stop:
                print('finish training')
                break
            _t['iter time'].tic()

        if i + 1 == start_iter + total_num and load_selected_samples is None:
            ind_sorted = np.argsort(loss_list)
            loss_sorted = np.array(loss_list)[ind_sorted]
            num_remember = int(remember_rate * len(loss_sorted))
            clean_ind_update = ind_sorted[:num_remember]
            clean_name_update = np.array(name_list)[clean_ind_update]
            #import pdb;pdb.set_trace()
            #dice = np.array(dice_list)[clean_ind_update]
            noise_ind_sorted = np.argsort(-np.array(loss_list))
            noise_loss_sorted = np.array(loss_list)[noise_ind_sorted]
            noise_num_remember = int(remember_rate * len(noise_loss_sorted))
            noise_ind_update = noise_ind_sorted[:noise_num_remember]
            noise_name_update = np.array(name_list)[noise_ind_update]

            with open(args.save_selected_samples, "w") as f:
                for i in range(len(clean_name_update)):
                    #f.write(str(clean_name_update[i][0]) +'\t'+ str(dice[:,0][i]) + '\t'+ str(dice[:,1][i]) +'\n')
                    f.write(str(clean_name_update[i][0]) + '\n')
            with open(os.path.join(args.noise_selected_samples), "w") as g:
                for j in range(len(noise_name_update)):
                    g.write(str(noise_name_update[j][0]) + '\n')
            print(args.save_selected_samples, 'Sample selection finished!')
            break

    print('\n predict disc_coef = {0:.4f}, cup_coef = {1:.4f}'.format(
        predict_sum_disc / total_num, predict_sum_cup / total_num))
    print('\n noise-included disc_coef = {0:.4f}, cup_coef = {1:.4f}'.format(
        noise_sum_disc / total_num, noise_sum_cup / total_num))
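prob_2_entropy is used above to weight the adversarial loss by per-pixel uncertainty. A minimal sketch in the spirit of entropy-based adaptation (cf. ADVENT), converting softmax probabilities into normalized self-information maps; treat it as an assumption rather than the author's exact code:

import numpy as np
import torch

def prob_2_entropy(prob):
    # softmax probabilities (N, C, H, W) -> per-pixel weighted self-information
    n, c, h, w = prob.size()
    return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)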
Example #14
import tensorflowjs as tfjs
from model import CreateModel

model = CreateModel()
model.load_weights('model.h5')
tfjs.converters.save_keras_model(model, 'extension/model')
Example #15
from PIL import Image
from keras import backend as K
from keras.preprocessing.image import img_to_array
from keras.applications.mobilenetv2 import preprocess_input
from model import CreateModel
import numpy as np
import matplotlib.pyplot as plt
import os

# load image
img = Image.open(os.sys.argv[1]).resize((128, 128)).convert('RGB')
img = img_to_array(img)
preprocessed_img = preprocess_input(img.copy())
batch = np.expand_dims(preprocessed_img, axis=0)

# get the last 8x8 conv layer of model
classifier = CreateModel()
classifier.load_weights('model.h5')
conv_layer = classifier.get_layer('Conv_1')

# compute activation map
gradient_var = K.gradients(classifier.output, conv_layer.output)[0]
activation_map_var = K.sum(gradient_var * conv_layer.output, axis=3)
activation_map = K.function([classifier.input],
                            [activation_map_var])([batch])[0][0]
activation_map -= activation_map.min()
activation_map /= activation_map.max()
plt.imshow(img / 255)
plt.imshow(activation_map,
           cmap=plt.cm.PiYG,
           extent=(0, 128, 128, 0),
           alpha=0.8,
Example #16
def main():

    opt = TrainOptions()
    args = opt.initialize()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU
    _t = {'iter time': Timer()}

    opt.print_options(args)

    train_data = cityscapesLoader(args.data_dir,
                                  split='train',
                                  is_transform=True)
    train_loader = data.DataLoader(train_data, batch_size=args.batch_size)

    val_data = cityscapesLoader(args.data_dir, split='val', is_transform=True)
    val_loader = data.DataLoader(val_data, batch_size=1)

    model, optim = CreateModel(args)

    criterion = nn.CrossEntropyLoss(ignore_index=255)

    wandb.login(key=args.wandb_key)

    start_iter = 0
    valid_loss_min = np.Inf

    if args.restore is True:
        model, optim, start_iter, valid_loss_min = load_ckp(
            args.checkpoint_path, model, optim)
        wandb.init(project="DL-SegSem", resume='allow', id=args.wandb_id)

    else:
        wandb.init(project="DL-SegSem")

    wandb.config.update(args)

    cudnn.enabled = True
    cudnn.benchmark = True

    model.train()
    model.cuda()

    _t['iter time'].tic()

    for epoch in range(start_iter + 1, args.epochs):

        epoch_loss = 0.0

        for step, batch_data in enumerate(train_loader):

            # Get the inputs and labels
            inputs = batch_data[0].to('cuda')
            labels = batch_data[1].to('cuda').long()

            # Forward propagation
            outputs = model(inputs)

            # Loss computation
            loss = criterion(outputs, labels)

            print("Epoch:", epoch, "| Step:", step, "| Loss:", loss.item())

            # Backpropagation
            optim.zero_grad()
            loss.backward()
            optim.step()

            # Keep track of loss for current epoch
            epoch_loss += loss.item()

        val_data = evaulate(model, val_loader)
        val_loss = val_data['Loss']

        if epoch % args.print_freq == 0:
            _t['iter time'].toc(average=False)
            print("Epoch number:", epoch, "| Training Loss=",
                  epoch_loss / len(train_loader), "| Validation Loss= ",
                  val_loss, "| Mean IOU (val)=", val_data['Mean IOU'], "(",
                  _t['iter time'].diff, "seconds )")
            _t['iter time'].tic()

        checkpoint = {
            'epoch': epoch,
            'valid_loss_min': valid_loss_min,
            'state_dict': model.state_dict(),
            'optimizer': optim.state_dict(),
        }

        save_ckp(checkpoint, False, args.checkpoint_path, args.best_model_path)

        wandb.log({
            "Training Loss": epoch_loss / len(train_loader),
            "Validation Loss": val_loss,
            "Mean IoU": val_data['Mean IOU'],
            "Predictions": val_data['Predictions']
        })

        if val_loss <= valid_loss_min:
            print(
                'Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'
                .format(valid_loss_min, val_loss))
            # save checkpoint as best model
            save_ckp(checkpoint, True, args.checkpoint_path,
                     args.best_model_path)
            valid_loss_min = val_loss
Example #17
from keras.applications.mobilenetv2 import preprocess_input
import os
from keras.preprocessing.image import load_img, img_to_array
from model import CreateModel
import numpy as np

img = img_to_array(
    load_img(os.sys.argv[1], target_size=(128, 128)).convert('RGB'))
img = preprocess_input(img)
batch = np.expand_dims(img, axis=0)
model = CreateModel()
model.load_weights('model.h5')
predictions = model.predict(batch)
print(predictions[0])
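The printed vector is the raw network output. A hedged post-processing step, assuming a single sigmoid output unit as the binary_crossentropy setup in Example #9 suggests:

is_positive = predictions[0][0] > 0.5  # assumption: one sigmoid output unit
print('positive class' if is_positive else 'negative class')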
Example #18
def main():

    opt = TrainOptions()
    args = opt.initialize()

    _t = {'iter time': Timer()}

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
        os.makedirs(os.path.join(args.snapshot_dir, 'logs'))
    opt.print_options(args)

    sourceloader, targetloader = CreateActSrcDataLoader(
        args), CreateActTrgDataLoader(args, 'train')
    testloader = CreateActTrgDataLoader(args, 'test')
    targetloader_iter, sourceloader_iter = iter(targetloader), iter(
        sourceloader)

    model, optimizer = CreateModel(args)
    model_D, optimizer_D = CreateDiscriminator(args)

    start_iter = 0
    if args.restore_from is not None:
        start_iter = int(args.restore_from.rsplit('/', 1)[1].rsplit('_')[1])

    train_writer = tensorboardX.SummaryWriter(
        os.path.join(args.snapshot_dir, "logs"))

    bce_loss = torch.nn.BCEWithLogitsLoss()

    cudnn.enabled = True
    cudnn.benchmark = True
    model.train()
    model.cuda()
    model_D.train()
    model_D.cuda()
    loss = [
        'loss_src', 'loss_trg', 'loss_D_trg_fake', 'loss_D_src_real',
        'loss_D_trg_real', 'eval_loss'
    ]
    _t['iter time'].tic()
    best_loss_eval = None
    best_step = 0
    eval_loss = np.array([0])
    for i in range(start_iter, args.num_steps):

        model.module.adjust_learning_rate(args, optimizer, i)
        model_D.module.adjust_learning_rate(args, optimizer_D, i)

        optimizer.zero_grad()
        optimizer_D.zero_grad()
        for param in model_D.parameters():
            param.requires_grad = False

        try:
            src_img, src_lbl, _, _ = next(sourceloader_iter)
        except StopIteration:
            sourceloader_iter = iter(sourceloader)
            src_img, src_lbl, _, _ = next(sourceloader_iter)
        src_img, src_lbl = Variable(src_img).cuda(), Variable(
            src_lbl.long()).cuda()
        src_score, loss_src = model(src_img, lbl=src_lbl)
        loss_src.mean().backward()

        try:
            trg_img, trg_lbl, _, _ = next(targetloader_iter)
        except StopIteration:
            targetloader_iter = iter(targetloader)
            trg_img, trg_lbl, _, _ = next(targetloader_iter)
        trg_img, trg_lbl = Variable(trg_img).cuda(), Variable(
            trg_lbl.long()).cuda()
        trg_score, loss_trg = model(trg_img, lbl=trg_lbl)

        outD_trg, loss_D_trg_fake = model_D(F.softmax(trg_score, dim=1),
                                            0)  # do not apply softmax

        loss_trg = args.lambda_adv_target * loss_D_trg_fake + loss_trg
        loss_trg.mean().backward()

        for param in model_D.parameters():
            param.requires_grad = True

        src_score, trg_score = src_score.detach(), trg_score.detach()

        outD_src, model_D_loss = model_D(F.softmax(src_score, dim=1),
                                         0)  # do not apply softmax

        loss_D_src_real = model_D_loss / 2
        loss_D_src_real.mean().backward()

        outD_trg, model_D_loss = model_D(F.softmax(trg_score, dim=1),
                                         1)  # do not apply softmax

        loss_D_trg_real = model_D_loss / 2
        loss_D_trg_real.mean().backward()

        optimizer.step()
        optimizer_D.step()

        for m in loss:
            train_writer.add_scalar(m, eval(m).mean(), i + 1)

        if (i + 1) % args.save_pred_every == 0:
            with torch.no_grad():
                model.eval()
                eval_loss = 0
                for test_img, test_lbl, _, _ in testloader:
                    test_score, loss_test = model(test_img, lbl=test_lbl)
                    eval_loss += loss_test.mean().item() * test_img.size(0)
                eval_loss /= len(testloader.dataset)
                if best_loss_eval is None or eval_loss < best_loss_eval:
                    best_loss_eval = eval_loss
                    best_step = i + 1
                print('taking snapshot ... eval_loss: {}'.format(eval_loss))
                torch.save(
                    model.module.state_dict(),
                    os.path.join(args.snapshot_dir,
                                 str(i + 1) + '.pth'))
                eval_loss = np.array([eval_loss])

        if (i + 1) % args.print_freq == 0:
            _t['iter time'].toc(average=False)
            print('[it %d][src loss %.4f][lr %.4f][%.2fs]' % \
                    (i + 1, loss_src.mean().data, optimizer.param_groups[0]['lr']*10000, _t['iter time'].diff))
            if i + 1 > args.num_steps_stop:
                print('finish training')
                break
            _t['iter time'].tic()
Example #19
@jit(nopython=True)
def calcSAD(img_result, img_GT):

    absdiff = np.abs(img_result.astype(np.int16) - img_GT.astype(np.int16))  # cast first: uint8 subtraction wraps around
    SAD = np.sum(absdiff, axis=1)

    maxSAD = np.max(SAD)
    avgSAD = np.mean(SAD)

    return maxSAD, avgSAD


strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    MODEL = CreateModel(nfs=[4, 8, 16, 24, 40])

png_input = "../egis_3dluts/DATASET/PNGs/3DLUT_256_in.png"
img_input = imread(png_input)
img_input = np.reshape(img_input, (-1, 1, 1, 3))

for DIR in os.listdir('./experiments'):
    print(DIR)
    if 'gamma(1.100)' in DIR:
        png_GT = "../egis_3dluts/DATASET/PNGs/3DLUT_256_train_out_gamma(1.100).png"
    elif 'hsv(0.014)' in DIR:
        png_GT = "../egis_3dluts/DATASET/PNGs/3DLUT_256_train_out_hsv(0.014).png"
    img_GT = imread(png_GT)
    img_GT = np.reshape(img_GT, (-1, 1, 1, 3))
    img_GT = im2uint8(img_GT)
Example #20
def main():
    opt = TrainOptions()  # loading train options(arg parser)
    args = opt.initialize()  # get arguments
    os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU
    _t = {'iter time': Timer()}

    model_name = args.source + '_to_' + args.target
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
        os.makedirs(os.path.join(args.snapshot_dir, 'logs'))
    opt.print_options(args)  # print set options

    sourceloader, targetloader = CreateSrcDataLoader(
        args), CreateTrgDataLoader(args)
    sourceloader_iter, targetloader_iter = iter(sourceloader), iter(
        targetloader)

    model, optimizer = CreateModel(args)

    start_iter = 0
    if args.restore_from is not None:
        start_iter = int(args.restore_from.rsplit('/', 1)[1].rsplit('_')[1])

    cudnn.enabled = True
    cudnn.benchmark = True

    model.train()
    model.cuda()

    # losses to log
    loss = ['loss_seg_src', 'loss_seg_trg']
    loss_train = 0.0
    loss_val = 0.0
    loss_train_list = []
    loss_val_list = []

    mean_img = torch.zeros(1, 1)
    class_weights = Variable(CS_weights).cuda()

    _t['iter time'].tic()
    for i in range(start_iter, args.num_steps):
        model.adjust_learning_rate(args, optimizer, i)  # adjust learning rate
        optimizer.zero_grad()  # zero grad

        src_img, src_lbl, _, _ = next(sourceloader_iter)  # new batch source
        trg_img, trg_lbl, _, _ = next(targetloader_iter)  # new batch target

        scr_img_copy = src_img.clone()

        if mean_img.shape[-1] < 2:
            B, C, H, W = src_img.shape
            mean_img = IMG_MEAN.repeat(B, 1, H, W)

        #-------------------------------------------------------------------#

        # 1. source to target, target to target
        src_in_trg = FDA_source_to_target(src_img, trg_img,
                                          L=args.LB)  # src_lbl
        trg_in_trg = trg_img

        # 2. subtract mean
        src_img = src_in_trg.clone() - mean_img  # src, src_lbl
        trg_img = trg_in_trg.clone() - mean_img  # trg, trg_lbl

        #-------------------------------------------------------------------#

        # evaluate and update params #####
        src_img, src_lbl = Variable(src_img).cuda(), Variable(
            src_lbl.long()).cuda()  # to gpu
        src_seg_score = model(src_img,
                              lbl=src_lbl,
                              weight=class_weights,
                              ita=args.ita)  # forward pass
        loss_seg_src = model.loss_seg  # get loss
        loss_ent_src = model.loss_ent

        # get target loss, only entropy for backpro
        trg_img, trg_lbl = Variable(trg_img).cuda(), Variable(
            trg_lbl.long()).cuda()  # to gpu
        trg_seg_score = model(trg_img,
                              lbl=trg_lbl,
                              weight=class_weights,
                              ita=args.ita)  # forward pass
        loss_seg_trg = model.loss_seg  # get loss
        loss_ent_trg = model.loss_ent

        triger_ent = 0.0
        if i > args.switch2entropy:
            triger_ent = 1.0

        loss_all = loss_seg_src + triger_ent * args.entW * loss_ent_trg  # loss of seg on src, and ent on s and t

        loss_all.backward()
        optimizer.step()

        loss_train += loss_seg_src.detach().cpu().numpy()
        loss_val += loss_seg_trg.detach().cpu().numpy()

        if (i + 1) % args.save_pred_every == 0:
            print('taking snapshot ...')
            torch.save(
                model.state_dict(),
                os.path.join(args.snapshot_dir,
                             '%s_' % (args.source) + str(i + 1) + '.pth'))

        if (i + 1) % args.print_freq == 0:
            _t['iter time'].toc(average=False)
            print('[it %d][src seg loss %.4f][trg seg loss %.4f][lr %.4f][%.2fs]' % \
                    (i + 1, loss_seg_src.data, loss_seg_trg.data, optimizer.param_groups[0]['lr']*10000, _t['iter time'].diff) )

            sio.savemat(args.tempdata, {
                'src_img': src_img.cpu().numpy(),
                'trg_img': trg_img.cpu().numpy()
            })

            loss_train /= args.print_freq
            loss_val /= args.print_freq
            loss_train_list.append(loss_train)
            loss_val_list.append(loss_val)
            sio.savemat(args.matname, {
                'loss_train': loss_train_list,
                'loss_val': loss_val_list
            })
            loss_train = 0.0
            loss_val = 0.0

            if i + 1 > args.num_steps_stop:
                print('finish training')
                break
            _t['iter time'].tic()
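FDA_source_to_target implements Fourier Domain Adaptation (Yang & Soatto, 2020): the low-frequency amplitude of the source image's spectrum is swapped with the target's while the source phase is kept. A minimal sketch under that assumption, using the modern torch.fft API:

import torch

def FDA_source_to_target(src_img, trg_img, L=0.01):
    # swap the low-frequency amplitude of src with trg; keep the src phase
    fft_src = torch.fft.fft2(src_img, dim=(-2, -1))
    fft_trg = torch.fft.fft2(trg_img, dim=(-2, -1))
    amp_src, pha_src = torch.abs(fft_src), torch.angle(fft_src)
    amp_trg = torch.abs(fft_trg)
    _, _, h, w = src_img.shape
    b = max(int(min(h, w) * L), 1)  # size of the low-frequency corner squares
    amp_src[..., :b, :b] = amp_trg[..., :b, :b]      # top-left corner
    amp_src[..., :b, -b:] = amp_trg[..., :b, -b:]    # top-right corner
    amp_src[..., -b:, :b] = amp_trg[..., -b:, :b]    # bottom-left corner
    amp_src[..., -b:, -b:] = amp_trg[..., -b:, -b:]  # bottom-right corner
    fft_out = amp_src * torch.exp(1j * pha_src)
    return torch.fft.ifft2(fft_out, dim=(-2, -1)).real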
Example #21
def main():
    opt = TrainOptions()
    args = opt.initialize()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU
    _t = {'iter time': Timer()}

    model_name = args.source + '_to_' + args.target
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
        os.makedirs(os.path.join(args.snapshot_dir, 'logs'))
    opt.print_options(args)

    sourceloader, targetloader = CreateSrcDataLoader(
        args), CreateTrgDataLoader(args)
    sourceloader_iter, targetloader_iter = iter(sourceloader), iter(
        targetloader)

    pseudotrgloader = CreatePseudoTrgLoader(args)
    pseudoloader_iter = iter(pseudotrgloader)

    model, optimizer = CreateModel(args)

    start_iter = 0
    if args.restore_from is not None:
        start_iter = int(args.restore_from.rsplit('/', 1)[1].rsplit('_')[1])
    if args.restore_optim_from is not None:
        optimizer.load_state_dict(torch.load(args.restore_optim_from))
        for state in optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.cuda()

    cudnn.enabled = True
    cudnn.benchmark = True

    model.train()
    model.cuda()

    wandb.watch(model, log='gradient', log_freq=1)

    # losses to log
    loss = ['loss_seg_src', 'loss_seg_psu']
    loss_train = 0.0
    loss_val = 0.0
    loss_pseudo = 0.0
    loss_train_list = []
    loss_val_list = []
    loss_pseudo_list = []

    mean_img = torch.zeros(1, 1)
    class_weights = Variable(CS_weights).cuda()

    _t['iter time'].tic()
    for i in range(start_iter, args.num_steps):

        model.adjust_learning_rate(args, optimizer, i)  # adjust learning rate
        optimizer.zero_grad()  # zero grad

        src_img, src_lbl, _, _ = next(sourceloader_iter)  # new batch source
        trg_img, trg_lbl, _, _ = next(targetloader_iter)  # new batch target
        psu_img, psu_lbl, _, _ = next(pseudoloader_iter)

        scr_img_copy = src_img.clone()

        if mean_img.shape[-1] < 2:
            B, C, H, W = src_img.shape
            mean_img = IMG_MEAN.repeat(B, 1, H, W)

        #-------------------------------------------------------------------#

        # 1. source to target, target to target
        src_in_trg = FDA_source_to_target(src_img, trg_img,
                                          L=args.LB)  # src_lbl
        trg_in_trg = trg_img

        # 2. subtract mean
        src_img = src_in_trg.clone() - mean_img  # src_1, trg_1, src_lbl
        trg_img = trg_in_trg.clone() - mean_img  # trg_1, trg_0, trg_lbl
        psu_img = psu_img.clone() - mean_img

        #-------------------------------------------------------------------#

        # evaluate and update params #####
        src_img, src_lbl = Variable(src_img).cuda(), Variable(
            src_lbl.long()).cuda()  # to gpu
        src_seg_score = model(src_img,
                              lbl=src_lbl,
                              weight=class_weights,
                              ita=args.ita)  # forward pass
        loss_seg_src = model.loss_seg  # get loss
        loss_ent_src = model.loss_ent

        # use pseudo label as supervision
        psu_img, psu_lbl = Variable(psu_img).cuda(), Variable(
            psu_lbl.long()).cuda()
        psu_seg_score = model(psu_img,
                              lbl=psu_lbl,
                              weight=class_weights,
                              ita=args.ita)
        loss_seg_psu = model.loss_seg
        loss_ent_psu = model.loss_ent

        loss_all = loss_seg_src + (loss_seg_psu + args.entW * loss_ent_psu
                                   )  # loss of seg on src, and ent on s and t
        loss_all.backward()
        optimizer.step()

        loss_train += loss_seg_src.detach().cpu().numpy()
        loss_val += loss_seg_psu.detach().cpu().numpy()

        if (i + 1) % args.save_pred_every == 0:
            print('taking snapshot ...')
            torch.save(
                model.state_dict(),
                os.path.join(args.snapshot_dir,
                             '%s_' % (args.source) + str(i + 1) + '.pth'))
            torch.save(
                optimizer.state_dict(),
                os.path.join(args.snapshot_dir_optim,
                             '%s_' % (args.source) + '.pth'))
            wandb.log({
                "src seg loss": loss_seg_src.data,
                "psu seg loss": loss_seg_psu.data,
                "learnign rate": optimizer.param_groups[0]['lr'] * 10000
            })
        if (i + 1) % args.print_freq == 0:
            _t['iter time'].toc(average=False)
            print('[it %d][src seg loss %.4f][psu seg loss %.4f][lr %.4f][%.2fs]' % \
                    (i + 1, loss_seg_src.data, loss_seg_psu.data, optimizer.param_groups[0]['lr']*10000, _t['iter time'].diff) )

            sio.savemat(args.tempdata, {
                'src_img': src_img.cpu().numpy(),
                'trg_img': trg_img.cpu().numpy()
            })

            loss_train /= args.print_freq
            loss_val /= args.print_freq
            loss_train_list.append(loss_train)
            loss_val_list.append(loss_val)
            sio.savemat(args.matname, {
                'loss_train': loss_train_list,
                'loss_val': loss_val_list
            })
            loss_train = 0.0
            loss_val = 0.0

            if i + 1 > args.num_steps_stop:
                print('finish training')
                break
            _t['iter time'].tic()
Example #22
def main():
    # torch.manual_seed(1234)
    # torch.cuda.manual_seed(1234)
    opt = TrainOptions()
    args = opt.initialize()

    _t = {'iter time': Timer()}

    model_name = args.source + '_to_' + args.target
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
        os.makedirs(os.path.join(args.snapshot_dir, 'logs'))
    opt.print_options(args)

    sourceloader, targetloader = CreateSrcDataLoader(
        args), CreateTrgDataLoader(args)
    targetloader_iter, sourceloader_iter = iter(targetloader), iter(
        sourceloader)

    model, optimizer = CreateModel(args)
    model_D, optimizer_D = CreateDiscriminator(args)

    start_iter = 0
    if args.restore_from is not None:
        start_iter = int(args.restore_from.rsplit('/', 1)[1].rsplit('_')[1])

    train_writer = tensorboardX.SummaryWriter(
        os.path.join(args.snapshot_dir, "logs", model_name))

    bce_loss = torch.nn.BCEWithLogitsLoss()
    l1_loss = torch.nn.L1Loss()
    cos_loss = torch.nn.CosineSimilarity(dim=0, eps=1e-06)

    cudnn.enabled = True
    cudnn.benchmark = True
    model.train()
    model.cuda()
    model_D.train()
    model_D.cuda()
    loss = [
        'loss_seg_src', 'loss_seg_trg', 'loss_D_trg_fake', 'loss_D_src_real',
        'loss_D_trg_real'
    ]
    _t['iter time'].tic()

    pbar = tqdm(range(start_iter, args.num_steps_stop))
    #for i in range(start_iter, args.num_steps):
    for i in pbar:

        model.adjust_learning_rate(args, optimizer, i)
        model_D.adjust_learning_rate(args, optimizer_D, i)

        optimizer.zero_grad()
        optimizer_D.zero_grad()
        for param in model_D.parameters():
            param.requires_grad = False

        src_img, src_lbl, _, _ = next(sourceloader_iter)
        src_img, src_lbl = Variable(src_img).cuda(), Variable(
            src_lbl.long()).cuda()
        src_seg_score, src_seg_score2 = model(src_img)
        loss_seg_src1 = CrossEntropy2d(src_seg_score, src_lbl)
        loss_seg_src2 = CrossEntropy2d(src_seg_score2, src_lbl)
        loss_seg_src = loss_seg_src1 + loss_seg_src2
        loss_seg_src.backward()

        if args.data_label_folder_target is not None:
            trg_img, trg_lbl, _, _ = next(targetloader_iter)
            trg_img, trg_lbl = Variable(trg_img).cuda(), Variable(
                trg_lbl.long()).cuda()
            trg_seg_score = model(trg_img)
            loss_seg_trg = model.loss
        else:
            trg_img, _, name = next(targetloader_iter)
            trg_img = Variable(trg_img).cuda()
            trg_seg_score, trg_seg_score2 = model(trg_img)
            loss_seg_trg = 0

        outD_trg = model_D(F.softmax(trg_seg_score))
        outD_trg2 = model_D(F.softmax(trg_seg_score2))
        loss_D_trg_fake1 = bce_loss(
            outD_trg,
            Variable(torch.FloatTensor(outD_trg.data.size()).fill_(0)).cuda())
        loss_D_trg_fake2 = bce_loss(
            outD_trg2,
            Variable(torch.FloatTensor(outD_trg2.data.size()).fill_(0)).cuda())
        loss_D_trg_fake = loss_D_trg_fake1 + loss_D_trg_fake2

        loss_agree = l1_loss(F.softmax(trg_seg_score),
                             F.softmax(trg_seg_score2))

        loss_trg = args.lambda_adv_target * loss_D_trg_fake + loss_seg_trg + loss_agree
        loss_trg.backward()

        #Weight Discrepancy Loss

        W5 = None
        W6 = None
        if args.model == 'DeepLab2':

            for (w5, w6) in zip(model.layer5.parameters(),
                                model.layer6.parameters()):
                if W5 is None and W6 is None:
                    W5 = w5.view(-1)
                    W6 = w6.view(-1)
                else:
                    W5 = torch.cat((W5, w5.view(-1)), 0)
                    W6 = torch.cat((W6, w6.view(-1)), 0)

        #ipdb.set_trace()
        #loss_weight = (torch.matmul(W5, W6) / (torch.norm(W5) * torch.norm(W6)) + 1) # +1 is for a positive loss
        # loss_weight = loss_weight  * damping * 2
        loss_weight = args.weight_div * (cos_loss(W5, W6) + 1)
        loss_weight.backward()

        for param in model_D.parameters():
            param.requires_grad = True

        src_seg_score, trg_seg_score = src_seg_score.detach(
        ), trg_seg_score.detach()
        src_seg_score2, trg_seg_score2 = src_seg_score2.detach(
        ), trg_seg_score2.detach()

        outD_src = model_D(F.softmax(src_seg_score))
        loss_D_src_real1 = bce_loss(
            outD_src,
            Variable(torch.FloatTensor(
                outD_src.data.size()).fill_(0)).cuda()) / 2
        outD_src2 = model_D(F.softmax(src_seg_score2))
        loss_D_src_real2 = bce_loss(
            outD_src2,
            Variable(torch.FloatTensor(
                outD_src2.data.size()).fill_(0)).cuda()) / 2
        loss_D_src_real = loss_D_src_real1 + loss_D_src_real2
        loss_D_src_real.backward()

        outD_trg = model_D(F.softmax(trg_seg_score, dim=1))
        loss_D_trg_real1 = bce_loss(
            outD_trg,
            Variable(torch.FloatTensor(
                outD_trg.data.size()).fill_(1)).cuda()) / 2
        outD_trg2 = model_D(F.softmax(trg_seg_score2, dim=1))
        loss_D_trg_real2 = bce_loss(
            outD_trg2,
            Variable(torch.FloatTensor(
                outD_trg2.data.size()).fill_(1)).cuda()) / 2
        loss_D_trg_real = loss_D_trg_real1 + loss_D_trg_real2
        loss_D_trg_real.backward()

        d_loss = loss_D_src_real.data + loss_D_trg_real.data

        optimizer.step()
        optimizer_D.step()

        for m in loss:
            train_writer.add_scalar(m, eval(m), i + 1)

        if (i + 1) % args.save_pred_every == 0:
            print('taking snapshot ...')
            torch.save(
                model.state_dict(),
                os.path.join(args.snapshot_dir,
                             '%s_' % (args.source) + str(i + 1) + '.pth'))
            torch.save(
                model_D.state_dict(),
                os.path.join(args.snapshot_dir,
                             '%s_' % (args.source) + str(i + 1) + '_D.pth'))

        if (i + 1) % args.print_freq == 0:
            _t['iter time'].toc(average=False)
            print('[it %d][src seg loss %.4f][adv loss %.4f][d loss %.4f][agree loss %.4f][div loss %.4f][lr %.4f][%.2fs]' %
                  (i + 1, loss_seg_src.data, loss_D_trg_fake.data, d_loss, loss_agree.data, loss_weight.data, optimizer.param_groups[0]['lr'] * 10000, _t['iter time'].diff))
            if i + 1 > args.num_steps_stop:
                print('finish training')
                break
            _t['iter time'].tic()
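The weight-discrepancy term above depends on a cos_loss helper that this example does not define. A minimal sketch of an equivalent regularizer, assuming cos_loss is the cosine similarity of the two flattened classifier weight vectors:

import torch.nn.functional as F

def cosine_weight_discrepancy(w_a, w_b):
    # Cosine similarity of the flattened weight vectors, shifted by +1 so the
    # loss is non-negative; minimizing it drives the two classifier heads
    # toward orthogonal (diverse) weights.
    return F.cosine_similarity(w_a.view(1, -1), w_b.view(1, -1)).squeeze() + 1

# hypothetical usage: loss_weight = args.weight_div * cosine_weight_discrepancy(W5, W6)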
Example #23
0
def main():
    opt = TestOptions()
    args = opt.initialize()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    # Three models are used because the MBT method fuses their predictions.
    args.restore_from = args.restore_opt1
    model1 = CreateModel(args)
    model1.eval()
    model1.cuda()

    args.restore_from = args.restore_opt2
    model2 = CreateModel(args)
    model2.eval()
    model2.cuda()

    args.restore_from = args.restore_opt3
    model3 = CreateModel(args)
    model3.eval()
    model3.cuda()

    targetloader = CreateTrgDataLoader(args)

    # Change the image mean when using a dataset other than Cityscapes (CS).
    IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434),
                        dtype=np.float32)
    IMG_MEAN = torch.reshape(torch.from_numpy(IMG_MEAN), (1, 3, 1, 1))
    mean_img = torch.zeros(1, 1)

    # ------------------------------------------------- #
    # compute scores and save them
    with torch.no_grad():
        for index, batch in enumerate(targetloader):
            if index % 100 == 0:
                print('%d processed' % index)
            image, _, name = batch  # 1. get the image
            # create the mean image once, matching the batch shape
            if mean_img.shape[-1] < 2:
                B, C, H, W = image.shape
                # 2. build the mean image
                mean_img = IMG_MEAN.repeat(B, 1, H, W)
            image = image.clone() - mean_img  # 3. subtract the mean image
            image = Variable(image).cuda()

            # forward
            output1 = model1(image)
            output1 = nn.functional.softmax(output1, dim=1)

            output2 = model2(image)
            output2 = nn.functional.softmax(output2, dim=1)

            output3 = model3(image)
            output3 = nn.functional.softmax(output3, dim=1)

            a, b = 0.3333, 0.3333
            output = a * output1 + b * output2 + (1.0 - a - b) * output3

            output = nn.functional.interpolate(
                output, (1024, 2048), mode='bilinear',
                align_corners=True).cpu().data[0].numpy()
            output = output.transpose(1, 2, 0)

            output_nomask = np.asarray(np.argmax(output, axis=2),
                                       dtype=np.uint8)
            output_col = colorize_mask(output_nomask)
            output_nomask = Image.fromarray(output_nomask)
            name = name[0].split('/')[-1]
            output_nomask.save('%s/%s' % (args.save, name))
            output_col.save('%s/%s_color.png' %
                            (args.save, name.split('.')[0]))
    # scores computed and saved
    # ------------------------------------------------- #
    compute_mIoU(args.gt_dir, args.save, args.devkit_dir, args.restore_from)
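The fusion step above hard-codes three models and two weights. A small generalization (fuse_softmax_outputs is a hypothetical helper, not part of the original code) that averages any number of models:

import torch.nn.functional as F

def fuse_softmax_outputs(models, image, weights):
    # Weighted average of the per-model softmax maps; the weights are
    # assumed to sum to 1, as a, b and (1.0 - a - b) do above.
    outputs = [F.softmax(m(image), dim=1) for m in models]
    return sum(w * o for w, o in zip(weights, outputs))

# hypothetical usage:
# output = fuse_softmax_outputs([model1, model2, model3], image, [1/3, 1/3, 1/3])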
Example #24
0
def main():
    # torch.manual_seed(1234)
    # torch.cuda.manual_seed(1234)
    opt = TrainOptions()
    args = opt.initialize()
    
    _t = {'iter time' : Timer()}
    
    model_name = args.source + '_to_' + args.target
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)   
        os.makedirs(os.path.join(args.snapshot_dir, 'logs'))
    opt.print_options(args)
    
    sourceloader, targetloader = CreateSrcDataLoader(args), CreateTrgDataLoader(args)
    targetloader_iter, sourceloader_iter = iter(targetloader), iter(sourceloader)
    
    model, optimizer = CreateModel(args)
    model_D, optimizer_D = CreateDiscriminator(args)
    
    start_iter = 0
    if args.restore_from is not None:
        start_iter = int(args.restore_from.rsplit('/', 1)[1].rsplit('_')[1])
        
    train_writer = tensorboardX.SummaryWriter(os.path.join(args.snapshot_dir, "logs", model_name))
    
    bce_loss = torch.nn.BCEWithLogitsLoss()
    cent_loss = ConditionalEntropyLoss()
    
    cudnn.enabled = True
    cudnn.benchmark = True
    model.train()
    model.cuda()
    model_D.train()
    model_D.cuda()
    loss = ['loss_seg_src', 'loss_seg_trg', 'loss_D_trg_fake', 'loss_D_src_real', 'loss_D_trg_real']
    _t['iter time'].tic()

    pbar = tqdm(range(start_iter, args.num_steps_stop))
    #for i in range(start_iter, args.num_steps):
    for i in pbar:
        
        model.adjust_learning_rate(args, optimizer, i)
        model_D.adjust_learning_rate(args, optimizer_D, i)
        
        optimizer.zero_grad()
        optimizer_D.zero_grad()
        for param in model_D.parameters():
            param.requires_grad = False 
            
        src_img, src_lbl, _, _ = next(sourceloader_iter)
        src_img, src_lbl = Variable(src_img).cuda(), Variable(src_lbl.long()).cuda()
        src_seg_score = model(src_img)
        loss_seg_src = CrossEntropy2d(src_seg_score, src_lbl)
        #loss_seg_src = model.loss   
        loss_seg_src.backward()
        
        if args.data_label_folder_target is not None:
            trg_img, trg_lbl, _, _ = next(targetloader_iter)
            trg_img, trg_lbl = Variable(trg_img).cuda(), Variable(trg_lbl.long()).cuda()
            trg_seg_score = model(trg_img) 
            loss_seg_trg = model.loss
        else:
            trg_img, _, name = next(targetloader_iter)
            trg_img = Variable(trg_img).cuda()
            trg_seg_score = model(trg_img)
            loss_seg_trg = cent_loss(trg_seg_score)
            #loss_seg_trg = entropy_loss(F.softmax(trg_seg_score))
            #loss_seg_trg = 0
        
        outD_trg = model_D(F.softmax(trg_seg_score, dim=1))
        loss_D_trg_fake = bce_loss(outD_trg, Variable(torch.FloatTensor(outD_trg.data.size()).fill_(0)).cuda())
        #loss_D_trg_fake = model_D.loss
        
        loss_trg = args.lambda_adv_target * (loss_D_trg_fake + loss_seg_trg)
        loss_trg.backward()
        
        for param in model_D.parameters():
            param.requires_grad = True
        
        src_seg_score, trg_seg_score = src_seg_score.detach(), trg_seg_score.detach()
        
        outD_src = model_D(F.softmax(src_seg_score, dim=1))
        loss_D_src_real = bce_loss(outD_src, Variable(torch.FloatTensor(outD_src.data.size()).fill_(0)).cuda()) / 2
        #loss_D_src_real = model_D.loss / 2
        loss_D_src_real.backward()
        
        outD_trg = model_D(F.softmax(trg_seg_score, dim=1))
        loss_D_trg_real = bce_loss(outD_trg, Variable(torch.FloatTensor(outD_trg.data.size()).fill_(1)).cuda()) / 2
        #loss_D_trg_real = model_D.loss / 2
        loss_D_trg_real.backward()   

        d_loss = loss_D_src_real.data + loss_D_trg_real.data
       
        
        optimizer.step()
        optimizer_D.step()
        
        
        for m in loss:
            train_writer.add_scalar(m, eval(m), i+1)
            
        if (i+1) % args.save_pred_every == 0:
            print('taking snapshot ...')
            torch.save(model.state_dict(), os.path.join(args.snapshot_dir, '%s_' %(args.source) +str(i+1)+'.pth' )) 
            torch.save(model_D.state_dict(), os.path.join(args.snapshot_dir, '%s_' %(args.source) +str(i+1)+'_D.pth' ))   
            
        if (i+1) % args.print_freq == 0:
            _t['iter time'].toc(average=False)
            print('[it %d][src seg loss %.4f][adv loss %.4f][d loss %.4f][lr %.4f][%.2fs]' %
                  (i + 1, loss_seg_src.data, loss_D_trg_fake.data, d_loss, optimizer.param_groups[0]['lr'] * 10000, _t['iter time'].diff))
            if i + 1 > args.num_steps_stop:
                print('finish training')
                break
            _t['iter time'].tic()
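This example instantiates ConditionalEntropyLoss without showing its body. A minimal sketch under the usual assumption that it is the mean per-pixel entropy of the predicted class distribution:

import torch.nn as nn
import torch.nn.functional as F

class ConditionalEntropyLoss(nn.Module):
    # Assumed implementation: mean Shannon entropy of the softmax output,
    # computed from logits of shape (B, C, H, W).
    def forward(self, logits):
        p = F.softmax(logits, dim=1)
        log_p = F.log_softmax(logits, dim=1)
        return -(p * log_p).sum(dim=1).mean()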
Example #25
0
# NOTE: the start of this example was truncated by the source page. The first
# statement is reconstructed here on the assumption that R, G and B are
# per-channel tensors defined in the missing lines.
X = torch.cat(
    (torch.reshape(R, (-1, 1)), torch.reshape(G, (-1, 1)), torch.reshape(B, (-1, 1))),
    dim=1)
X = torch.unsqueeze(X, 2)
Y = torch.pow(X, 1 / 1.1)

X -= 0.5
Y -= 0.5

dataset = Data.TensorDataset(X, Y)
data_iter = Data.DataLoader(dataset,
                            batch_size=256 * 256,
                            shuffle=True,
                            num_workers=32)

MODEL, __ = CreateModel([4, 8, 16, 24, 40])
# MODEL = nn.DataParallel(MODEL,device_ids=[0,1]).cuda()

MODEL = torch.nn.DataParallel(MODEL, device_ids=device)

MODEL = MODEL.to('cuda')

# if 1:
#     avgSAD = list()
#     maxSAD = list()
#     for idx, data in enumerate(data_iter):
#         X_, Y_ = data
#         Y_= Y_.to('cuda')

#         Y_pred = MODEL(X_)
#         Y_pred += 0.5
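The snippet stops after building the model and the (X, Y) gamma-curve dataset. A minimal training sketch over data_iter, assuming a plain regression objective (the optimizer, learning rate, and epoch count are illustrative, not from the original):

import torch

criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(MODEL.parameters(), lr=1e-3)  # hypothetical lr

for epoch in range(10):  # hypothetical epoch count
    for X_, Y_ in data_iter:
        X_, Y_ = X_.to('cuda'), Y_.to('cuda')
        optimizer.zero_grad()
        loss = criterion(MODEL(X_), Y_)
        loss.backward()
        optimizer.step()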
Example #26
0
def main():

    opt = TrainOptions()
    args = opt.initialize()

    _t = {'iter time': Timer()}

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
        os.makedirs(os.path.join(args.snapshot_dir, 'logs'))
    opt.print_options(args)

    targetloader = CreateActTrgDataLoader(args, 'train')
    targetloader_iter = iter(targetloader)
    testloader = CreateActTrgDataLoader(args, 'test')

    model, optimizer = CreateModel(args)

    start_iter = 0
    if args.restore_from is not None:
        start_iter = int(args.restore_from.rsplit('/', 1)[1].rsplit('_')[1])

    train_writer = tensorboardX.SummaryWriter(
        os.path.join(args.snapshot_dir, "logs"))

    bce_loss = torch.nn.BCEWithLogitsLoss()

    cudnn.enabled = True
    cudnn.benchmark = True
    model.train()
    model.cuda()
    loss = ['loss_trg', 'eval_loss']
    _t['iter time'].tic()
    best_loss_eval = None
    best_step = 0
    eval_loss = np.array([0])
    for i in range(start_iter, args.num_steps):
        model.train()
        model.module.adjust_learning_rate(args, optimizer, i)

        optimizer.zero_grad()

        try:
            trg_img, trg_lbl, _, paths = next(targetloader_iter)
        except StopIteration:
            targetloader_iter = iter(targetloader)
            trg_img, trg_lbl, _, paths = next(targetloader_iter)
        trg_score, loss_trg = model(trg_img, lbl=trg_lbl)
        loss_trg.mean().backward()

        optimizer.step()

        if (i + 1) % args.save_pred_every == 0:
            with torch.no_grad():
                model.eval()
                eval_loss = 0
                for test_img, test_lbl, _, _ in testloader:
                    test_score, loss_test = model(test_img, lbl=test_lbl)
                    eval_loss += loss_test.mean().item() * test_img.size(0)
                eval_loss /= len(testloader.dataset)
                if best_loss_eval is None or eval_loss < best_loss_eval:
                    best_loss_eval = eval_loss
                    best_step = i + 1
                print('taking snapshot ... eval_loss: {}'.format(eval_loss))
                torch.save(
                    model.module.state_dict(),
                    os.path.join(args.snapshot_dir,
                                 str(i + 1) + '.pth'))
                eval_loss = np.array([eval_loss])

        if (i + 1) % args.print_freq == 0:
            _t['iter time'].toc(average=False)
            print('[it %d][src loss %.4f][lr %.4f][%.2fs]' % \
                    (i + 1, loss_trg.mean().data, optimizer.param_groups[0]['lr']*10000, _t['iter time'].diff))
            if i + 1 > args.num_steps_stop:
                print('finish training')
                break
            _t['iter time'].tic()

        for m in loss:
            train_writer.add_scalar(m, eval(m).mean(), i + 1)
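The loop above tracks best_loss_eval and best_step but keeps every snapshot on disk. A short sketch of restoring the best checkpoint afterwards, assuming the paths produced by the torch.save() call in the loop:

best_path = os.path.join(args.snapshot_dir, str(best_step) + '.pth')
model.module.load_state_dict(torch.load(best_path))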
Example #27
0
def main():

    opt = TrainOptions()
    args = opt.initialize()

    _t = {'iter time': Timer()}

    model_name = args.source + '_to_' + args.target
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
        os.makedirs(os.path.join(args.snapshot_dir, 'logs'))
    opt.print_options(args)

    sourceloader, targetloader = CreateSrcDataLoader(
        args), CreateTrgDataLoader(args)
    targetloader_iter, sourceloader_iter = iter(targetloader), iter(
        sourceloader)

    model, optimizer = CreateModel(args)
    model_D1, optimizer_D1 = CreateDiscriminator(args, 1)
    model_D2, optimizer_D2 = CreateDiscriminator(args, 2)
    start_iter = 0
    if args.restore_from is not None:
        start_iter = int(args.restore_from.rsplit('/', 1)[1].rsplit('_')[1])

    train_writer = tensorboardX.SummaryWriter(
        os.path.join(args.snapshot_dir, "logs", model_name))

    bce_loss = torch.nn.BCEWithLogitsLoss()
    interp_target = nn.Upsample(size=(1024, 1024),
                                mode='bilinear',
                                align_corners=True)
    interp_source = nn.Upsample(size=(1024, 1024),
                                mode='bilinear',
                                align_corners=True)
    cudnn.enabled = True
    cudnn.benchmark = True
    model.train()
    model.cuda()
    model_D1.train()
    model_D1.cuda()
    model_D2.train()
    model_D2.cuda()
    weight_loss = WeightedBCEWithLogitsLoss()
    weight_map_loss = WeightMapLoss()
    loss = [
        'loss_seg_src', 'loss_seg_trg', 'loss_D_trg_fake', 'loss_D_src_real',
        'loss_D_trg_real'
    ]
    _t['iter time'].tic()
    for i in range(start_iter, args.num_steps):
        print(i)
        model.adjust_learning_rate(args, optimizer, i)
        model_D1.adjust_learning_rate(args, optimizer_D1, i)
        model_D2.adjust_learning_rate(args, optimizer_D2, i)
        optimizer.zero_grad()
        optimizer_D1.zero_grad()
        optimizer_D2.zero_grad()

        ##train G
        for param in model_D1.parameters():
            param.requires_grad = False
        for param in model_D2.parameters():
            param.requires_grad = False

        try:
            src_img, src_lbl, weight_map, _ = next(sourceloader_iter)
        except StopIteration:
            sourceloader_iter = iter(sourceloader)
            src_img, src_lbl, weight_map, _ = next(sourceloader_iter)
        src_img, src_lbl, weight_map = Variable(src_img).cuda(), Variable(
            src_lbl.long()).cuda(), Variable(weight_map.long()).cuda()
        src_seg_score1, src_seg_score2, src_seg_score3, src_seg_score4 = model(
            src_img, lbl=src_lbl, weight=weight_map)
        #WeightLoss1 = weight_map_loss(src_seg_score1, src_lbl, weight_map)
        #WeightLoss2 = weight_map_loss(src_seg_score2, src_lbl, weight_map)
        loss_seg_src = model.loss
        loss_seg_src.backward()

        if args.data_label_folder_target is not None:
            trg_img, trg_lbl, _, name = next(targetloader_iter)
            trg_img, trg_lbl = Variable(trg_img).cuda(), Variable(
                trg_lbl.long()).cuda()
            trg_seg_score1, trg_seg_score2, trg_seg_score3, trg_seg_score4 = model(
                trg_img, lbl=trg_lbl)
            loss_seg_trg = model.loss
        else:
            trg_img, _, name = next(targetloader_iter)
            trg_img = Variable(trg_img).cuda()
            trg_seg_score1, trg_seg_score2, trg_seg_score3, trg_seg_score4 = model(
                trg_img)
            loss_seg_trg = 0
        outD1_trg = model_D1(F.softmax(trg_seg_score1, dim=1), 0)
        outD2_trg = model_D2(F.softmax(trg_seg_score2, dim=1), 0)
        outD1_trg = interp_target(outD1_trg)  # [1, 1, 1024, 1024]
        outD2_trg = interp_target(outD2_trg)
        '''
        if i > 9001:
            #import pdb;pdb.set_trace()
            weight_map1 = prob_2_entropy(F.softmax(trg_seg_score1)) #[1, 1, 1024, 1024]
            weight_map2 = prob_2_entropy(F.softmax(trg_seg_score2)) #[1, 1, 1024, 1024]
            loss_D1_trg_fake = weight_loss(outD1_trg, Variable(torch.FloatTensor(outD1_trg.data.size()).fill_(0)).cuda(), weight_map1, 0.3, 1)
            loss_D2_trg_fake = weight_loss(outD2_trg, Variable(torch.FloatTensor(outD2_trg.data.size()).fill_(0)).cuda(), weight_map2, 0.3, 1)
        else:
            loss_D1_trg_fake = model_D1.loss
            loss_D2_trg_fake = model_D2.loss
        loss_D_trg_fake = loss_D1_trg_fake*0.2 + loss_D2_trg_fake
        '''

        loss_D_trg_fake = model_D1.loss * 0.2 + model_D2.loss
        loss_trg = args.lambda_adv_target * loss_D_trg_fake + loss_seg_trg
        loss_trg.backward()

        ###train D
        for param in model_D1.parameters():
            param.requires_grad = True
        for param in model_D2.parameters():
            param.requires_grad = True

        src_seg_score1 = src_seg_score1.detach()
        src_seg_score2 = src_seg_score2.detach()
        src_seg_score3 = src_seg_score3.detach()
        src_seg_score4 = src_seg_score4.detach()
        trg_seg_score1 = trg_seg_score1.detach()
        trg_seg_score2 = trg_seg_score2.detach()
        trg_seg_score3 = trg_seg_score3.detach()
        trg_seg_score4 = trg_seg_score4.detach()

        outD1_src = model_D1(F.softmax(src_seg_score1, dim=1), 0)
        outD2_src = model_D2(F.softmax(src_seg_score2, dim=1), 0)

        loss_D1_src_real = model_D1.loss / 2
        loss_D1_src_real.backward()
        loss_D2_src_real = model_D2.loss / 2
        loss_D2_src_real.backward()
        loss_D_src_real = loss_D1_src_real + loss_D2_src_real

        outD1_trg = model_D1(F.softmax(trg_seg_score1, dim=1), 1)
        outD2_trg = model_D2(F.softmax(trg_seg_score2, dim=1), 1)

        outD1_trg = interp_target(outD1_trg)
        outD2_trg = interp_target(outD2_trg)
        if i > 9001:
            weight_map1 = prob_2_entropy(F.softmax(trg_seg_score1, dim=1))
            weight_map2 = prob_2_entropy(F.softmax(trg_seg_score2, dim=1))
            loss_D1_trg_real = weight_loss(
                outD1_trg,
                Variable(torch.FloatTensor(
                    outD1_trg.data.size()).fill_(1)).cuda(), weight_map1, 0.3,
                1) / 2
            loss_D2_trg_real = weight_loss(
                outD2_trg,
                Variable(torch.FloatTensor(
                    outD2_trg.data.size()).fill_(1)).cuda(), weight_map2, 0.3,
                1) / 2

        else:
            loss_D1_trg_real = model_D1.loss / 2
            loss_D2_trg_real = model_D2.loss / 2

        loss_D1_trg_real.backward()
        loss_D2_trg_real.backward()
        loss_D_trg_real = loss_D1_trg_real + loss_D2_trg_real

        optimizer.step()
        optimizer_D1.step()
        optimizer_D2.step()

        for m in loss:
            train_writer.add_scalar(m, eval(m), i + 1)

        if (i + 1) % args.save_pred_every == 0:
            print('taking snapshot ...')
            torch.save(
                model.state_dict(),
                os.path.join(args.snapshot_dir,
                             '%s_' % (args.source) + str(i + 1) + '.pth'))
            torch.save(
                model_D1.state_dict(),
                os.path.join(args.snapshot_dir,
                             '%s_' % (args.source) + str(i + 1) + '_D1.pth'))
            torch.save(
                model_D2.state_dict(),
                os.path.join(args.snapshot_dir,
                             '%s_' % (args.source) + str(i + 1) + '_D2.pth'))
        if (i + 1) % args.print_freq == 0:
            _t['iter time'].toc(average=False)
            print('[it %d][src seg loss %.4f][trg seg loss %.4f][lr %.4f][%.2fs]' %
                  (i + 1, loss_seg_src.data, loss_seg_trg.data, optimizer.param_groups[0]['lr'] * 10000, _t['iter time'].diff))
            if i + 1 > args.num_steps_stop:
                print('finish training')
                break
            _t['iter time'].tic()
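This example calls prob_2_entropy to build entropy weight maps but does not define it. A sketch consistent with the [1, 1, 1024, 1024] shapes noted in the comments, assuming the usual normalized-entropy formulation:

import math
import torch

def prob_2_entropy(prob):
    # Per-pixel Shannon entropy of a softmax map (N, C, H, W), summed over the
    # class dimension and normalized by log2(C) so values lie in [0, 1];
    # returns an (N, 1, H, W) weight map. Assumed implementation.
    n, c, h, w = prob.size()
    ent = -torch.sum(prob * torch.log2(prob + 1e-30), dim=1, keepdim=True)
    return ent / math.log2(c)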