Example #1
print("Params are {}".format(
    '\n'.join(['{}: {}'.format(n, p.size()) for n,p in net.named_parameters()]))
)
# optimizer = torch.optim.Adam(params, lr=0.001, eps=1e-4, weight_decay=weight_decay)
optimizer = torch.optim.SGD(params[8:], lr=lr, momentum=momentum, weight_decay=weight_decay)

if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# tensorboard
use_tensorboard = use_tensorboard and CrayonClient is not None
if use_tensorboard:
    cc = CrayonClient(hostname='127.0.0.1')
    if remove_all_log:
        cc.remove_all_experiments()
    if exp_name is None:
        exp_name = datetime.now().strftime('vgg16_%m-%d_%H-%M')
        exp = cc.create_experiment(exp_name)
    else:
        exp = cc.open_experiment(exp_name)

# training
train_loss = 0
tp, tf, fg, bg = 0., 0., 0, 0
step_cnt = 0
re_cnt = False
t = Timer()
t.tic()
for step in range(start_step, end_step+1):
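    # NOTE: the original example is truncated here, at the top of the training loop.
    # What follows is a hypothetical sketch of a typical loop body: `data_loader`,
    # `log_interval`, and the way the loss is produced are assumptions, while `exp`,
    # `use_tensorboard`, `train_loss`, and `step_cnt` come from the setup above.
    # `add_scalar_value` is pycrayon's scalar-logging call.
    blob = data_loader.get_next_batch()              # assumed data source
    loss = net(blob['data'], blob['gt_boxes'])       # assumed: forward pass returns the loss
    train_loss += loss.item()
    step_cnt += 1

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if use_tensorboard and step % log_interval == 0:
        # pycrayon experiments take (name, value, step=...)
        exp.add_scalar_value('train_loss', train_loss / step_cnt, step=step)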
Example #2
class _baseModel(object):
    '''Base model that combines netG and netD into a GAN model.

    @Params:
    - opt: options used to configure the GAN model
    - train: train or test
    - cc: whether to use a Crayon (TensorBoard) client
    - cuda: whether to use CUDA
    '''

    def __init__(self, opt):
        self.opt = opt
        self.istrain = opt.train
        self.cc = CrayonClient(hostname="localhost") if opt.cc else opt.cc
        self.cuda = opt.cuda

    def create_tensorboard(self):
        '''create TensorBoard experiments through the dockerized Crayon server
        '''
        if self.cc:
            self.cc.remove_all_experiments()
            self.D_exp = create_sigle_experiment(self.cc, 'D_loss')
            self.G_exp = create_sigle_experiment(self.cc, 'G_loss')
    
    def draft_data(self, input):
        '''copy a batch from the dataset loader into X/Z
        '''
        pass

    def backward_D(self):
        '''backward pass for netD
        '''
        pass

    def train(self):
        '''train the GAN
        '''
        pass

    def test(self):
        '''test the GAN
        '''
        pass

    def save_network(self, it, savepath):
        '''save checkpoints of netG and netD in savepath

        @Params:
        - it: number of iterations
        - savepath: directory in which to save the checkpoints
        '''
        torch.save(self.netG.state_dict(), '%s/netG_epoch_%d.pth' % (savepath, it))
        torch.save(self.netD.state_dict(), '%s/netD_epoch_%d.pth' % (savepath, it))
    
    def load_networkG(self, g_network_path):
        '''load network parameters of netG

        @Params:
        - g_network_path: the path of netG
        '''
        self.netG.load_state_dict(torch.load(g_network_path))

    def load_networkD(self, d_network_path):
        '''load network parameters of netD

        @Params:
        - d_network_path: the path of netD
        '''
        self.netD.load_state_dict(torch.load(d_network_path))

    def save_image(self, fake, it, savepath):
        '''save result of netG output

        @Params:
        - fake: the output of netG
        - it: number of iterations
        - savepath: directory in which to save the image
        '''
        vutils.save_image(fake.data,
                    '%s/fake_samples_epoch_%03d.png' % (savepath, it))
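For context, here is a minimal sketch of how `_baseModel` might be specialized; the tiny `netG`/`netD` modules and the `opt` namespace below are illustrative stand-ins, not part of the original code:

import torch
import torch.nn as nn
from argparse import Namespace

class ToyGAN(_baseModel):
    '''Illustrative subclass with tiny fully connected generator/discriminator.'''

    def __init__(self, opt):
        super(ToyGAN, self).__init__(opt)
        self.netG = nn.Sequential(nn.Linear(16, 64), nn.ReLU(), nn.Linear(64, 64))
        self.netD = nn.Sequential(nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, 1))

opt = Namespace(train=True, cc=False, cuda=False)   # assumed option fields
model = ToyGAN(opt)
model.save_network(it=0, savepath='.')              # writes ./netG_epoch_0.pth and ./netD_epoch_0.pth
model.load_networkG('./netG_epoch_0.pth')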
Example #3
def remove_all_experiments(hostname, port):
    '''
    DANGER: removes every experiment on the Crayon server; use only if you are sure.
    '''
    tb = CrayonClient(hostname=hostname, port=port)
    tb.remove_all_experiments()
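Because the helper above wipes every experiment on the Crayon server, a slightly safer variant (a sketch; `get_experiment_names` is part of the pycrayon client API) reports what is about to be deleted first:

def remove_all_experiments_verbose(hostname, port):
    '''List the experiments on the Crayon server, then remove them all.'''
    tb = CrayonClient(hostname=hostname, port=port)
    names = tb.get_experiment_names()
    print('Deleting {} experiment(s): {}'.format(len(names), names))
    tb.remove_all_experiments()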
Example #4
    def parse(self):

        opt = self.gather_options()
        # opt.isTrain = self.isTrain   # train or test

        model = opt.model_name
        dataset_name = opt.dataset  # dataset name, used for saving model files
        exp = 'v7-{}-{}-{}/'.format(dataset_name, model, datetime.now().strftime('exp-%m-%d_%H-%M'))
        expr_dir = './saved_models/{}/'.format(exp)  # model files are saved here

        opt.crop_size = tuple(map(int, opt.crop_size.split('x')))  # materialize: map() is a one-shot iterator in Python 3

        if opt.save_model_para and not os.path.exists(expr_dir):
            makedirs('./saved_models')
            makedirs(expr_dir)
            makedirs(expr_dir+'./sup/')

        else:
            expr_dir = './temp1/'
            makedirs(expr_dir)
            if not os.path.exists(expr_dir+'./sup/'):
                os.mkdir(expr_dir+'./sup/')

        opt.expr_dir = expr_dir

        logger = logging.getLogger()
        fh = logging.FileHandler("{0}/{1}.log".format(expr_dir, 'log'), mode='w')
        fh.setFormatter(logging.Formatter(fmt="%(asctime)s  %(message)s", datefmt="%d-%H:%M"))
        logger.addHandler(fh)
        opt.logger = logger

        self.opt = opt

        # TensorBoard config
        use_tensorboard = opt.use_tensorboard
        remove_all_log = False   # remove all historical experiments in TensorBoard
        use_tensorboard = use_tensorboard and CrayonClient is not None
        self.vis_exp = None

        if use_tensorboard:
            cc = CrayonClient(hostname='8.8.8.8', port=7879)
            if remove_all_log:
                cc.remove_all_experiments()
            random.seed(time.time())
            vis_exp_name = exp + str(random.random())
            opt.vis_exp_name = vis_exp_name
            self.vis_exp = cc.create_experiment(vis_exp_name)

        import socket
        hostname = socket.gethostname()

        # set gpu ids
        str_ids = opt.gpus.split(',')
        opt.gpus = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                opt.gpus.append(id)
        if len(opt.gpus) > 0:
            torch.cuda.set_device(opt.gpus[0])

        self.opt = opt
        self.print_options(opt)

        return self.opt
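A note on the `crop_size` line above: in Python 3, `map` returns a one-shot iterator that cannot be indexed, which is why it is materialized into a tuple before being stored on `opt`. A quick standalone illustration (the '256x192' string is just an example value):

crop = map(int, '256x192'.split('x'))
print(list(crop))                              # [256, 192]
print(list(crop))                              # [] -- the iterator is already exhausted
crop = tuple(map(int, '256x192'.split('x')))
print(crop[0], crop[1])                        # 256 192, safe to index and reuse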
Example #5
def main():
    parser = argparse.ArgumentParser(description='mcnn worldexp.')
    parser.add_argument('--preload', type=int, default=1)
    parser.add_argument('--data', type=str, default="/mnt/m2/mzcc/crowd_data/worldexpo", help='train, test, etc')
    args = parser.parse_args()
    method = 'mcnn'
    dataset_name = 'worldexpo'
    output_dir = './saved_models/'

    data_path = args.data
    train_path = data_path+'/train_frame'
    train_gt_path = data_path+'/train_dmap'
    train_mask_path = os.path.join(data_path,'train_roi')
    val_path = data_path+'/test_frame'
    val_gt_path = data_path+'/test_dmap'
    val_mask_path = os.path.join(data_path, 'test_roi')

    # training configuration
    start_step = 0
    end_step = 3000
    lr = 0.000001
    momentum = 0.9
    disp_interval = 500
    log_interval = 250


    # TensorBoard config
    use_tensorboard = False
    save_exp_name = method + '_' + dataset_name + '_' + 'v1'
    remove_all_log = False   # remove all historical experiments in TensorBoard
    exp_name = None # the previous experiment name in TensorBoard

    # ------------
    rand_seed = 64678
    if rand_seed is not None:
        np.random.seed(rand_seed)
        torch.manual_seed(rand_seed)
        torch.cuda.manual_seed(rand_seed)


    # load net
    net = CrowdCounter()
    network.weights_normal_init(net, dev=0.01)
    # network.weights_xavier_init(net, gain=0.01)
    net.cuda()
    net.train()

    params = list(net.parameters())
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr)

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # tensorboard
    use_tensorboard = use_tensorboard and CrayonClient is not None
    if use_tensorboard:
        cc = CrayonClient(hostname='127.0.0.1')
        if remove_all_log:
            cc.remove_all_experiments()
        if exp_name is None:
            exp_name = save_exp_name
            exp = cc.create_experiment(exp_name)
        else:
            exp = cc.open_experiment(exp_name)

    # training
    train_loss = 0
    step_cnt = 0
    re_cnt = False
    t = Timer()
    t.tic()

    data_loader = ExrImageDataLoader(train_path, train_gt_path, mask_path=train_mask_path,
                                     shuffle=True, gt_downsample=True, pre_load=args.preload)
    data_loader_val = ExrImageDataLoader(val_path, val_gt_path, mask_path=val_mask_path,
                                         shuffle=False, gt_downsample=True, pre_load=False)
    best_mae = 10000000

    for epoch in range(start_step, end_step+1):
        step = -1
        train_loss = 0
        for blob in data_loader:
            step = step + 1
            im_data = blob['data']
            gt_data = blob['gt_density']
            mask = blob['mask']
            density_map = net(im_data, gt_data, mask=mask)
            loss = net.loss
            train_loss += loss.item()  # formerly loss.data[0] in pre-0.4 PyTorch
            step_cnt += 1
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % disp_interval == 0:
                print("current loss: {}".format(loss.item()))
                duration = t.toc(average=False)
                fps = step_cnt / duration
                gt_count = np.sum(gt_data)
                density_map = density_map.data.cpu().numpy()
                et_count = np.sum(density_map)
                utils.save_results(im_data, gt_data, density_map, output_dir)
                log_text = 'epoch: %4d, step %4d, Time: %.4fs, gt_cnt: %4.1f, et_cnt: %4.1f' % (epoch,
                    step, 1./fps, gt_count, et_count)
                log_print(log_text, color='green', attrs=['bold'])
                re_cnt = True


            if re_cnt:
                t.tic()
                re_cnt = False

        if epoch % 2 == 0:
            save_name = os.path.join(output_dir, '{}_{}_{}.h5'.format(method, dataset_name, epoch))
            network.save_net(save_name, net)
            # calculate error on the validation dataset
            mae,mse = evaluate_model(save_name, data_loader_val)
            if mae < best_mae:
                best_mae = mae
                best_mse = mse
                best_model = '{}_{}_{}.h5'.format(method, dataset_name, epoch)
            log_text = 'EPOCH: %d, MAE: %.1f, MSE: %0.1f' % (epoch, mae, mse)
            log_print(log_text, color='green', attrs=['bold'])
            log_text = 'BEST MAE: %0.1f, BEST MSE: %0.1f, BEST MODEL: %s' % (best_mae, best_mse, best_model)
            log_print(log_text, color='green', attrs=['bold'])
            if use_tensorboard:
                exp.add_scalar_value('MAE', mae, step=epoch)
                exp.add_scalar_value('MSE', mse, step=epoch)
                exp.add_scalar_value('train_loss', train_loss/data_loader.get_num_samples(), step=epoch)
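`evaluate_model` is not shown in this example. Here is a rough sketch of what such a MAE/MSE evaluation could look like for this loader, assuming the same blob keys (`data`, `gt_density`, `mask`), that `network.load_net` is the counterpart of `network.save_net`, and that MSE is reported as the root of the mean squared count error:

def evaluate_model_sketch(model_path, data_loader):
    '''Hypothetical stand-in for evaluate_model: MAE/MSE over predicted vs. ground-truth counts.'''
    net = CrowdCounter()
    network.load_net(model_path, net)   # assumed counterpart of network.save_net
    net.cuda()
    net.eval()
    mae, mse = 0.0, 0.0
    with torch.no_grad():
        for blob in data_loader:
            density_map = net(blob['data'], blob['gt_density'], mask=blob['mask'])
            gt_count = np.sum(blob['gt_density'])
            et_count = np.sum(density_map.data.cpu().numpy())
            mae += abs(gt_count - et_count)
            mse += (gt_count - et_count) ** 2
    n = data_loader.get_num_samples()
    return mae / n, np.sqrt(mse / n)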
Example #6
def main():
    client = CrayonClient()
    client.remove_all_experiments()