Code example #1
def check_remain_time(self):
    # Start the wall-clock timer at the first epoch of the first fold.
    if self.fold == 0 and self.epoch == 1:
        self.start_time = time.time()
    if self.fold == 0 and self.epoch == 2:
        util.writelog(
            '-> pre epoch cost time : ' +
            str(round(time.time() - self.start_time, 2)) + 's', self.opt,
            True, True)
    if (self.fold == 0 and self.epoch > 1) or self.fold != 0:
        # v: average training speed so far, in epochs per second.
        v = (self.fold * self.opt.epochs + self.epoch -
             1) / (time.time() - self.start_time)
        # remain: estimated seconds left; logged to TensorBoard in hours.
        remain = (self.opt.k_fold * self.opt.epochs -
                  (self.fold * self.opt.epochs + self.epoch)) / v
        self.opt.TBGlobalWriter.add_scalar(
            'RemainTime', remain / 3600,
            self.fold * self.opt.epochs + self.epoch)
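Code example #1 estimates the remaining training time from the average epoch rate: v is the number of completed epochs per second of wall time, so the epochs still to run divided by v gives the seconds left. A quick sanity check of that arithmetic, with made-up numbers:

# Hypothetical run: 5 folds x 20 epochs, 64 epochs finished in 1860 s.
k_fold, epochs = 5, 20
done = 3 * epochs + 4                    # 64 epochs completed so far
v = done / 1860.0                        # ~0.0344 epochs per second
remain = (k_fold * epochs - done) / v    # 36 epochs left -> ~1046 s
print(round(remain / 3600, 2), 'h')      # -> 0.29 h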
Code example #2
    def network_init(self, printflag=False):

        self.net = creatnet.creatnet(self.opt)
        self.optimizer = torch.optim.Adam(self.net.parameters(),
                                          lr=self.opt.lr)
        self.criterion_classifier = nn.CrossEntropyLoss(weight=self.opt.weight)
        self.criterion_autoencoder = nn.MSELoss()
        self.epoch = 1
        self.plot_result = {'train': [], 'eval': [], 'F1': []}
        self.confusion_mats = []
        self.test_flag = True

        if printflag:
            util.writelog('network:\n' + str(self.net), self.opt, True)
            show_paramsnumber(self.net, self.opt)

        if self.opt.pretrained != '':
            self.net.load_state_dict(torch.load(self.opt.pretrained))
        if self.opt.continue_train:
            self.net.load_state_dict(
                torch.load(os.path.join(self.opt.save_dir, 'last.pth')))
        if self.opt.gpu_id != -1:
            self.net.cuda()
Code example #3
File: statistics.py  Project: HypoX64/candock
def statistics(mat, opt, logname, heatmapname):
    util.writelog(
        '------------------------------ ' + logname +
        ' result ------------------------------', opt, True)
    util.writelog(
        logname + ' -> macro-prec,reca,F1,err,kappa: ' + str(report(mat)), opt,
        True, True)
    util.writelog('confusion_mat:\n' + str(mat) + '\n', opt, True, False)
    plot.draw_heatmap(mat, opt, name=heatmapname)
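The candock examples on this page all route their logging through util.writelog, whose definition is never shown here. The following is only a minimal sketch of the interface the call sites imply; the parameter names and the log file name are assumptions, not the project's actual code:

import os

def writelog(log, opt, printflag=False, tensorboardflag=False):
    # Append the message to a log file under opt.save_dir; optionally echo
    # it to stdout and mirror it to TensorBoard. (Inferred from the call
    # sites on this page, not the real implementation.)
    with open(os.path.join(opt.save_dir, 'log.txt'), 'a') as f:
        f.write(log + '\n')
    if printflag:
        print(log)
    if tensorboardflag:
        opt.TBGlobalWriter.add_text('Log', log)

Note that the DeepMosaics examples (#4, #5, #8 and #10) call a different two-argument variant, util.writelog(path, text), whose first argument is an explicit file path rather than a message.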
Code example #4
opt.parser.add_argument('--quality',
                        type=int,
                        default=45,
                        help='minimal quality')
opt.parser.add_argument('--outsize', type=int, default=286, help='')
opt.parser.add_argument('--startcnt', type=int, default=0, help='')
opt.parser.add_argument('--minsize',
                        type=int,
                        default=96,
                        help='minimal roi size')
opt.parser.add_argument('--no_sclectscene', action='store_true', help='')
opt = opt.getparse()

util.makedirs(opt.savedir)
util.writelog(
    os.path.join(opt.savedir, 'opt.txt'),
    str(time.asctime(time.localtime(time.time()))) + '\n' + util.opt2str(opt))

videopaths = util.Traversal(opt.datadir)
videopaths = util.is_videos(videopaths)
random.shuffle(videopaths)

# load the ROI segmentation network
net = loadmodel.bisenet(opt, 'roi')

result_cnt = opt.startcnt
video_cnt = 1
starttime = datetime.datetime.now()
for videopath in videopaths:
    try:
        if opt.no_sclectscene:
Code example #5
File: train.py  Project: wliang410/DeepMosaics
opt.parser.add_argument('--norm', type=str, default='instance', help='')

opt.parser.add_argument('--maxiter', type=int, default=10000000, help='')
opt.parser.add_argument('--savefreq', type=int, default=10000, help='')
opt.parser.add_argument('--startiter', type=int, default=0, help='')
opt.parser.add_argument('--continuetrain', action='store_true', help='')
opt.parser.add_argument('--savename', type=str, default='MosaicNet', help='')


'''
--------------------------Init--------------------------
'''
opt = opt.getparse()
dir_checkpoint = os.path.join('checkpoints/', opt.savename)
util.makedirs(dir_checkpoint)
util.writelog(os.path.join(dir_checkpoint, 'loss.txt'),
              str(time.asctime(time.localtime(time.time()))) + '\n' + util.opt2str(opt))

N = opt.N
loss_sum = [0.,0.,0.,0.]
loss_plot = [[],[]]
item_plot = []

videos = os.listdir('./dataset')
videos.sort()
lengths = []
print('check dataset...')
for video in videos:
    video_images = os.listdir('./dataset/'+video+'/ori')
    lengths.append(len(video_images))
if opt.hd:
    netG = videoHD_model.MosaicNet(3*N+1, 3, norm=opt.norm)
Code example #6
def show_paramsnumber(net, opt):
    # Report the network's parameter count, in millions.
    parameters = sum(param.numel() for param in net.parameters())
    parameters = round(parameters / 1e6, 2)
    util.writelog('net parameters: ' + str(parameters) + 'M', opt, True)
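For scale, the same count on a plain PyTorch layer (a standalone illustration, not project code):

import torch.nn as nn

net = nn.Linear(2048, 2048)                   # 2048*2048 weights + 2048 biases
n = sum(p.numel() for p in net.parameters())  # 4,196,352 parameters
print(round(n / 1e6, 2), 'M')                 # -> 4.2 M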
Code example #7
def train(opt):
    core.network_init(printflag=True)

    categorys = os.listdir(opt.rec_tmp)
    categorys.sort()
    print('categorys:', categorys)
    category_num = len(categorys)

    received_signals = []
    received_labels = []

    sample_num = 1000
    for i in range(category_num):
        samples = os.listdir(os.path.join(opt.rec_tmp, categorys[i]))
        random.shuffle(samples)
        for j in range(len(samples)):
            txt = util.loadtxt(
                os.path.join(opt.rec_tmp, categorys[i], samples[j]))
            # Parse the whitespace-separated text file into a 1-D float array.
            signal_ori = np.array(txt.split(), dtype=np.float64)

            # Randomly crop 2000-point windows so that each category
            # contributes roughly sample_num training examples in total.
            for x in range(sample_num // len(samples)):
                ran = random.randint(1000, len(signal_ori) - 2000 - 1)
                this_signal = signal_ori[ran:ran + 2000]
                this_signal = arr.normliaze(this_signal, '5_95', truncated=4)

                received_signals.append(this_signal)
                received_labels.append(i)

    received_signals = np.array(received_signals).reshape(
        -1, opt.input_nc, opt.loadsize)
    received_labels = np.array(received_labels).reshape(-1, 1)
    received_signals_train, received_labels_train, received_signals_eval, received_labels_eval = \
        dataloader.segment_dataset(received_signals, received_labels, 0.8, random=False)
    print(received_signals_train.shape, received_signals_eval.shape)
    '''merge data'''
    signals_train, labels_train = dataloader.del_labels(
        ori_signals_train, ori_labels_train,
        np.arange(category_num, dtype=np.int64))
    signals_eval, labels_eval = dataloader.del_labels(
        ori_signals_eval, ori_labels_eval,
        np.arange(category_num, dtype=np.int64))

    signals_train = np.concatenate((signals_train, received_signals_train))
    labels_train = np.concatenate((labels_train, received_labels_train))
    signals_eval = np.concatenate((signals_eval, received_signals_eval))
    labels_eval = np.concatenate((labels_eval, received_labels_eval))

    label_cnt, label_cnt_per, label_num = statistics.label_statistics(
        labels_train)
    opt = options.get_auto_options(opt, label_cnt_per, label_num,
                                   signals_train)
    train_sequences = np.arange(len(labels_train), dtype=np.int64)
    eval_sequences = np.arange(len(labels_eval), dtype=np.int64)

    for epoch in range(opt.epochs):
        t1 = time.time()

        core.train(signals_train, labels_train, train_sequences)
        core.eval(signals_eval, labels_eval, eval_sequences)

        t2 = time.time()
        if epoch == 0:  # report the per-epoch cost after the first epoch
            util.writelog(
                '>>> per epoch cost time:' + str(round((t2 - t1), 2)) + 's',
                opt, True)
    plot.draw_heatmap(core.confusion_mats[-1], opt, name='final')
    core.save_traced_net()
Code example #8
opt.parser.add_argument('--maxload', type=int, default=1000000, help='')
opt.parser.add_argument('--continuetrain', action='store_true', help='')
opt.parser.add_argument('--startepoch', type=int, default=0, help='')
opt.parser.add_argument('--dataset', type=str, default='./datasets/face/', help='')
opt.parser.add_argument('--savename', type=str, default='face', help='')


'''
--------------------------Init--------------------------
'''
opt = opt.getparse()
dir_img = os.path.join(opt.dataset, 'origin_image')
dir_mask = os.path.join(opt.dataset, 'mask')
dir_checkpoint = os.path.join('checkpoints/', opt.savename)
util.makedirs(dir_checkpoint)
util.writelog(os.path.join(dir_checkpoint, 'loss.txt'),
              str(time.asctime(time.localtime(time.time()))) + '\n' + util.opt2str(opt))
torch.cuda.set_device(opt.gpu_id)

def Totensor(img, use_gpu=True):
    # Convert a numpy image to a float tensor, optionally moving it to the GPU.
    img = torch.from_numpy(img).float()
    if use_gpu:
        img = img.cuda()
    return img

def loadimage(imagepaths, maskpaths, opt, test_flag=False):
    batchsize = len(imagepaths)
    images = np.zeros((batchsize, 3, opt.finesize, opt.finesize), dtype=np.float32)
    masks = np.zeros((batchsize, 1, opt.finesize, opt.finesize), dtype=np.float32)
    for i in range(len(imagepaths)):
        img = impro.resize(impro.imread(imagepaths[i]), opt.loadsize)
Code example #9
File: train.py  Project: HypoX64/candock
# 1. type: numpy data   signals: np.float32   labels: np.int64
# 2. shape   signals: [num, ch, length]   labels: [num]
# num: samples_num, ch: channel_num, length: length of each sample
# for example:
signals = np.zeros((10, 1, 10), dtype=np.float32)
labels = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])      # 0 -> class0   1 -> class1
* step2: pass ```--dataset_dir your_dataset_dir``` when running the code.
"""

#----------------------------Load Data----------------------------
t1 = time.time()
signals, labels = dataloader.loaddataset(opt)
if opt.gan:
    signals, labels = augmenter.dcgan(opt, signals, labels)
label_cnt, label_cnt_per, label_num = statistics.label_statistics(labels)
util.writelog('label statistics: ' + str(label_cnt), opt, True)
opt = options.get_auto_options(opt, signals, labels)
train_sequences, eval_sequences = transforms.k_fold_generator(
    len(labels), opt.k_fold, opt.fold_index)
t2 = time.time()
print('Cost time: %.2f' % (t2 - t1), 's')

core = core.Core(opt)
core.network_init(printflag=True)

print('Begin to train ...')
final_confusion_mat = np.zeros((opt.label, opt.label), dtype=int)
final_results = {}
for fold in range(opt.k_fold):
    if opt.k_fold != 1:
        util.writelog(
Code example #10
opt.parser.add_argument('--fold', type=int, default=1, help='')
opt.parser.add_argument('--start', type=int, default=0, help='')
opt.parser.add_argument('--minsize',
                        type=int,
                        default=128,
                        help='when [square], minimal roi size')
opt.parser.add_argument('--quality',
                        type=int,
                        default=40,
                        help='when [square], minimal quality')

opt = opt.getparse()

util.makedirs(opt.savedir)
util.writelog(
    os.path.join(opt.savedir, 'opt.txt'),
    str(time.asctime(time.localtime(time.time()))) + '\n' + util.opt2str(opt))
opt.mod = opt.mod.split(',')

# save dirs
if opt.hd:
    train_A_path = os.path.join(opt.savedir, 'train_A')
    train_B_path = os.path.join(opt.savedir, 'train_B')
    util.makedirs(train_A_path)
    util.makedirs(train_B_path)
else:
    train_path = os.path.join(opt.savedir, 'train')
    util.makedirs(train_path)
if opt.savemask:
    mask_save_path = os.path.join(opt.savedir, 'mask')
    util.makedirs(mask_save_path)
Code example #11
"""Use your own data to train
* step1: Generate signals.npy and labels.npy in the following format.
# 1. type: numpy data   signals: np.float64   labels: np.int64
# 2. shape   signals: [num, ch, length]   labels: [num]
# num: samples_num, ch: channel_num, length: length of each sample
# for example:
signals = np.zeros((10, 1, 10), dtype=np.float64)
labels = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])      # 0 -> class0   1 -> class1
* step2: pass ```--dataset_dir your_dataset_dir``` when running the code.
"""

#----------------------------Load Data----------------------------
t1 = time.time()
signals, labels = dataloader.loaddataset(opt)
label_cnt, label_cnt_per, label_num = statistics.label_statistics(labels)
util.writelog('label statistics: ' + str(label_cnt), opt, True)
opt = options.get_auto_options(opt, label_cnt_per, label_num, signals)
train_sequences, eval_sequences = transformer.k_fold_generator(
    len(labels), opt.k_fold, opt.fold_index)
t2 = time.time()
print('Cost time: %.2f' % (t2 - t1), 's')

core = core.Core(opt)
core.network_init(printflag=True)

print('Begin to train ...')
fold_final_confusion_mat = np.zeros((opt.label, opt.label), dtype=int)
for fold in range(opt.k_fold):
    if opt.k_fold != 1:
        util.writelog(
            '------------------------------ k-fold:' + str(fold + 1) +
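The docstring at the top of code example #11 specifies the on-disk dataset format. A minimal sketch that writes a matching dummy dataset (the directory name is the placeholder from step 2):

import os
import numpy as np

# Shapes follow the docstring: signals [num, ch, length], labels [num].
signals = np.zeros((10, 1, 10), dtype=np.float64)
labels = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1], dtype=np.int64)

os.makedirs('your_dataset_dir', exist_ok=True)
np.save(os.path.join('your_dataset_dir', 'signals.npy'), signals)
np.save(os.path.join('your_dataset_dir', 'labels.npy'), labels)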
Code example #12
File: statistics.py  Project: HypoX64/candock
def save_detail_results(opt, results):
    '''
    results:{
        0:{                                    #dict,index->fold
            'F1':[0.1,0.2...],                 #list,index->epoch
            'err':[0.9,0.8...],                #list,index->epoch
            'loss':[1.1,1.0...],               #list,index->epoch
            'confusion_mat':[
                [[1204  133  763  280]
                 [ 464  150  477  152]
                 [ 768   66 1276  308]
                 [ 159   23  293 2145]],
                [[2505  251 1322  667]
                 [1010  283  834  353]
                 [1476  174 2448  766]
                 [ 376   46  446 4365]],
                ......
            ],                                 #list,index->epoch
            'eval_detail':[                    #list,index->epoch
                {
                    'sequences':[],
                    'ture_labels':[],
                    'pre_labels':[]
                },
                {
                    'sequences':[],
                    'ture_labels':[],
                    'pre_labels':[]
                }
                ...
            ], 
            'best_epoch':0                     #int
        }
        1:{

        ...

        }
    }
    '''

    torch.save(results, os.path.join(opt.save_dir, 'results.pth'))
    util.writelog(
        'All eval results have been saved. Read "./docs/how_to_load_results.md" before loading them.',
        opt, True)

    # statistic by domain
    if os.path.isfile(os.path.join(opt.dataset_dir, 'domains.npy')):
        sequences = []
        ture_labels = []
        pre_labels = []
        for fold in results:
            sequences.append(results[fold]['eval_detail'][
                results[fold]['best_epoch']]['sequences'])
            ture_labels.append(results[fold]['eval_detail'][
                results[fold]['best_epoch']]['ture_labels'])
            pre_labels.append(results[fold]['eval_detail'][
                results[fold]['best_epoch']]['pre_labels'])

        sequences = np.array(flatten_list(sequences))
        ture_labels = np.array(flatten_list(ture_labels))
        pre_labels = np.array(flatten_list(pre_labels))

        domainUids = np.load(os.path.join(opt.dataset_dir, 'domains.npy'))
        # Group true/predicted labels by the domain of each eval sample.
        domain_dict = {}
        for i in range(len(sequences)):
            Uid = str(domainUids[sequences[i]])
            if Uid not in domain_dict:
                domain_dict[Uid] = {'ture': [], 'pred': []}
            domain_dict[Uid]['ture'].append(ture_labels[i])
            domain_dict[Uid]['pred'].append(pre_labels[i])

        # Per-domain accuracy: report() returns macro prec, reca, F1, err,
        # kappa, so Acc = 1 - err.
        domain_stat = []
        for Uid in domain_dict:
            domain_dict[Uid]['Acc'] = 1 - report(
                predtrue2mat(domain_dict[Uid]['ture'],
                             domain_dict[Uid]['pred'], opt.label))[3]
            domain_stat.append([int(Uid), domain_dict[Uid]['Acc']])
        domain_stat = np.array(domain_stat)
        # Sort domains by accuracy, best first.
        domain_stat = domain_stat[np.argsort(domain_stat[:, 1])][::-1]
        domain_stat_txt = 'Domain,Acc(%)\n'
        for i in range(len(domain_stat)):
            domain_stat_txt += ('%03d' % domain_stat[i, 0] + ',' + '%.2f' %
                                (100 * domain_stat[i, 1]) + '\n')
        util.savetxt(domain_stat_txt,
                     os.path.join(opt.save_dir, 'domain_statistic.csv'))
        opt.TBGlobalWriter.add_text('DomainStatistic',
                                    domain_stat_txt.replace('\n', '  \n'))
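Code example #12 stores every fold's evaluation details in results.pth and points readers to ./docs/how_to_load_results.md before loading it. Reading the file back is a plain torch.load; a short sketch using only the keys documented in the docstring above (the checkpoint path is a placeholder):

import torch

results = torch.load('checkpoints/your_run/results.pth')  # placeholder path
best = results[0]['best_epoch']   # best epoch of fold 0
print(results[0]['F1'][best])     # F1 at that epoch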