Example #1
import calendar
import time
import urllib2  # Python 2 only; this snippet uses the Python 2 urllib2 API

import mkdir  # project-local helper module; see the sketch after this example


def getIpact(data, month):
    daofmo = calendar.monthrange(2018, month)[1]  # number of days in the month

    for key,value in data.items():
        if len(value):
            # Create the directory where this host's activity records will be saved
            savepath='/home/lzhang/SRTP_DNS/data_analysis/data/ipact/'+key
            mkdir.mkdir(savepath)
            print(key)
            
            for i in range(1, daofmo + 1):  # days are 1-based; range(daofmo) would start at day 0
                # Fetch the day's IP activity records from the web service
                try:
                    url ="http://211.65.197.210:8080/IPCIS/activityDatabase?IpSets=%s:32&TableName=2018-%02d-%02d&Mode=2" % (value[0], month, i)  
                    response = urllib2.urlopen(url, timeout=10)   
                    html = response.read()  
                    mystr = html.decode("utf8")  
                    response.close()
                    
                    if mystr != "{}":
                        # Write the retrieved activity records to a file in the save directory
                        f = open(savepath + '/' + str(i) + '-' + str(month) + '.txt', 'w')
                        f.write(mystr)
                        f.close()
                except Exception:
                    # Log the host key, day, and a timestamp for the failed request, then continue
                    print(key, i, time.asctime(time.localtime(time.time())))
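All six examples rely on a project-local mkdir helper rather than os.mkdir; its source is not shown on this page. A minimal sketch consistent with the call sites (it has to create missing parents, tolerate an existing directory, and return the path, since Examples #3 and #4 assign its return value) could be:

import os


def mkdir(path):
    # Create the directory (and any missing parents) if it does not already
    # exist, then return the path unchanged so callers can chain it.
    if not os.path.exists(path):
        os.makedirs(path)
    return path

Example #1 imports this as a module (mkdir.mkdir(...)); the later examples appear to import the function itself.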
Example #2
import logging
import os
import time

import configparser as cfg  # assumed alias; the snippet only uses cfg.RawConfigParser
from mkdir import mkdir  # project-local helper (sketched above)


class log(object):
    # Root logger setup; this runs once, at class-definition time
    mkdir(
        "/home/neuiva2/wangironman/SSD-change/pytorch-ssd-ad/experments/1_141_640_480_512baseline/logs/"
    )
    # Keep only the timestamped file name here: os.path.join below would
    # discard save_dir entirely if its second argument were an absolute path
    save_path = time.strftime("%m_%d_%H_%M") + '.log'
    l = logging.getLogger()
    l.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    # Remove any pre-existing handlers (iterate over a copy: removing from the
    # live list while iterating would skip entries)
    for it in list(l.handlers):
        l.removeHandler(it)

    # Resolve the log directory from util.config, creating it if necessary
    config = cfg.RawConfigParser()
    config.read('util.config')
    save_dir = config.get('general', 'log_path')
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    save_path = os.path.join(save_dir, save_path)

    # File handler: everything at DEBUG and above goes to the log file
    f_handler = logging.FileHandler(save_path)
    f_handler.setLevel(logging.DEBUG)
    f_handler.setFormatter(formatter)

    # console handler
    c_handler = logging.StreamHandler()
    c_handler.setLevel(logging.INFO)
    c_handler.setFormatter(formatter)

    l.addHandler(f_handler)
    l.addHandler(c_handler)
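Example #5 below logs through this class as log.l.info(...). Because all the setup sits in the class body, it executes as soon as the module defining log is imported; a hypothetical usage (module name assumed):

from log_util import log  # hypothetical module name

log.l.info('Loading Dataset...')  # reaches both the console and the .log file
log.l.debug('file only')          # console handler filters below INFO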
Example #3
    def __init__(self, model_name: str):
        self.model_name = model_name
        # The helper returns the path it created, so it can be assigned directly
        self.checkpoint_dir = mkdir(f'./checkpoint/{model_name}/')

        self.log_dir = f'{self.checkpoint_dir}/log.log'
        self.state_dir = f'{self.checkpoint_dir}/state.tar'
        self.model_dir = f'{self.checkpoint_dir}/model.pth'
        self.anomaly_score_dir = f'{self.checkpoint_dir}/anomaly_score.npz'

        self.batch_list = []
        self.epoch_list = []
        self.train_loss_list_per_batch = []
        self.train_loss_list_per_epoch = []
        self.valid_loss_list = []
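A hypothetical instantiation (the enclosing class is not shown on this page; CheckpointTracker is an assumed name):

tracker = CheckpointTracker('conv_ae')         # creates ./checkpoint/conv_ae/
tracker.train_loss_list_per_epoch.append(0.42)
torch.save(model.state_dict(), tracker.model_dir)  # model is assumed to exist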
Example #4
    def __init__(self, model_name, transfer_learning=False):

        self.model_name = model_name
        self.transfer_learning = transfer_learning

        self.checkpoint_dir = \
            mkdir('../checkpoint/AIFrenz_Season1/%s/'%model_name)

        self.log_dir = '%s/log.log' % self.checkpoint_dir
        self.state_dir = '%s/state.tar' % self.checkpoint_dir
        self.model_dir = '%s/model.pth' % self.checkpoint_dir

        # Separate artefacts for the transfer-learning phase
        self.tl_log_dir = '%s/tl_log.log' % self.checkpoint_dir
        self.tl_state_dir = '%s/tl_state.tar' % self.checkpoint_dir
        self.tl_model_dir = '%s/tl_learning.pth' % self.checkpoint_dir

        self.batch_list = []
        self.epoch_list = []
        self.train_loss_list_per_batch = []
        self.train_loss_list_per_epoch = []
        self.valid_loss_list = []
Example #5
def train():
    net.train()
    # loss counters
    loc_loss = 0
    conf_loss = 0
    loc_loss_vis = 0  # epoch
    conf_loss_vis = 0
    seg_loss = 0
    seg_visible_loss = 0
    epoch = 0
    log.l.info('Loading Dataset...')

    dataset = DatasetSync(dataset=args.dataset, split='training')

    epoch_size = len(dataset) // args.batch_size  # batches per epoch; '//' keeps it an int for the modulo below
    log.l.info('Training SSD on {}'.format(dataset.name))
    step_index = 0
    batch_iterator = None

    data_loader = data.DataLoader(dataset,
                                  args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=False,
                                  collate_fn=detection_collate,
                                  pin_memory=True)

    lr = args.lr
    for iteration in range(start_iter, args.iterations + 1):  # start_iter and stepvalues are script-level globals
        if (not batch_iterator) or (iteration % epoch_size == 0):
            # create batch iterator
            batch_iterator = iter(data_loader)
        if iteration in stepvalues:
            step_index += 1
            lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index,
                                      iteration, epoch_size)

            # reset epoch loss counters
            loc_loss = 0
            conf_loss = 0
            loc_loss_vis = 0
            conf_loss_vis = 0
            seg_loss = 0
            seg_visible_loss = 0
            epoch += 1

        # load train data
        images, targets, targets_vis, seg_targets, seg_visible_targets = next(
            batch_iterator)

        # Variable/volatile are pre-0.4 PyTorch idioms; on >=0.4 Variable is a
        # no-op wrapper and volatile=True only emits a warning
        if args.cuda:
            images = Variable(images.cuda())
            targets = [
                Variable(anno.cuda(), volatile=True) for anno in targets
            ]
            targets_vis = [
                Variable(anno.cuda(), volatile=True) for anno in targets_vis
            ]
            seg_targets = Variable(seg_targets.cuda())
            seg_visible_targets = Variable(seg_visible_targets.cuda())
        else:
            images = Variable(images)
            targets = [Variable(anno, volatile=True) for anno in targets]
            targets_vis = [
                Variable(anno, volatile=True) for anno in targets_vis
            ]
            seg_targets = Variable(seg_targets)
            seg_visible_targets = Variable(seg_visible_targets)
        # forward
        t0 = time.time()
        out, out_vis = net(images)

        # backprop
        optimizer.zero_grad()
        loss_l, loss_c, loss_l_vis, loss_c_vis, loss_seg, loss_seg_visible = criterion(
            out, out_vis, targets, targets_vis, seg_targets,
            seg_visible_targets)
        alpha = 4  # weight of the two segmentation losses relative to detection
        loss_all = loss_l + loss_c
        loss_vis = loss_l_vis + loss_c_vis
        loss = loss_all + loss_vis + alpha * loss_seg + alpha * loss_seg_visible

        loss.backward()
        optimizer.step()
        t1 = time.time()

        loc_loss += loss_l.item()
        conf_loss += loss_c.item()
        loc_loss_vis += loss_l_vis.item()
        conf_loss_vis += loss_c_vis.item()
        seg_loss += loss_seg.item()
        seg_visible_loss += loss_seg_visible.item()

        if iteration % 10 == 0:
            print(iteration, loss.item())
            log.l.info('''
                Timer: {:.3f} sec.\t LR: {}.\t Iter: {}.\t Loss: {:.4f}.\t Loss_a: {:.3f}.\t Loss_v: {:.3f}.\t Loss_seg:{:.3f}.\t Loss_seg_visible:{:.3f}.
                '''.format((t1 - t0), lr, iteration, loss.item(),
                           loss_all.item(), loss_vis.item(),
                           alpha * loss_seg.item(),
                           alpha * loss_seg_visible.item()))

        if iteration % 5000 == 0:
            log.l.info('Saving state, iter: {}'.format(iteration))
            mkdir("output4/")
            torch.save(ssd_net.state_dict(),
                       'output4/ssd640' + '_0712_' + repr(iteration) + '.pth')

    torch.save(ssd_net.state_dict(), 'output4/ssd640' + '.pth')
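adjust_learning_rate is not shown on this page. In ssd.pytorch-style training scripts it is typically a step decay on the optimizer's parameter groups; a sketch matching the call signature above, under that assumption (the extra epoch/iteration/epoch_size arguments are accepted but unused here):

def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):
    # Step decay: drop the base rate by gamma each time a milestone in
    # stepvalues is passed, and report the new rate back to the caller.
    lr = args.lr * (gamma ** step_index)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr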
Example #6
    # (The snippet begins mid-function.) Mask the T1c volume, then normalise it
    T1cVol = T1cVol * T1cmask
    T1cVol = T1cVol / std

    # Read mask file
    maskImage = sitk.ReadImage(maskFile[0])
    maskVol = sitk.GetArrayFromImage(maskImage).astype(float)
    # print(np.unique(maskVol))

    # Compact the BraTS label set {0, 1, 2, 4}: clear any stray label 3, then map 4 -> 3
    maskVol = np.where(maskVol == 3, 0, maskVol)
    maskVol = np.where(maskVol == 4, 3, maskVol)

    # print(np.unique(maskVol))

    # Crop the mask, then stack the four modalities (T1, T2, FLAIR, T1c) channel-first
    maskVol = maskVol[cut_slice:, crop_h1: (maskVol.shape[1] - crop_h2), crop_w1:(maskVol.shape[2] - crop_w2)]
    imageVol = np.concatenate((np.expand_dims(T1Vol, axis=0), np.expand_dims(T2Vol, axis=0), np.expand_dims(FLAIRVol, axis=0), np.expand_dims(T1cVol, axis=0)), axis=0)

    mkdir('data/BraTS18')
    np.save('data/BraTS18' + '/img_%s.npy' % (str(Patient_dir[nb_file].split('Brats18_')[-1])), imageVol)
    np.save('data/BraTS18' + '/mask_%s.npy' % (str(Patient_dir[nb_file].split('Brats18_')[-1])), maskVol)

    print('BraTS2018/HGG Image process {}/{} finished'.format(nb_file, len(Patient_dir)))


print('finished')

mean_all /= 210  # BraTS 2018 HGG contains 210 patient volumes
std_all /= 210

print(mean_all, std_all)
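mean_all and std_all are accumulated elsewhere in the script; a minimal sketch of the presumed pattern (load_volume is a hypothetical helper):

mean_all = std_all = 0.0
for patient in Patient_dir:       # the 210 HGG cases
    vol = load_volume(patient)    # hypothetical loader returning a numpy array
    mean_all += vol.mean()
    std_all += vol.std()
# the division by 210 above then yields dataset-wide averages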