Example #1
import numpy as np

from utils.history import History  # import path assumed, as in the later examples


class BanditAlgorithm(object):
    def __init__(self, env, *args, **kwargs):
        self.name = 'BanditAlgorithm Abstract'
        self.env = env
        self.k = env.get_k()            # number of arms exposed by the environment
        self.vals = np.zeros(self.k)    # running value estimate per arm
        self.ntimes = np.zeros(self.k)  # pull count per arm
        self.history = History()
Example #2
import numpy as np

from utils.history import History  # import path assumed, as in the later examples


class BanditAlgorithm(object):
    def __init__(self, env, *args, **kwargs):
        self.name = 'BanditAlgorithm Abstract'
        self.env = env
        self.k = env.get_k()
        self.vals = np.zeros(self.k)
        self.ntimes = np.zeros(self.k)
        self.history = History()

    def reset(self):
        self.vals = np.zeros(self.k)
        self.ntimes = np.zeros(self.k)

    def store_params(self, prop, episode, val):
        self.history.store(prop, episode, val)

    def _update(self, *args, **kwargs):
        raise NotImplementedError()

    def _run(self, policy, i_episode):
        raise NotImplementedError()

    def run_episodes(self, policy, n_episodes=2000, n_steps=1000):
        self.history.clear()
        self.policy = policy
        self.n_episodes = n_episodes
        self.n_steps = n_steps

        for i_episode in range(n_episodes):
            self.reset()
            self._run(policy, i_episode)

    def report(self):
        raise NotImplementedError()
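
BanditAlgorithm above is abstract: _update and _run are left to subclasses. For
context, a minimal epsilon-greedy subclass might look like the sketch below; the
env.step(action) -> reward call and the incremental-mean update are assumptions,
since only the base class is shown here, and the policy argument is ignored.

class EpsilonGreedy(BanditAlgorithm):
    def __init__(self, env, eps=0.1, **kwargs):
        super().__init__(env, **kwargs)
        self.name = 'EpsilonGreedy'
        self.eps = eps

    def _update(self, action, reward):
        # incremental sample mean: Q += (r - Q) / n
        self.ntimes[action] += 1
        self.vals[action] += (reward - self.vals[action]) / self.ntimes[action]

    def _run(self, policy, i_episode):
        # `policy` is unused in this sketch; epsilon-greedy is hard-coded
        for _ in range(self.n_steps):
            if np.random.random() < self.eps:
                action = np.random.randint(self.k)  # explore
            else:
                action = int(np.argmax(self.vals))  # exploit
            reward = self.env.step(action)          # assumed environment API
            self._update(action, reward)
        self.store_params('mean_value', i_episode, float(self.vals.mean()))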
Example #3
class Live(object):
    def __init__(self):
        self.config = Config()
        self.cookies = login()
        logger.info(self.cookies)
        self.base_path = os.path.join(
            os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
            self.config.config['output']['path'])
        self.live_infos = Infos()
        self.display = Display()
        self.decoder = Decoder()
        self.uploader = Upload()
        logger.info('Base path: %s' % (self.base_path))
        self.load_room_info()
        self.get_live_url()
        self.history = History()
        logger.info('Initialization complete')
Example #4
from utils.data_loader import DataLoader
from deepNetworks.netArch import DeepNetArch1, DeepNetArch2, DeepNetArch3, DeepNetArch1L1, DeepNetArch2L1, \
    DeepNetArch3L1
from keras import backend as k
from utils.history import History

if __name__ == '__main__':
    logs_dir = 'DeepNetArch1-Div'
    model_type = 'DeepNetArch1'
    sl = 100
    ds_rate = 2
    early_stopping = True
    downsample = False
    bmode = True
    subdir = '/bmode/'
    model_history = History(logs_dir)
    opt_params, opt_model_uid = model_history.find_opt_model(auc=False,
                                                             loss=False,
                                                             acc=True)

    if downsample:
        ds = DataLoader(sl=sl, downsample=True, downsample_rate=ds_rate)
        sl = int(sl / ds_rate)
    else:
        ds = DataLoader(sl=sl)

    if model_type == 'DeepNetArch1':
        arch = DeepNetArch1(sl=sl,
                            initial_lr=float(opt_params['initial_lr']),
                            l2_reg=float(opt_params['l2_regulizer']),
                            dropout=float(opt_params['dropout']),
Example #5
class Live():
    def __init__(self):
        self.config = Config()
        self.cookies = login()
        logger.info(self.cookies)
        self.base_path = os.path.join(
            os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
            self.config.config['output']['path'])
        self.live_infos = Infos()
        self.display = Display()
        self.decoder = Decoder()
        self.uploader = Upload()
        logger.info('Base path: %s' % (self.base_path))
        self.load_room_info()
        self.get_live_url()
        self.history = History()
        logger.info('Initialization complete')

    def create_duration(self, start_time, end_time):
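        # Build a 'YYYYMMDD HHMMSS-YYYYMMDD HHMMSS' recording window from bare
        # HHMMSS start/end strings; '0'/'0' means "no window". When start_time >
        # end_time the window apparently spans midnight, so the two endpoints
        # get different base dates.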
        t = datetime.datetime.now()
        tt = t.strftime('%Y%m%d %H%M%S')
        if start_time == end_time == '0':
            return '0'
        tmp = datetime.datetime.strptime(
            tt.split(' ')[0] + ' %s' % start_time, '%Y%m%d %H%M%S')
        if t > tmp:
            base_time1 = tt.split(' ')[0]
            base_time2 = (t + datetime.timedelta(days=1)
                          ).strftime('%Y%m%d %H%M%S').split(' ')[0]
        else:
            base_time1 = (t - datetime.timedelta(days=1)
                          ).strftime('%Y%m%d %H%M%S').split(' ')[0]
            base_time2 = tt.split(' ')[0]
        if start_time > end_time:
            start_time = '%s %s' % (base_time1, start_time)
            end_time = '%s %s' % (base_time2, end_time)
        else:
            start_time = '%s %s' % (tt.split(' ')[0], start_time)
            end_time = '%s %s' % (tt.split(' ')[0], end_time)
        return '%s-%s' % (start_time, end_time)

    def check_live(self, key):
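        # True if the room may be recorded right now: either no window is
        # configured ('0') or the current time falls inside the parsed window.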
        duration = self.live_infos.get(key)['duration']
        if duration == '0':
            return True
        lst = duration.split('-')
        now_time = datetime.datetime.now()
        if len(lst) == 2:
            start_time = datetime.datetime.strptime(lst[0], '%Y%m%d %H%M%S')
            end_time = datetime.datetime.strptime(lst[1], '%Y%m%d %H%M%S')
            if now_time > start_time and now_time < end_time:
                return True
            else:
                # logger.debug('%s[RoomID:%s] is outside the scheduled live window' % (self.live_infos.get(key)['uname'], key))
                return False
        else:
            return False

    def load_room_info(self):
        live_infos = self.live_infos.copy()
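        # each config row: [room_id, need_rec, need_mask, maxsecond,
        #                   need_upload, start_time, end_time]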
        for lst in self.config.config['live']['room_info']:
            if lst[0] not in live_infos:
                live_info = {}
                live_info['record_start_time'] = ''
                live_info['queue_status'] = 0
                live_info['recording'] = 0
                live_info['finish_time'] = ''
            else:
                live_info = live_infos[lst[0]]
            live_info['need_rec'] = lst[1]
            live_info['need_mask'] = lst[2]
            live_info['maxsecond'] = lst[3]
            live_info['need_upload'] = lst[4]
            live_info['duration'] = self.create_duration(lst[5], lst[6])
            live_info['cookies'] = self.cookies
            live_info['base_path'] = self.base_path
            self.live_infos.update(lst[0], live_info)

    def load_realtime(self):
        '''
        Reload the config in real time and refresh the room info
        '''
        self.config.load_cfg()
        # logger.info(self.config.config)
        room_lst = [i[0] for i in self.config.config['live']['room_info']]
        del_lst = []
        for key in self.live_infos.copy():
            if key not in room_lst:
                del_lst.append(key)
        for key in del_lst:
            self.live_infos.delete(key)
        self.load_room_info()

    def judge_in(self, key):
        room_lst = [i[0] for i in self.config.config['live']['room_info']]
        if key not in room_lst:
            return False
        return True

    def judge_download(self, key):
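        # Decide whether downloading should start, setting the 'recording'
        # status code as a side effect: 0 = idle, 1 = recording,
        # 2 = outside the configured window, 3 = live but recording disabled.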
        if not self.judge_in(key):
            return False
        live_info = self.live_infos.copy()[key]
        if 'live_status' not in live_info:
            return False
        if live_info['live_status'] != 1:
            live_info['recording'] = 0
            self.live_infos.update(key, live_info)
            return False
        elif not self.check_live(key) and live_info['live_status'] == 1:
            live_info['recording'] = 2
            self.live_infos.update(key, live_info)
            return False
        elif self.check_live(key) and live_info[
                'live_status'] == 1 and live_info['need_rec'] == '0':
            live_info['recording'] = 3
            self.live_infos.update(key, live_info)
            return False
        elif live_info['live_status'] == 1 and live_info['need_rec'] == '1':
            live_info['recording'] = 1
            self.live_infos.update(key, live_info)
            return True
        else:
            logger.warning('%s[RoomID:%s] hit an unexpected branch' %
                           (live_info['uname'], key))
            live_info['recording'] = 0
            self.live_infos.update(key, live_info)
            return False

    def get_live_url(self):
        '''
        Fetch the info of every monitored live room
        '''
        room_lst = [i[0] for i in self.config.config['live']['room_info']]
        for id in room_lst:
            info = None
            while info is None:
                try:
                    r = requests.get(
                        'https://api.live.bilibili.com/xlive/web-room/v1/index/getInfoByRoom?room_id=%s'
                        % id)
                    info = json.loads(r.text)['data']
                    time.sleep(0.3)
                except Exception:
                    logger.error('[RoomID:%s] failed to fetch info, retrying' % (id))
                    continue
            try:
                live_info = self.live_infos.copy()[id]
            except KeyError:
                continue
            live_info['room_id'] = id
            live_info['real_id'] = info['room_info']['room_id']
            try:
                if live_info['live_status'] != 1 and info['room_info'][
                        'live_status'] == 1:
                    logger.info('%s[RoomID:%s] went live' % (live_info['uname'], id))
                    self.history.add_info(id, 'live_status', 'went live')
                    toaster = ToastNotifier()
                    toaster.show_toast("Live notification",
                                       '%s[RoomID:%s] went live' %
                                       (live_info['uname'], id),
                                       icon_path=None,
                                       duration=3)
            except Exception:
                pass
            try:
                live_info['live_status'] = info['room_info']['live_status']
                if info['room_info']['lock_status'] == 1:
                    live_info['live_status'] = 4
                live_info['uid'] = info['room_info']['uid']
                live_info['uname'] = info['anchor_info']['base_info']['uname']
                live_info['save_name'] = '%s_%s.flv' % (
                    live_info['uname'],
                    time.strftime("%Y%m%d%H%M%S", time.localtime()))
                live_info['title'] = info['room_info']['title']
                live_info['live_start_time'] = info['room_info'][
                    'live_start_time']
                self.live_infos.update(id, live_info)
                # logger.debug('%s[RoomID:%s] live status\t%s' % (live_info['uname'], id, live_info['live_status']))
            except Exception as e:
                logger.critical(e)
                logger.error('[RoomID:%s] failed to update room info' % (id))
                logger.error(info)
        # logger.info(self.live_infos.copy())

    def get_stream(self, key):
        '''
        Fetch the live stream
        :param key: display id of the room
        :return: stream
        '''
        if not self.judge_in(key):
            return None
        live_info = self.live_infos.copy()[key]
        logger.info('%s[RoomID:%s] fetching live stream' % (live_info['uname'], key))
        session = streamlink.Streamlink()
        session.set_option("http-cookies", self.cookies)
        session.set_option("http-headers", headers)
        streams = None
        retry = 0
        while streams is None:
            try:
                if retry < 3:
                    streams = session.streams('https://live.bilibili.com/%s' %
                                              key)
                else:
                    logger.warning('%s[RoomID:%s] failed to get live stream, retry limit reached' %
                                   (live_info['uname'], key))
                    return None
            except Exception:
                logger.warning('%s[RoomID:%s] failed to get live stream, retrying' %
                               (live_info['uname'], key))
                retry += 1
                time.sleep(1)
        if streams == {}:
            logger.error('%s[RoomID:%s] no live stream returned; the broadcast may have ended or there is a network issue' %
                         (live_info['uname'], key))
            return None
        if 'best' in streams:
            logger.info('%s[RoomID:%s] got "best" live stream' % (live_info['uname'], key))
            return streams['best']
        elif 'source' in streams:
            logger.info('%s[RoomID:%s] got "source" live stream' %
                        (live_info['uname'], key))
            return streams['source']
        elif 'worst' in streams:
            logger.info('%s[RoomID:%s] got "worst" live stream' % (live_info['uname'], key))
            return streams['worst']
        else:
            logger.info('%s[RoomID:%s] no live stream found' % (live_info['uname'], key))
            return None

    def unlive(self, key, unlived):
        if not self.judge_in(key):
            return None
        live_info = self.live_infos.copy()[key]
        logger.info('%s[RoomID:%s] seems to have gone offline' % (live_info['uname'], key))
        self.history.add_info(key, 'live_status', 'went offline')
        live_info['recording'] = 0
        logger.info('%s[RoomID:%s] recording finished, recorded %.2f minutes' %
                    (live_info['uname'], key,
                     (datetime.datetime.now() - datetime.datetime.strptime(
                         live_info['record_start_time'],
                         '%Y-%m-%d %H:%M:%S')).total_seconds() / 60.0))
        live_info['record_start_time'] = ''
        if unlived:
            logger.info('%s[RoomID:%s] confirmed offline, queued for transcode and upload' %
                        (live_info['uname'], key))
            self.decoder.enqueue(key)
        self.live_infos.update(key, live_info)

    def download_live(self, key):
        try:
            if not self.judge_in(key):
                return None
            save_path = os.path.join(self.base_path,
                                     self.live_infos.get(key)['uname'],
                                     'recording')
            logger.info('%s[RoomID:%s] preparing to download the live stream, saving to %s' %
                        (self.live_infos.get(key)['uname'], key, save_path))
            live_info = self.live_infos.get(key)
            live_info['recording'] = 1
            if not os.path.exists(save_path):
                os.makedirs(save_path)

            stream = self.get_stream(key)
            if stream is None:
                logger.error('%s[RoomID:%s] failed to get live stream' %
                             (self.live_infos.get(key)['uname'], key))
                live_info['record_start_time'] = ''
                live_info['recording'] = 0
                self.live_infos.update(key, live_info)
                return
            filename = os.path.join(save_path,
                                    self.live_infos.get(key)['save_name'])
            live_info['record_start_time'] = datetime.datetime.now().strftime(
                '%Y-%m-%d %H:%M:%S')
            self.live_infos.update(key, live_info)
            try:
                fd = stream.open()
            except Exception as e:
                self.live_infos.update(key, live_info)
                self.unlive(key, unlived=False)
                logger.critical('%s[RoomID:%s]fd open error' %
                                (self.live_infos.get(key)['uname'], key))
                logger.error(e)
                return
            with open(filename, 'wb') as f:
                while self.judge_download(key) and self.check_live(key):
                    try:
                        data = fd.read(1024 * 8)
                        if len(data) > 0:
                            f.write(data)
                        else:
                            fd.close()
                            logger.warning(
                                '%s[RoomID:%s] live stream disconnected, trying to reconnect' %
                                (self.live_infos.get(key)['uname'], key))
                            stream = self.get_stream(key)
                            if stream is None:
                                logger.warning(
                                    '%s[RoomID:%s] reconnect failed' %
                                    (self.live_infos.get(key)['uname'], key))
                                self.live_infos.update(key, live_info)
                                self.unlive(key, True)
                                return
                            else:
                                logger.info(
                                    '%s[RoomID:%s] reconnected successfully' %
                                    (self.live_infos.get(key)['uname'], key))
                                fd = stream.open()
                    except Exception as e:
                        fd.close()
                        self.live_infos.update(key, live_info)
                        self.unlive(key, unlived=False)
                        logger.critical(
                            '%s[RoomID:%s] hit an unexpected error' %
                            (self.live_infos.get(key)['uname'], key))
                        logger.error(e)
                        return
            fd.close()
            self.live_infos.update(key, live_info)
            self.unlive(key, True)
        except Exception as e:
            logger.critical(e)

    def start(self):
        threading.Thread(target=self.display.run).start()
        threading.Thread(target=self.decoder.run).start()
        threading.Thread(target=self.uploader.run).start()
        threading.Thread(target=self.history.heartbeat).start()
        while True:
            try:
                self.load_realtime()
                self.get_live_url()
                time.sleep(1)
                live_infos = self.live_infos.copy()
                for key in live_infos:
                    if live_infos[key][
                            'recording'] != 1 and self.judge_download(key):
                        threading.Thread(target=self.download_live,
                                         args=(key, )).start()
                    time.sleep(0.2)
            except Exception as e:
                logger.critical(e)
                continue
Example #6
def run(args):

    set_seed(args.seed)

    set_logging(ROOT_DIR, args)
    import pprint
    logging.info(pprint.pformat(vars(args)) if not isinstance(args, dict)
                 else pprint.pformat(args))

    # set up cuda device
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)
    device = torch.device('cuda')

    loader = Dev_Loader(is_divide_variance=args.is_divide_variance)

    train_loader = loader.train(batch_size=args.batch_size)
    val_loader = loader.val(batch_size=args.batch_size)

    # model = getattr(net_archs, args.net)(args).cuda()
    from xception import ModifiedXception
    model = ModifiedXception(num_classes=args.nb_class,
                             drop_rate=args.drop_rate,
                             decay=args.decay).cuda()

    if args.optimizer == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=args.init_lr,
                              momentum=0.9,
                              nesterov=True)
    elif args.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args.init_lr,
                               weight_decay=args.l2)
    if args.lr_factor < 1.0:
        scheduler = ReduceLROnPlateau(optimizer,
                                      mode='max',
                                      verbose=True,
                                      factor=args.lr_factor,
                                      patience=args.lr_patience)

    train_hist = History(name='train')
    test_list = ['a', 'bc', 'abc']
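    # 'a' and 'bc' are the validation subsets; 'abc' stores their averaged
    # metrics (computed in the epoch loop below)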
    val_hist = dict()
    for d in test_list:
        val_hist[d] = History(name='val/{}'.format(d))

    if args.continue_run:
        ckpt_file = Reporter(exp=args.exp).select_last(
            args.ckpt_prefix[0:5]).selected_ckpt
        logging.info('continue training from {}'.format(ckpt_file))

        ckpt_dicts = torch.load(ckpt_file)

        model.load_state_dict(ckpt_dicts['model_state_dict'])
        model.cuda()

        optimizer.load_state_dict(ckpt_dicts['optimizer_state_dict'])

        start_epoch = ckpt_dicts['epoch'] + 1
    else:
        start_epoch = 1

    # checkpoint after new History, order matters
    ckpter = CheckPoint(model=model,
                        optimizer=optimizer,
                        path='{}/ckpt/{}'.format(ROOT_DIR, args.exp),
                        prefix=args.ckpt_prefix,
                        interval=1,
                        save_num=1)

    for epoch in range(start_epoch, args.run_epochs):

        train_mixup_all(train_loader,
                        model,
                        optimizer,
                        device,
                        mix_alpha=args.mix_alpha)

        train_hist.add(logs=eval_model(train_loader, model, device),
                       epoch=epoch)

        a_logs = eval_model(val_loader['a'], model, device)
        bc_logs = eval_model(val_loader['bc'], model, device)
        avg_loss = (a_logs['loss'] + bc_logs['loss']) / 2
        avg_acc = (a_logs['acc'] + bc_logs['acc']) / 2
        avg_logs = {'loss': avg_loss, 'acc': avg_acc}
        val_hist['a'].add(logs=a_logs, epoch=epoch)
        val_hist['bc'].add(logs=bc_logs, epoch=epoch)
        val_hist['abc'].add(logs=avg_logs, epoch=epoch)

        if args.lr_factor < 1.0:
            scheduler.step(val_hist['abc'].recent['acc'])

        # plotting
        if args.plot:
            train_hist.clc_plot()
            for d in test_list:
                val_hist[d].plot()

        # logging
        logging.info("Epoch{:04d},{:6},{}".format(epoch, train_hist.name,
                                                  str(train_hist.recent)))
        for d in test_list:
            logging.info("Epoch{:04d},{:6},{}".format(epoch, val_hist[d].name,
                                                      str(val_hist[d].recent)))

        ckpter.check_on(epoch=epoch,
                        monitor='acc',
                        loss_acc=val_hist['abc'].recent)

    # explicitly save last
    ckpter.save(epoch=args.run_epochs - 1,
                monitor='acc',
                loss_acc=val_hist['abc'].recent)
Example #7
# The snippet begins mid-call; the opening below is reconstructed by analogy
# with val_set, and the name `xml_folder_train` is an assumption.
train_set = VNET3Dataset(xmls_folder=xml_folder_train,
                         height=img_height,
                         img_folder=img_folder,
                         config=out_conf,
                         gauss_folder=g_folder,
                         highpass_folder=hp_folder)
val_set = VNET3Dataset(xmls_folder=xml_folder_val,
                       height=img_height,
                       img_folder=img_folder,
                       config=out_conf,
                       gauss_folder=g_folder,
                       highpass_folder=hp_folder)
img_shape = tuple(train_set[0][1].shape[1:3])
train_loader = DataLoader(dataset=train_set,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=1,
                          drop_last=True)
history = History(num_epochs)
print("Dataset loaded, with image shape of {:}".format(img_shape))
net = VNET(input_shape=img_shape,
           latent_dim=128,
           output_dim=output_size,
           in_channels=5)
net.to(device)
optimizer = optim.Adam(net.parameters(), lr=learning_rate)

train(net, optimizer, criterion, val_set, device, num_epochs, train_loader,
      verbose_interval, history)
plot_history(history, "VNET3")

xml_folder = "../../data/test_set/*"
test_set = VNET3Dataset(xmls_folder=xml_folder,
                        height=img_height,
Example #8
def run(args):

    set_seed(args.seed)
    exp_dir = '{}/ckpt/{}/'.format(ROOT_DIR, args.exp)
    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)

    if args.ckpt_prefix == 'auto':
        args.ckpt_prefix = get_next_run(exp_dir)

    # setup logging info
    log_file = '{}/{}.log'.format(exp_dir, args.ckpt_prefix)
    logger = get_logger(log_file)
    logger.info('\n' + pprint.pformat(vars(args)))

    # set up cuda device
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)
    device = torch.device('cuda')

    dataset_cls = DATA_REGISTRY.get(args.dataset)
    time_stretch_args = eval(args.time_stretch_args)
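    # eval() turns the CLI strings into Python objects (e.g. tuples of
    # augmentation parameters); ast.literal_eval would be safer for untrusted input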
    transform = Compose([
        FakePitchShift(target_sr=args.sr,
                       pitch_shift_steps=eval(args.pitch_shift_steps)),
        RandomCropWav(target_sr=args.sr, crop_seconds=args.crop_seconds)
        if args.crop_seconds > 0 else None,
        TimeStretch(target_sr=args.sr, stretch_args=time_stretch_args)
        if time_stretch_args[0] > 0 else None
    ])
    train_set = dataset_cls(fold=args.fold,
                            split='train',
                            target_sr=args.sr,
                            transform=transform)
    train_loader = DataLoader(dataset=train_set,
                              batch_size=args.batch_size,
                              drop_last=True,
                              shuffle=True,
                              num_workers=4)
    val_loader = DataLoader(dataset=dataset_cls(fold=args.fold,
                                                split='valid',
                                                target_sr=args.sr),
                            batch_size=32,
                            drop_last=False,
                            shuffle=False,
                            num_workers=4)

    model_cls = ARCH_REGISTRY.get(args.net)
    model = model_cls(**vars(args)).to(device)

    logger.info(model)

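    # freeze the feature extractor; the optimizers below filter on
    # requires_grad, so only the remaining layers are trained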
    for param in model.feat.parameters():
        param.requires_grad = False

    if args.optimizer == 'sgd':
        optimizer = optim.SGD(filter(lambda p: p.requires_grad,
                                     model.parameters()),
                              lr=args.init_lr,
                              momentum=0.9,
                              nesterov=True)
    elif args.optimizer == 'adam':
        optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                      model.parameters()),
                               lr=args.init_lr,
                               weight_decay=args.l2)
    elif args.optimizer == 'adamw':
        optimizer = optim.AdamW(filter(lambda p: p.requires_grad,
                                       model.parameters()),
                                lr=args.init_lr,
                                weight_decay=args.l2)

    lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=args.init_lr,
        steps_per_epoch=len(train_loader),
        epochs=args.run_epochs,
        pct_start=0.08,
        cycle_momentum=False)

    train_hist, val_hist = History(name='train'), History(name='val')

    # checkpoint after new History, order matters
    ckpter = CheckPoint(model=model,
                        optimizer=optimizer,
                        path='{}/ckpt/{}'.format(ROOT_DIR, args.exp),
                        prefix=args.ckpt_prefix,
                        interval=1,
                        save_num=1,
                        fake_save=False)

    criterion = LabelSmoothingLoss(smoothing=args.label_smoothing)
    if args.cpc_loss == 'info_nce':
        criterion_cpc = LabelSmoothingLoss(smoothing=args.cpc_label_smoothing)
    elif args.cpc_loss == 'bce':
        criterion_cpc = CPCBCEWithLogitsLabelSmoothingLoss(
            smoothing=args.cpc_label_smoothing, balanced=False)
    elif args.cpc_loss == 'balanced_bce':
        criterion_cpc = CPCBCEWithLogitsLabelSmoothingLoss(
            smoothing=args.cpc_label_smoothing, balanced=True)
    else:
        raise ValueError('cpc_loss not supported.')

    from torch.utils.tensorboard import SummaryWriter
    train_writer = SummaryWriter('{}/ckpt/{}/{}/{}'.format(
        ROOT_DIR, args.exp, args.ckpt_prefix, 'train'))
    valid_writer = SummaryWriter('{}/ckpt/{}/{}/{}'.format(
        ROOT_DIR, args.exp, args.ckpt_prefix, 'valid'))

    train_writer.add_text(tag="args", text_string=str(args))

    for epoch in range(1, args.run_epochs + 1):
        train_cls_log, train_cpc_log = train_model_cpc(
            train_loader,
            model,
            optimizer,
            criterion,
            criterion_cpc,
            device,
            lr_scheduler,
            train_writer,
            epoch,
            cpc_warm_up=args.cpc_warm_up)
        train_hist.add(logs=train_cls_log, epoch=epoch)
        train_hist.add_cpc(logs=train_cpc_log, epoch=epoch)

        valid_cls_log, valid_cpc_log = eval_model_cpc(
            val_loader,
            model,
            criterion,
            criterion_cpc,
            device,
            valid_writer,
            epoch,
            cpc_warm_up=args.cpc_warm_up)
        val_hist.add(logs=valid_cls_log, epoch=epoch)
        val_hist.add_cpc(logs=valid_cpc_log, epoch=epoch)

        train_writer.add_scalar("loss", train_hist.recent['loss'], epoch)
        train_writer.add_scalar("acc", train_hist.recent['acc'], epoch)
        valid_writer.add_scalar("loss", val_hist.recent['loss'], epoch)
        valid_writer.add_scalar("acc", val_hist.recent['acc'], epoch)
        train_writer.add_scalar("lr", get_lr(optimizer), epoch)

        # plotting
        if args.plot:
            train_hist.clc_plot()
            val_hist.plot()

        # logging
        logger.info("Epoch{:04d},{:6},{}".format(epoch, train_hist.name,
                                                 str(train_hist.recent)))
        logger.info("Epoch{:04d},{:6},{}".format(epoch, val_hist.name,
                                                 str(val_hist.recent)))

        ckpter.check_on(epoch=epoch, monitor='acc', loss_acc=val_hist.recent)

    # explicitly save last
    ckpter.save(epoch=args.run_epochs - 1,
                monitor='acc',
                loss_acc=val_hist.recent)
    train_writer.close()
    valid_writer.close()
Example #9
import sys

module_root = '..'
sys.path.append(module_root)

from utils.history import History

if __name__ == '__main__':
    logs_dir = 'DeepNetArch1-Div'
    model_history = History(logs_dir)

    # model_history.plot_results(train=True, validation=False, params=False, model_visualization=False,
    #                            loss=True, acc=False, auc=False, min_acc=0.4)
    model_history.filtered_learning_curve(train=True,
                                          validation=False,
                                          params=False,
                                          loss=True,
                                          acc=False,
                                          auc=False)
Example #10
def run(exp_name='mean_teacher',
        ckpt_prefix='Run01',
        device='3',
        rampup_epochs=80,
        run_epochs=1000,
        is_bn=True,
        lr=1e-3,
        teacher_weight=3,
        teacher_ema_alpha=0.99,
        log_level='DEBUG',
        min_value=-0.2,
        max_value=0.2,
        probability=0.9,
        is_plot=False
        ):

    # setup logging and save kwargs
    kwargs = locals()
    log_file = '{}/ckpt/{}/{}.log'.format(ROOT_DIR, exp_name, ckpt_prefix)
    if not os.path.exists(os.path.dirname(log_file)):
        os.makedirs(os.path.dirname(log_file))
    logging.basicConfig(filename=log_file, level=getattr(logging, log_level.upper(), None))
    logging.info(str(kwargs))

    # set up cuda device
    os.environ['CUDA_VISIBLE_DEVICES'] = str(device)
    device = torch.device('cuda')

    uda_loader = UdaLoader(min_value=min_value, max_value=max_value, probability=probability)

    # load train
    src_loader, dst_double_loader = uda_loader.train(batch_size=128, shuffle=True)

    # load train for eval
    src_loader_eval, dst_loader_eval = uda_loader.train_for_eval(batch_size=128, shuffle=False)

    # load val
    val_loaders = uda_loader.val(batch_size=128, shuffle=False)

    # load model to cuda
    student_model = net_archs.BaseConv(filters=32, is_bn=is_bn, is_drop=True)
    student_model.to(device)

    teacher_model = net_archs.BaseConv(filters=32, is_bn=is_bn, is_drop=True)
    teacher_model.to(device)

    student = Student(student_model, lr=lr, teacher_weight=teacher_weight)
    teacher = Teacher(teacher_model).bind(student, teacher_alpha=teacher_ema_alpha)

    train_hist = dict()
    train_hist['T/A'] = History(name='teacher/train/A')
    train_hist['T/b'] = History(name='teacher/train/b')
    train_hist['S/A'] = History(name='student/train/A')
    train_hist['S/b'] = History(name='student/train/b')

    val_hist = dict()
    val_hist['T/p'] = History(name='teacher/val/p')
    val_hist['T/b'] = History(name='teacher/val/b')
    val_hist['S/p'] = History(name='student/val/p')
    val_hist['S/b'] = History(name='student/val/b')

    # checkpoint after new History, order matters
    teacher_ckpter = CheckPoint(model=teacher.model, optimizer=None,
                                path='{}/ckpt/{}/teacher'.format(ROOT_DIR, exp_name),
                                prefix=ckpt_prefix, interval=1, save_num=1)
    teacher_ckpter.bind_histories([train_hist['T/A'], train_hist['T/b'], val_hist['T/p'], val_hist['T/b']])

    student_ckpter = CheckPoint(model=student.model, optimizer=student.optimizer,
                                path='{}/ckpt/{}/student'.format(ROOT_DIR, exp_name),
                                prefix=ckpt_prefix, interval=1, save_num=1)
    student_ckpter.bind_histories([train_hist['S/A'], train_hist['S/b'], val_hist['S/p'], val_hist['S/b']])

    # setup rampup
    rampup = RampUp(rampup_epochs=rampup_epochs)

    for epoch in range(1, run_epochs):

        ts_train(src_loader, dst_double_loader, student, teacher, device, rampup.get_weight())

        train_hist['T/A'].add(
            logs=eval_model(src_loader_eval, teacher.model, device),
            epoch=epoch
        )
        train_hist['T/b'].add(
            logs=eval_model(dst_loader_eval, teacher.model, device),
            epoch=epoch
        )
        val_hist['T/p'].add(
            logs=eval_model(val_loaders['p'], teacher.model, device),
            epoch=epoch
        )
        val_hist['T/b'].add(
            logs=eval_model(val_loaders['b'], teacher.model, device),
            epoch=epoch
        )

        train_hist['S/A'].add(
            logs=eval_model(src_loader_eval, student.model, device),
            epoch=epoch
        )
        train_hist['S/b'].add(
            logs=eval_model(dst_loader_eval, student.model, device),
            epoch=epoch
        )

        val_hist['S/p'].add(
            logs=eval_model(val_loaders['p'], student.model, device),
            epoch=epoch
        )
        val_hist['S/b'].add(
            logs=eval_model(val_loaders['b'], student.model, device),
            epoch=epoch
        )

        train_hist['T/A'].clear()

        for key in train_hist.keys():
            if is_plot:
                train_hist[key].plot()
            logging.info("Epoch{:04d},{:15},{}".format(epoch, train_hist[key].name,
                                                       str(train_hist[key].recent)))

        for key in val_hist.keys():
            if is_plot:
                val_hist[key].plot()
            logging.info("Epoch{:04d},{:15},{}".format(epoch, val_hist[key].name,
                                                       str(val_hist[key].recent)))

        teacher_ckpter.check_on(epoch=epoch, monitor='acc', loss_acc=val_hist['T/b'].recent)
        student_ckpter.check_on(epoch=epoch, monitor='acc', loss_acc=val_hist['S/b'].recent)

    # explicitly save the last run
    teacher_ckpter.save(epoch=run_epochs-1, monitor='acc', loss_acc=val_hist['T/b'].recent)
    student_ckpter.save(epoch=run_epochs-1, monitor='acc', loss_acc=val_hist['S/b'].recent)
Example #11
def run(args):

    set_seed(args.seed)

    # setup logging info
    log_file = '{}/ckpt/{}/{}.log'.format(ROOT_DIR, args.exp, args.ckpt_prefix)
    if not os.path.exists(os.path.dirname(log_file)):
        os.makedirs(os.path.dirname(log_file))
    logging.basicConfig(filename=log_file, level=logging.INFO)
    logging.info(str(args))

    # set up cuda device
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)
    device = torch.device('cuda')

    train_loader, val_loader = ASCDevLoader(device='a').train_val(
        batch_size=args.batch_size, shuffle=True)

    model = getattr(net_archs, args.net)(args=args).cuda()
    # model = net_archs.CNN3BiGRU(args=args).cuda()

    if args.optimizer == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=args.init_lr,
                              momentum=0.9,
                              nesterov=True)
    elif args.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args.init_lr,
                               weight_decay=args.l2)
    if args.lr_factor < 1.0:
        scheduler = ReduceLROnPlateau(optimizer,
                                      mode='max',
                                      verbose=True,
                                      factor=args.lr_factor,
                                      patience=args.lr_patience)

    train_hist, val_hist = History(name='train'), History(name='val')

    # checkpoint after new History, order matters
    ckpter = CheckPoint(model=model,
                        optimizer=optimizer,
                        path='{}/ckpt/{}'.format(ROOT_DIR, args.exp),
                        prefix=args.ckpt_prefix,
                        interval=1,
                        save_num=1)

    from utils.utilities import WeightedBCE
    criterion = WeightedBCE(pos_weight=9 * torch.ones(10).cuda(),
                            reduction='sum')
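    # pos_weight=9 for each of the 10 classes upweights positive targets,
    # presumably to offset a ~1:9 positive/negative label imbalance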

    for epoch in range(1, args.run_epochs):
        train_hist.add(logs=train_model(train_loader, model, optimizer,
                                        criterion, device),
                       epoch=epoch)
        val_hist.add(logs=eval_model(val_loader, model, criterion, device),
                     epoch=epoch)
        if args.lr_factor < 1.0:
            scheduler.step(val_hist.recent['acc'])

        # plotting
        if args.plot:
            train_hist.clc_plot()
            val_hist.plot()

        # logging
        logging.info("Epoch{:04d},{:6},{}".format(epoch, train_hist.name,
                                                  str(train_hist.recent)))
        logging.info("Epoch{:04d},{:6},{}".format(epoch, val_hist.name,
                                                  str(val_hist.recent)))

        ckpter.check_on(epoch=epoch, monitor='acc', loss_acc=val_hist.recent)

    # explicitly save last
    ckpter.save(epoch=args.run_epochs - 1,
                monitor='acc',
                loss_acc=val_hist.recent)
Example #12
def exp1(device='3', ckpt_prefix='Run01', run_epochs=1000, lr=1e-3):

    # setup logging and save kwargs
    kwargs = locals()
    log_file = '{}/ckpt/exp1/{}.log'.format(ROOT_DIR, ckpt_prefix)
    if not os.path.exists(os.path.dirname(log_file)):
        os.makedirs(os.path.dirname(log_file))
    logging.basicConfig(filename=log_file, level=logging.INFO)
    logging.info(str(kwargs))

    # set up cuda device
    os.environ['CUDA_VISIBLE_DEVICES'] = str(device)
    device = torch.device('cuda')

    # load input to cuda
    train_loader, val_loaders = Exp1Loader().train_val()

    # load model to cuda
    model = net_archs.BaseConv(filters=32, is_bn=True, is_drop=True)
    model.to(device)

    # from torchsummary import summary
    # summary(model, input_size=(1, 40, 500))

    # optimizer
    optimizer = optim.Adam(params=model.parameters(), lr=lr)

    train_hist = History(name='train/A')
    val_histp = History(name='val/p')
    val_histb = History(name='val/b')

    # checkpoint after new History, order matters
    ckpter = CheckPoint(model=model,
                        optimizer=optimizer,
                        path='{}/ckpt/exp1'.format(ROOT_DIR),
                        prefix=ckpt_prefix,
                        interval=2,
                        save_num=2)

    for epoch in range(1, run_epochs):
        train_hist.add(logs=train_model(train_loader, model, optimizer,
                                        device),
                       epoch=epoch)
        val_histp.add(logs=eval_model(val_loaders['p'], model, device),
                      epoch=epoch)

        val_histb.add(logs=eval_model(val_loaders['b'], model, device),
                      epoch=epoch)

        train_hist.clc_plot()
        val_histp.plot()
        val_histb.plot()
        logging.info("Epoch{:04d},{:15},{}".format(epoch, train_hist.name,
                                                   str(train_hist.recent)))
        logging.info("Epoch{:04d},{:15},{}".format(epoch, val_histp.name,
                                                   str(val_histp.recent)))
        logging.info("Epoch{:04d},{:15},{}".format(epoch, val_histb.name,
                                                   str(val_histb.recent)))

        ckpter.check_on(epoch=epoch, monitor='acc', loss_acc=val_histb.recent)
    # explicitly save last
    ckpter.save(epoch=run_epochs - 1, monitor='acc', loss_acc=val_histb.recent)
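
Taken together, these examples come from different projects, so History is not
one class: Examples #4 and #9 construct it with a logs directory, #7 with an
epoch count, while the training loops in #6, #8, #10, #11 and #12 use a metric
history keyed by name. A minimal sketch of just that last variant, inferred only
from its call sites (History(name=...), .add(logs=..., epoch=...), .recent,
.name, .clear()); plotting (plot/clc_plot) and checkpoint binding are omitted:

from collections import defaultdict


class History(object):
    """Sketch of the interface used above; the real class does more."""

    def __init__(self, name=''):
        self.name = name
        self.logs = defaultdict(list)  # metric name -> list of (epoch, value)
        self.recent = {}               # the logs dict from the latest add()

    def add(self, logs, epoch):
        # logs is a plain dict such as {'loss': 0.31, 'acc': 0.90}
        for key, value in logs.items():
            self.logs[key].append((epoch, value))
        self.recent = dict(logs)

    def clear(self):
        self.logs.clear()
        self.recent = {}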