Code Example #1
    def update(self, try_reconnect=False):
        providers = utils.get_available_providers()
        channels = []

        for sl in providers:
            logger.log.info('Getting channels for account: %s' %
                            (sl.account.username))

            try:
                channels = channels + sl.channels()
            except skylink.TooManyDevicesException as e:
                if self._addon.getSetting('reuse_last_device') == 'true':
                    device = utils.get_last_used_device(e.devices)
                else:
                    device = utils.select_device(
                        e.devices) if try_reconnect else ''

                if device != '':
                    logger.log.info('reconnecting as: ' + device)
                    sl.reconnect(device)
                    channels = channels + sl.channels()
                else:
                    raise

        # Remove duplicate channels (same channels provided by skylink.sk and skylink.cz)
        if 'true' == self._addon.getSetting('playlist_unique'):
            logger.log.info('Filtering channels to create unique list...')
            channels = utils.unique_channels(channels)

        logger.log.info('Updating playlist [%d channels]' % len(channels))
        try:
            path = os.path.join(self._addon.getSetting('playlist_folder'),
                                self._addon.getSetting('playlist_file'))
            _skylink_logos = 'true' == self._addon.getSetting('sl_logos')
            exports.create_m3u(channels, path,
                               sl.getUrl() + '/' if _skylink_logos else None)
            result = 1
        except IOError as e:
            logger.log.error(str(e))
            raise skylink.SkylinkException(30503)

        if 'true' == self._addon.getSetting('epg_generate'):
            days = int(self._addon.getSetting('epg_days'))
            logger.log.info('Updating EPG [%d days from %s]' %
                            (days, datetime.datetime.now()))
            path = os.path.join(self._addon.getSetting('epp_folder'),
                                self._addon.getSetting('epg_file'))
            try:
                exports.create_epg(
                    channels, sl.epg(channels, datetime.datetime.now(), days),
                    path, self._addon,
                    sl.getUrl() + '/')
                result = 2
            except IOError as e:
                logger.log.error(str(e))
                raise skylink.SkylinkException(30504)

        return result
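
Note: the utils.select_device(e.devices) call above runs only when the backend reports too many paired devices and the add-on is allowed to reconnect. The helper below is not the add-on's actual implementation; it is a minimal sketch, assuming e.devices is a list of objects exposing name and id attributes, of how such a picker could be built on Kodi's xbmcgui.Dialog().select:

import xbmcgui


def select_device(devices):
    # Ask the user which registered device to reconnect as; '' means nothing chosen.
    # Sketch only - the real plugin.video.sl helper may differ.
    names = [d.name for d in devices]  # assumes each device exposes .name
    idx = xbmcgui.Dialog().select('Select device to reconnect as', names)
    if idx < 0:  # dialog cancelled
        return ''
    return devices[idx].id  # assumes each device exposes .id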
Code Example #2
File: cdm_uavdt.py Project: TWSFar/CRGNet
    def _parse(self, kwargs):
        state_dict = self._state_dict()
        for k, v in kwargs.items():
            if k not in state_dict:
                raise ValueError('Unknown option: "--%s"' % k)
            setattr(self, k, v)

        self.device, self.gpu_id = select_device()

        print('======user config========')
        pprint(self._state_dict())
        print('==========end============')
Code Example #3
File: visualize.py Project: hedgefair/SDAE_pytorch
def visualize(data_size=10000):
    '''
    Plot the 3-dimensional feature space and the reconstructed result.
    Images are stored in 'result_SDAE'.
    * Requires an autoencoder model with 3-dimensional output.
      Can only be used on training data. *
    '''
    batchSize = 100
    device = utils.select_device()
    Test_dataset = utils.StateData(data_size=data_size)
    visu_loader = torch.utils.data.DataLoader(Test_dataset,
                                              batch_size=batchSize,
                                              shuffle=True)

    chekp = torch.load('model/chekp.pt')
    reconstruct_dim = chekp['in_dim']
    feature_dim = chekp['out_dim']
    chekp_model = chekp['model']
    stack_num = chekp['stack_num']

    model = StackDAE(reconstruct_dim, feature_dim, stack_num)

    model.to(device).load_state_dict(chekp_model)

    reconstruct_stack = torch.FloatTensor(reconstruct_dim).to(
        device).unsqueeze(0)
    feature_stack = torch.FloatTensor(feature_dim).to(device).unsqueeze(0)
    for i, data in enumerate(visu_loader):
        data = data.to(device)
        reconstruct = model.forward(data)
        reconstruct_stack = torch.cat((reconstruct_stack, reconstruct))
        feature_stack = torch.cat((feature_stack, model.hidden_feature))

    reconstruct_stack = reconstruct_stack[1:].detach().cpu().numpy()
    feature_stack = feature_stack[1:].detach().cpu().numpy()

    print('%d data points, input size: %d, feature size: %d' %
          (feature_stack.shape[0], reconstruct_dim, feature_stack.shape[1]))

    fig1 = plt.figure()
    ax = Axes3D(fig1)
    ax.scatter(feature_stack[:, 0], feature_stack[:, 1], feature_stack[:, 2])
    fig1.savefig('result_SDAE/result_feature.png')

    fig2 = plt.figure()
    ax = Axes3D(fig2)
    ax.scatter(reconstruct_stack[:, -8], reconstruct_stack[:, -7],
               reconstruct_stack[:, -6])
    fig2.savefig('result_SDAE/result_reconstruct.png')

    return feature_stack
Code Example #4
File: main.py Project: Sha-Lab/qmc
def main(args=None):
    args = parse_args(args)

    select_device(0 if torch.cuda.is_available() and not args.cpu else -1)
    #select_device(-1)
    logger.prog('device: {}'.format(Config.DEVICE))

    if args.task == 'learn':
        exp_f = learning
    elif args.task == 'cost':
        exp_f = compare_cost
    elif args.task == 'grad':
        exp_f = compare_grad
    else:
        raise Exception('unsupported task')
    if args.post_f is not None:
        post_f = lambda results: getattr(postprocess, args.post_f)(args,
                                                                   results)
    else:
        post_f = None
    if args.mode == 'single':
        exp_f(args)
    elif args.mode == 'seeds':
        running_seeds(args.save_fn,
                      exp_f,
                      argparse.Namespace(**vars(args)),
                      args.n_seeds,
                      post_f=post_f)
    elif args.mode == 'collect':
        assert args.task in ['grad', 'learn']
        success_f = lambda result: len(result[-1]['out']) == 0
        collect_seeds(args.save_fn,
                      exp_f,
                      args,
                      success_f=success_f,
                      n_seeds=args.n_seeds,
                      max_seed=args.max_seed,
                      post_f=post_f)
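
Here select_device is called for its side effect: it takes a GPU index (-1 for CPU) and sets the global Config.DEVICE that the rest of the code reads, rather than returning a device. A minimal sketch of that pattern, assuming a DeepRL-style Config class (the names below are illustrative, not necessarily those used in Sha-Lab/qmc):

import torch


class Config:
    DEVICE = torch.device('cpu')  # global default, overwritten by select_device


def select_device(gpu_id):
    # gpu_id >= 0 selects that CUDA device; any negative value falls back to CPU.
    if gpu_id >= 0 and torch.cuda.is_available():
        Config.DEVICE = torch.device('cuda:%d' % gpu_id)
    else:
        Config.DEVICE = torch.device('cpu')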
Code Example #5
    def _set_cuda(self, model):
        has_device = hasattr(self.config, "device")
        if has_device:
            device = self.config.device
        else:
            device = ""
        device = select_device(device, self.config.train.batch_size)
        model = torch.nn.DataParallel(model).to(device)
        self.device = device
        self.cuda = self.device.type != 'cpu'
        setattr(self.config, "device", device)
        setattr(self.config, "cuda", self.cuda)
        return model
Code Example #6
File: visualize.py Project: hedgefair/SDAE_pytorch
def visualize_orig(data_size=10000):
    '''
    Plot the positions (x, y, z) of the last joint of the original data (observation).
    * Can only be used on training data *
    '''
    batchSize = 100
    device = utils.select_device()
    Test_dataset = utils.StateData(data_size)
    visu_loader = torch.utils.data.DataLoader(Test_dataset,
                                              batch_size=batchSize,
                                              shuffle=True)

    output_stack = torch.FloatTensor(48).unsqueeze(0)

    for i, data in enumerate(visu_loader):
        output_stack = torch.cat((output_stack, data))

    output_stack = output_stack[1:].numpy()
    fig = plt.figure()

    ax = Axes3D(fig)
    ax.scatter(output_stack[:, -8], output_stack[:, -7], output_stack[:, -6])
    fig.savefig('result_SDAE/result_orig.png')
Code Example #7
File: test.py Project: Lornatang/One-Stage-Detector
def evaluate(cfg, args):
    device = select_device(args.device)
    # Initialize/load model
    if cfg.MODEL.META_ARCHITECTURE:

        # Initialize model
        model = YOLOv3(cfg).to(device)

        # Load weights
        if cfg.TEST.WEIGHTS.endswith(".pth"):
            state = torch.load(cfg.TEST.WEIGHTS, map_location=device)
            model.load_state_dict(state["state_dict"])
        else:
            load_darknet_weights(model, cfg.TEST.WEIGHTS)

        if device.type != "cpu" and torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
    else:
        warnings.warn(
            "WARNING: Backbone network cannot be empty! "
            f"Loading default Darknet53 meta architecture for `{cfg.CONFIG_FILE}`!"
        )
        model = YOLOv3(cfg).to(device)

    if cfg.TEST.TASK == "visual":
        images = os.listdir(os.path.join(os.getcwd(), "data", "test"))
        for filename in images:
            path = os.path.join(os.getcwd(), "data", "test", filename)

            images = cv2.imread(path)
            assert images is not None

            bboxes_prd = Evaluator(model, cfg=cfg).get_bbox(images)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]

                visualize_boxes(image=images,
                                boxes=boxes,
                                labels=class_inds,
                                probs=scores,
                                class_labels=cfg.CLASSES)
                path = os.path.join(f"./outputs/{filename}")

                cv2.imwrite(path, images)

    elif cfg.TEST.TASK == "eval":
        maps = 0.
        with torch.no_grad():
            aps = Evaluator(model, visiual=True,
                            cfg=cfg).calculate_aps(cfg.TEST.MULTI_SCALE,
                                                   cfg.TEST.FLIP)

            for i in aps:
                print(f"{i:25s} --> mAP : {aps[i]:.4f}")
                maps += aps[i]
            maps = maps / len(cfg.CLASSES)
            print(f'mAP:{maps:.6f}')

        return maps
Code Example #8
File: train.py Project: wslerry/regresstorch
def train():
    input_dir, var1, var2, adam, device = opt.input, opt.var1, opt.var2, opt.adam, opt.device

    data = Dataset(input_dir).data

    device = utils.select_device(device, batch_size=opt.batch_size)

    for i in (var1, var2):
        variable_check(data, i)

    use_cuda = torch.cuda.is_available()

    X_reshape = data[var1].values.reshape(-1, 1)
    y_reshape = data[var2].values.reshape(-1, 1)

    if use_cuda:
        x, y = gpu_dataset(X_reshape, y_reshape)
    else:
        x, y = cpu_dataset(X_reshape, y_reshape)

    # Initialize model
    net = LinearNet(n_feature=x.size(1), n_output=y.size(1)).to(device)

    if adam:
        # optimizer using Adam
        optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
    else:
        # optimizer using SGD
        optimizer = torch.optim.SGD(net.parameters(), lr=0.001)

    loss_func = nn.MSELoss()

    batch_size = opt.batch_size
    n_epochs = opt.epoch
    batch_no = len(x) // batch_size
    train_loss = 0
    train_loss_min = np.inf

    if use_cuda:
        for epoch in range(n_epochs):
            train_loss = 0
            for i in range(batch_no):
                start = i * batch_size
                end = start + batch_size

                optimizer.zero_grad()
                # train on the current mini-batch only
                prediction = net(x[start:end])
                loss = loss_func(prediction, y[start:end])
                loss.backward()
                optimizer.step()
                values, labels = torch.max(prediction, 1)
                num_right = np.sum(labels.cpu().data.numpy() ==
                                   y[start:end].cpu().data.numpy())
                train_loss += loss.item() * batch_size

            train_loss = train_loss / len(x)
            if train_loss <= train_loss_min:
                print(
                    "Training loss decreased ({:6f} ===> {:6f}). Saving the model..."
                    .format(train_loss_min, train_loss))
                torch.save(net.state_dict(), "regression_model.pt")
                train_loss_min = train_loss

            if epoch % 50 == 0:
                print('')
                print("Epoch: {} \tTrain Loss: {} \tTrain Accuracy: {}".format(
                    epoch + 1, train_loss, num_right / len(y[start:end])))
        print('Training Ended! ')
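
In this example (and in the YOLO-derived snippets below) select_device receives a device string such as '', 'cpu' or '0,1' plus an optional batch size and returns a torch.device. The following is a minimal sketch of what such a YOLO-style helper typically does, under the assumption that these repositories follow the common YOLOv3/YOLOv5 pattern; it is not the verbatim utils implementation from any of them:

import os

import torch


def select_device(device='', batch_size=None):
    # Resolve a device string ('', 'cpu', '0', '0,1', ...) to a torch.device.
    cpu = device.lower() == 'cpu'
    if cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() to False
    elif device:
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # restrict the visible GPUs
        assert torch.cuda.is_available(), 'CUDA unavailable, device %s requested' % device

    cuda = not cpu and torch.cuda.is_available()
    if cuda and batch_size:
        n = torch.cuda.device_count()
        # DataParallel splits the batch across GPUs, so it should divide evenly.
        assert batch_size % n == 0, 'batch size %d not a multiple of GPU count %d' % (batch_size, n)
    return torch.device('cuda:0' if cuda else 'cpu')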
Code Example #9
File: service.py Project: Pityke1105/plugin.video.sl
    def update(self, try_reconnect=False):
        result = -1

        _playlist_generate = 'true' == self._addon.getSetting(
            'playlist_generate')
        _epg_generate = 'true' == self._addon.getSetting('epg_generate')
        if not _playlist_generate and not _epg_generate:
            return result

        _username = self._addon.getSetting('username')
        _password = self._addon.getSetting('password')
        _profile = xbmc.translatePath(self._addon.getAddonInfo('profile'))
        _provider = 'directone.hu'
        _pin_protected_content = 'false' != self._addon.getSetting(
            'pin_protected_content')
        sl = skylink.Skylink(_username, _password, _profile, _provider,
                             _pin_protected_content)
        logger.log.info('SL created')

        try:
            channels = sl.channels()
        except skylink.TooManyDevicesException as e:
            if self._addon.getSetting('reuse_last_device') == 'true':
                device = utils.get_last_used_device(e.devices)
            else:
                device = utils.select_device(
                    e.devices) if try_reconnect else ''

            if device != '':
                logger.log.info('reconnecting as: ' + device)
                sl.reconnect(device)
                channels = sl.channels()
            else:
                raise

        if _playlist_generate:
            try:
                path = os.path.join(self._addon.getSetting('playlist_folder'),
                                    self._addon.getSetting('playlist_file'))
                _skylink_logos = 'true' == self._addon.getSetting('sl_logos')
                logger.log.info('Updating playlist [%d channels]' %
                                len(channels))
                exports.create_m3u(channels, path,
                                   sl._api_url if _skylink_logos else None)
                result = 1
            except IOError as e:
                logger.log.error(str(e))
                raise skylink.SkylinkException(30503)

        if _epg_generate:
            try:
                days = int(self._addon.getSetting('epg_days'))
                catchup_days = int(self._addon.getSetting('epg_days_catchup'))
                today = datetime.datetime.now()
                epgFrom = today - datetime.timedelta(
                    days=catchup_days) if catchup_days > 0 else today
                epgTo = today + datetime.timedelta(days=days)
                path = os.path.join(self._addon.getSetting('epp_folder'),
                                    self._addon.getSetting('epg_file'))
                logger.log.info('Updating EPG [from %s to %s]' %
                                (epgFrom, epgTo))
                exports.create_epg(channels, sl.epg(channels, epgFrom, epgTo),
                                   path)
                result = 2
            except IOError as e:
                logger.log.error(str(e))
                raise skylink.SkylinkException(30504)

        return result
Code Example #10
File: train.py Project: Lornatang/One-Stage-Detector
    print(f"{epoch - start_epoch} epochs completed "
          f"in {(time.time() - start_time) / 3600:.3f} hours.\n")
    if torch.cuda.device_count() > 1:
        dist.destroy_process_group()
    torch.cuda.empty_cache()


if __name__ == "__main__":
    mp.set_start_method("spawn", force=True)
    args = get_parser().parse_args()
    cfg = setup_cfg(args)
    args.weights = "weights/checkpoint.pth" if args.resume else args.weights

    print(args)

    device = select_device(args.device, apex=mixed_precision)

    if device.type == "cpu":
        mixed_precision = False

    try:
        os.makedirs("weights")
    except OSError:
        pass

    try:
        # Start Tensorboard with "tensorboard --logdir=runs"
        from torch.utils.tensorboard import SummaryWriter

        tb_writer = SummaryWriter()
    except:
Code Example #11
File: val.py Project: noitq/mlflow_eval_detection
                     default='/u01/data/bdd100k/det/bdd100k.yaml',
                     help='data.yaml path')
 parser.add_argument('--epochs', type=int, default=1)
 parser.add_argument("--device", type=str, default='cpu')
 parser.add_argument("--batch_size", type=int, default=32)
 parser.add_argument("--input_size", type=int, default=640)
 parser.add_argument('--conf_thres', type=float, default=0.001)
 parser.add_argument('--iou_thres', type=float, default=0.6)
 parser.add_argument('--augment', action='store_true')
 parser.add_argument('--verbose', action='store_true')
 parser.add_argument('--save_txt', action='store_true')
 parser.add_argument('--save_conf', action='store_true')
 parser.add_argument('--save_json', action='store_true')
 parser.add_argument('--single_cls', action='store_true')
 opt = parser.parse_args()
 device = select_device(opt.device)
 """
 Init model
 """
 od = DetectionModel(opt.model_name, opt.model_version, opt)
 """
 Init data
 """
 with open(opt.data) as f:
     data = yaml.load(f, Loader=yaml.FullLoader)  # model dict
 path = data['test']
 nc = 1 if opt.single_cls else int(data['nc'])  # number of classes
 iouv = torch.linspace(0.5, 0.95,
                       10).to(device)  # iou vector for mAP@0.5:0.95
 niou = iouv.numel()
 dataloader = create_dataloader(path,
Code Example #12
    #     return CosineAnnealingLR(optim, t_max)
    elif sche_type == "auto":
        return LambdaLR(optim, lambda x: (((1 + np.cos(x * np.pi / epochs)) / 2) ** 1.0) * 0.9 + 0.1)
    else:
        return None


if __name__ == "__main__":
    params_file = 'params.yml'
    params_file = check_file(params_file)
    params = Params(params_file)  # load params

    params.save_dir = os.path.join(os.getcwd(), params.save_dir)
    os.makedirs(params.save_dir, exist_ok=True)  # build output directory

    device = select_device(params.device, batch_size=params.batch_size)  # build GPU env
    init_seeds(1)

    train_loader, val_loader = get_loaders(params.input_dir, params.num_classes, params.img_size, params.batch_size, params.num_workers)

    net, ckpt = get_model(params)
    net = nn.DataParallel(net).to(device, non_blocking=True)

    ''' This CrossEntropyLoss implementation comes with a softmax activation function,
        which is not suitable for this multi-label classification situation
    '''
    # loss = nn.CrossEntropyLoss()

    loss = nn.BCEWithLogitsLoss() if params.multilabels else nn.CrossEntropyLoss()

    ''' The Adam optimizer has the fastest training speed, but once familiar with the data, SGD is recommended