Example #1
 def __init__(self, AceConfig, AceProxy):
     self.AceConfig = AceConfig
     self.picons = self.channels = self.playlist = self.etag = self.last_modified = None
     self.playlisttime = gevent.time.time()
     self.headers = {'User-Agent': 'Magic Browser'}
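     # updateevery is presumably in minutes; schedule() takes seconds, hence the * 60.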
     if config.updateevery:
         schedule(config.updateevery * 60, self.Playlistparser)
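
The schedule helper itself is not shown on this page. A minimal sketch of a gevent-based timer that would fit the calls in Examples #1, #2, and #9 (an illustrative assumption, not the project's actual implementation):

# Hypothetical sketch of a schedule() helper built on gevent timers.
import gevent

def schedule(delay, func, *args, **kwargs):
    # Run func after `delay` seconds, then re-arm so it repeats periodically.
    def periodic():
        func(*args, **kwargs)
        gevent.spawn_later(delay, periodic)
    gevent.spawn_later(delay, periodic)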
Example #2
 def __init__(self, AceConfig, AceProxy):
     self.config = AceConfig
     self.logger = logging.getLogger('plugin_TorrentFilms')
     self.playlist = []
     self.videoextdefaults = ('.3gp', '.aac', '.ape', '.asf', '.avi', '.dv',
                              '.divx', '.flac', '.flc', '.flv', '.m2ts',
                              '.m4a', '.mka', '.mkv', '.mpeg', '.mpeg4',
                              '.mpegts', '.mpg4', '.mp3', '.mp4', '.mpg',
                              '.mov', '.m4v', '.ogg', '.ogm', '.ogv',
                              '.oga', '.ogx', '.qt', '.rm', '.swf', '.ts',
                              '.vob', '.wmv', '.wav', '.webm')
     if config.updateevery:
         schedule(config.updateevery * 60, self.playlistdata)
Example #3
 def download_video(self, video_lists):
     # Requires: import os, datetime; from urllib import request; plus the project's utils module.
     print(">> Starting video list download...")
     print(video_lists)
     x = 0
     file_path = self.file_path
     if not os.path.exists(file_path):
         os.mkdir(file_path)
     for video in video_lists:
         video_name = video[0]
         video_url = video[1]
         # Resolve the real (direct) video URL
         print("Resolving URL...")
         real_url = self.get_real_url(video_url, 1)
         # print("Real URL: " + real_url)
         # Current timestamp
         now_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
         # File name: current timestamp + video title
         file_name = now_time + '_' + video_name
         print(file_name + ' download started...')
         # urlretrieve reports progress through the reporthook callable;
         # utils.schedule() presumably returns such a progress callback.
         request.urlretrieve(real_url,
                             os.path.join(file_path, '%s.mp4' % file_name),
                             reporthook=utils.schedule())
         print(file_name + ' download finished...')
         x = x + 1
     print('Downloads finished: ' + str(x) + ' videos in total')
     print('Videos saved to: ' + file_path)
Example #4
    def init_callbacks(self):
        scheduler = None
        tensorboard_callback = None
        self.callbacks = []
        if self.lr_decay_policy == 'poly':
            scheduler = schedule(
                self.lr, self.lr_decay_power, self.epochs
            )  # polynomial decay, cf. PolynomialDecay(maxEpochs=100, initAlpha=0.01, power=self.lr_decay_power)
        # initializing tensorboard
#        shutil.rmtree(self.logdir+"/train/")
        os.makedirs(self.logdir, exist_ok=True)
        tensorboard_callback = TensorBoard(log_dir=self.logdir)

        if scheduler:
            print("learning rate scheduler added\n\n")
            self.callbacks.append(LearningRateScheduler(scheduler))
        if tensorboard_callback:
            print("tensorboard callback added with folder " + self.logdir +
                  "\n\n")
            self.callbacks.append(tensorboard_callback)
        # Create a callback that saves the model's weights every 50 epochs (period=50 below)
        cp_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath="/home/essys/projects/segmentation/checkpoints/" +
            self.train_id + "/cp-{epoch:04d}.ckpt",
            verbose=0,
            save_weights_only=True,
            period=50)
        self.callbacks.append(cp_callback)
        diagnoser = ModelDiagonoser(self.training_generator, self.batch_size,
                                    self.n_samples, self.logdir,
                                    self.input_shape, self.n_classes)
        self.callbacks.append(diagnoser)
Example #5
def swa_train(model, swa_model, train_iter, valid_iter, optimizer, criterion, pretrain_epochs, swa_epochs, swa_lr, cycle_length, device, writer, cpt_filename):
    swa_n = 1

    swa_model.load_state_dict(copy.deepcopy(model.state_dict()))

    utils.save_checkpoint(
        cpt_directory,
        1,
        '{}-swa-{:2.4f}-{:03d}-{}'.format(date, swa_lr, cycle_length, cpt_filename),
        state_dict=model.state_dict(),
        swa_state_dict=swa_model.state_dict(),
        swa_n=swa_n,
        optimizer=optimizer.state_dict()
    )

    for e in range(swa_epochs):
        epoch = e + pretrain_epochs
        time_ep = time.time()
        lr = utils.schedule(epoch, cycle_length, lr_init, swa_lr)
        utils.adjust_learning_rate(optimizer, lr)

        train_res = utils.train_epoch(model, train_iter, optimizer, criterion, device)
        valid_res = utils.evaluate(model, valid_iter, criterion, device)

        utils.moving_average(swa_model, model, swa_n)
        swa_n += 1
        utils.bn_update(train_iter, swa_model)
        swa_res = utils.evaluate(swa_model, valid_iter, criterion, device)

        time_ep = time.time() - time_ep
        values = [epoch + 1, lr, swa_lr, cycle_length, train_res['loss'], valid_res['loss'], swa_res['loss'], None, None, time_ep]
        writer.writerow(values)

        table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='8.4f')
        if epoch % 20 == 0:
            table = table.split('\n')
            table = '\n'.join([table[1]] + table)
        else:
            table = table.split('\n')[2]
        print(table)

        utils.save_checkpoint(
            cpt_directory,
            epoch + 1,
            '{}-swa-{:2.4f}-{:03d}-{}'.format(date, swa_lr, cycle_length, cpt_filename),
            state_dict=model.state_dict(),
            swa_state_dict=swa_model.state_dict(),
            swa_n=swa_n,
            optimizer=optimizer.state_dict()
        )
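
For reference, a cyclical SWA schedule like the utils.schedule call above might look like the following sketch (assuming a linear decay from the base LR down to swa_lr within each cycle; lr_init, cpt_directory, date, and columns appear to be module-level names in the original project, and the real helper may differ):

def schedule(epoch, cycle_length, lr_init, swa_lr):
    # Position within the current cycle, normalized to [0, 1)
    t = (epoch % cycle_length) / cycle_length
    # Linearly interpolate from lr_init down to swa_lr over the cycle
    return (1 - t) * lr_init + t * swa_lr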
Example #6
    def train_network(self):
        for t in range(self.T):
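            # schedule() presumably computes a decayed learning rate from the initial
            # rate lr0 and decay parameter d0 at step t (e.g. lr0 / (1 + (lr0 / d0) * t)).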
            lr = schedule(self.lr0, self.d0, t)
            # data, labels = shuffle(self.train, self.labels)
            data = self.train
            labels = self.labels
            # for ex in range(self.N):
            for ex in range(1):
                features = data[ex, :]
                lab = labels[ex]
                print(features, lab)
                self.ff(features)
                self.bp(features, lab)
                for layer in self.layers:
                    if layer.layer_num == self.d:
                        layer.ws -= lr * layer.dws.T
                    else:
                        layer.ws -= lr * layer.dws

        return self
Example #7
    def train_and_apply(self, train, test):
        train_err = np.zeros(self.T)
        test_err = np.zeros(self.T)
        for t in range(self.T):
            lr = schedule(self.lr0, self.d0, t)
            data, labels = shuffle(self.train, self.labels)
            for ex in range(self.N):
                features = data[ex, :]
                lab = labels[ex]
                self.ff(features)
                self.bp(features, lab)
                for layer in self.layers:
                    if layer.layer_type == 'out':
                        layer.ws -= lr * layer.dws.T
                    else:
                        layer.ws -= lr * layer.dws

            err = self.apply_network(train)
            train_err[t] = err
            err = self.apply_network(test)
            test_err[t] = err

        return self, train_err, test_err
Example #8
def print_pretty_matrix(matrix):
    col_labels = ['slot/day', 'Mon', 'Tu', 'Wed', 'Thur', 'Fri']
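    # schedule()["pairs"] presumably returns the time-slot labels for the first column;
    # one PrettyTable row is pre-filled per slot, then populated from the matrix below.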
    table_vals = [[schedule()["pairs"][i], '', '', '', '', '']
                  for i in range(6)]

    table = prettytable.PrettyTable(col_labels, hrules=prettytable.ALL)

    for i in range(len(matrix)):

        for j in range(len(matrix[i])):
            text = ''
            teachers = set()
            for c in matrix[i][j]:
                teachers.add(c.subject.teacher)
                text += '[subject: {}; tutor: {}; room: {}]\n'.format(
                    repr(c.subject), c.subject.teacher, c.classroom)
            table_vals[j][i + 1] = text
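            # Fewer unique teachers than classes in this slot means one teacher
            # is double-booked, i.e. a scheduling conflict.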
            if len(teachers) < len(matrix[i][j]):
                print("error")

    for row in table_vals:
        table.add_row(row)

    print(table)
Example #9
    else:
        logger.error('Cannot drop privileges to user %s' %
                     AceConfig.aceproxyuser)
        sys.exit(1)

# Creating ClientCounter
AceProxy.pool = Pool()
AceProxy.clientcounter = ClientCounter()
#### AceEngine startup
AceProxy.ace = findProcess('ace_engine.exe' if AceConfig.osplatform ==
                           'Windows' else os.path.basename(AceConfig.acecmd))
if not AceProxy.ace and AceConfig.acespawn:
    if spawnAce():
        logger.info('Local AceStream engine spawned with pid %s' %
                    AceProxy.ace.pid)
        schedule(AceConfig.acestartuptimeout,
                 checkAce)  # Start AceEngine alive watchdog
elif AceProxy.ace:
    AceProxy.ace = psutil.Process(AceProxy.ace)
    logger.info('Local AceStream engine found with pid %s' % AceProxy.ace.pid)

# If the AceEngine was started (or found) locally
if AceProxy.ace:
    AceConfig.ace['aceHostIP'] = '127.0.0.1'
    # Refresh the acestream.port file on Windows
    if AceConfig.osplatform == 'Windows': detectPort()
    else: gevent.sleep(AceConfig.acestartuptimeout)
else:
    url = 'http://{aceHostIP}:{aceHTTPport}/webui/api/service'.format(
        **AceConfig.ace)
    params = {
        'method': 'get_version',
Example #10
    # net = DPN92(args.cifar100)
    net = ACN(args.cifar100)

if use_cuda:
    net.cuda()
    net = torch.nn.DataParallel(net,
                                device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),
                      lr=args.lr,
                      momentum=0.9,
                      weight_decay=5e-4)

if args.sch: scheduler = schedule(optimizer, milestones=[150, 250], gamma=0.1)
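# schedule here is presumably an LR scheduler factory such as
# torch.optim.lr_scheduler.MultiStepLR imported under this name:
# the learning rate is multiplied by gamma at epochs 150 and 250.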


# Training
def train(epoch):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    print(net)
    print(vars(args))
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
Example #11
def main(args):

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")

    train_loader, test_loader = load_dataset(args.label, args.batch_size,
                                             args.half_length, args.nholes)

    if args.label == 10:
        model = ShakeResNet(args.depth, args.w_base, args.label)
    else:
        model = ShakeResNeXt(args.depth, args.w_base, args.cardinary,
                             args.label)

    model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = True  # fixed typo: 'benckmark' would silently set a meaningless attribute

    if args.optimizer == 'sgd':
        print("using sgd")
        opt = optim.SGD(model.parameters(),
                        lr=args.lr,
                        momentum=args.momentum,
                        weight_decay=args.weight_decay,
                        nesterov=args.nesterov)

    elif args.optimizer == 'abd':
        print("using adabound")
        opt = abd.AdaBound(model.parameters(),
                           lr=args.lr,
                           gamma=args.gamma,
                           weight_decay=args.weight_decay,
                           final_lr=args.final_lr)

    elif args.optimizer == 'swa':
        print("using swa")
        opt = optim.SGD(model.parameters(),
                        lr=args.lr,
                        momentum=args.momentum,
                        weight_decay=args.weight_decay)
        steps_per_epoch = len(train_loader.dataset) // args.batch_size
        opt = swa(opt,
                  swa_start=args.swa_start * steps_per_epoch,
                  swa_freq=steps_per_epoch,
                  swa_lr=args.swa_lr)
    else:
        print("not valid optimizer")
        raise SystemExit(1)  # bare 'exit' was a no-op expression; abort explicitly

    loss_func = nn.CrossEntropyLoss().cuda()

    headers = [
        "Epoch", "LearningRate", "TrainLoss", "TestLoss", "TrainAcc.",
        "TestAcc."
    ]

    #if args.optimizer=='swa':
    #   headers = headers[:-1] + ['swa_te_loss', 'swa_te_acc'] + headers[-1:]
    #  swa_res = {'loss': None, 'accuracy': None}

    logger = utils.Logger(args.checkpoint, headers, mod=args.optimizer)

    for e in range(args.epochs):

        if args.optimizer == 'swa':
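            # utils.schedule presumably anneals the LR from args.lr down to
            # args.swa_lr around epoch args.swa_start, per the usual SWA recipe.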
            lr = utils.schedule(e, args.optimizer, args.epochs, args.swa_start,
                                args.swa_lr, args.lr)
            utils.adjust_learning_rate(opt, lr)
        elif args.optimizer == 'sgd':
            lr = utils.cosine_lr(opt, args.lr, e, args.epochs)
        else:
            raise SystemExit(1)  # bare 'exit' was a no-op; note the 'abd' branch gets no LR schedule here

        #train
        train_loss, train_acc, train_n = utils.train_epoch(
            train_loader, model, opt)
        #eval
        test_loss, test_acc, test_n = utils.eval_epoch(test_loader, model)

        logger.write(e + 1, lr, train_loss / train_n, test_loss / test_n,
                     train_acc / train_n * 100, test_acc / test_n * 100)

        if args.optimizer == 'swa' and (e + 1) >= args.swa_start and args.eval_freq > 1:
            if e == 0 or e % args.eval_freq == args.eval_freq - 1 or e == args.epochs - 1:
                opt.swap_swa_sgd()
                opt.bn_update(train_loader, model, device='cuda')  # was 'train_loaders', which is undefined here
                #swa_res = utils.eval_epoch(test_loaders['test'], model)
                opt.swap_swa_sgd()