Code Example #1
File: turn_wifi.py  Project: JoaquinMirandaC/AutomADB
def run(turn, stf_info=None, name="", filename="log_wifi.txt"):
    stf = False
    device_params = {}
    # path to save logs
    path = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(path, "..", "..", "qa", "reports", "")
    # instantiate logger to save a report file
    logger = Logger(filename, path, name)
    # initiate connection to device(s)
    controller = PhoneControl()
    # if a serial was received, run the test only for that single serial
    if stf_info is not None:
        serials = [stf_info['remote_serial']]
        device_params = utils.get_device_data(stf_info['device'])
        stf = True
    else:
        # read the serials
        serials = controller.read_serials()

    for i in range(len(serials)):
        logger.write_log(" Device {} = {}".format(i+1, serials))
        # init device with serial
        controller.init_device(serials[i])
        # get the device parameters from the device version json
        if not stf:
            device_params = utils.get_device_data(serials[i])
        logger.write_log("Script Turn Wifi " + turn + " ---------")
        try:
            action(turn, logger, controller, device_params)
            time.sleep(3)
        except Exception as ex:
            logger.error_log(ex.message)
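For context, here is an illustrative sketch (not taken from the AutomADB project) of how this run() entry point might be invoked; the serial number, device key, and filenames below are placeholders.

# Illustrative usage only; the serial and device values are made up.
# 1) Run against every locally attached device (serials read from adb):
run("On")

# 2) Run against a single remote (STF) device, passing its metadata:
stf_info = {
    "remote_serial": "emulator-5554",   # placeholder remote serial
    "device": "pixel_3a",               # placeholder device-version key
}
run("Off", stf_info=stf_info, name="nightly", filename="log_wifi_stf.txt")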
Code Example #2
def run_framework():
    cwd_path = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(cwd_path, "qa", "reports", "")
    logger = Logger("logger_suite.txt", path)
    # all the suites specified here will be run, executing every test case
    # Wifi Suite
    WifiSuite.run_suite()
    # Dialer Suite
    DialerSuite.run_suite()
    # Calculator Suite
    CalculatorSuite.run_suite()
    # Twilio Suite
    twilio.run_suite()
    logger.end_suit()
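These examples rely on a project-specific Logger class with methods such as write_log, error_log, end_log, and end_suit, whose implementation is not shown. A minimal file-backed sketch of that interface might look like the following; the timestamp format, method bodies, and end_suit behavior are assumptions made purely for illustration.

import os
from datetime import datetime

class Logger(object):
    """Minimal sketch of a file-backed logger exposing the interface used
    in these examples. The real project class may differ."""

    def __init__(self, filename, path, name=""):
        if path and not os.path.isdir(path):
            os.makedirs(path)
        self._file = open(os.path.join(path, filename), "a")
        if name:
            self.write_log("Run: {}".format(name))

    def _write(self, level, message):
        stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self._file.write("{} [{}] {}\n".format(stamp, level, message))
        self._file.flush()

    def write_log(self, message):
        self._write("INFO", message)

    def error_log(self, message):
        self._write("ERROR", message)

    def end_log(self, message):
        self._write("END", message)
        self._file.close()

    def end_suit(self):
        self._write("INFO", "Suite finished")
        self._file.close()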
Code Example #3
File: main.py  Project: javdet/remove-branches
def main():
    """
    Метод запускает процедуру проверки веток на соответствие
    условиям удаления/оповещения и выполнении действий
    Возвращает void
    """

    logger = Logger(config.LOG_FILE)
    logger.Write("Start")

    branch_handler = BranchHandler()
    branch_handler.Handle()

    logger.Write("Stop")
    logger.Close()
Code Example #4
def run(filename, path):
    logger = Logger(filename, path)

    controller = PhoneControl(3)

    serial = controller.read_serial()
    logger.write_log("1st Device on List = {}".format(serial))

    controller.init_device()

    logger.write_log("Script Turn Wifi On---------")
    try:
        action(logger, controller)
        time.sleep(3)
    except Exception as ex:
        logger.error_log(ex.message)
Code Example #5
def run(number_to=None, number_from=None, filename="log_twilio.txt"):
    # path to save logs
    path = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(path, "..", "..", "qa", "reports", "")
    # instantiate logger to save a report file
    logger = Logger(filename, path)
    logger.write_log("Script Twilio Voice Mail  ---------")
    try:
        action(logger, number_to, number_from)
    except Exception as ex:
        logger.end_log(ex.message)
Code Example #6
def prefetch_test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    Detector = detector_factory[opt.task]

    # split = 'val' if not opt.trainval else 'test'
    detector = Detector(opt)

    datasets = build_dataset(Dataset, opt, is_train=False)
    datasets = datasets[0]

    data_loader = torch.utils.data.DataLoader(PrefetchDataset(
        opt, datasets, detector.pre_process),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=0,
                                              pin_memory=True)

    results = {}
    num_iters = len(datasets)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind, (img_id, pre_processed_images) in enumerate(data_loader):
        ret = detector.run(pre_processed_images)
        results[img_id.numpy().astype(np.int32)[0]] = ret['results']
        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(
                t, tm=avg_time_stats[t])
        bar.next()
    bar.finish()
    for t in avg_time_stats:
        print('|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(
            t, tm=avg_time_stats[t]))
    datasets.run_eval(results, opt.save_dir)
Code Example #7
def run(turn, filename="log_wifi.txt"):
    # path to save logs
    path = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(path, "..", "..", "qa", "reports", "")
    # instantiate logger to save a report file
    logger = Logger(filename, path)
    # initiate connection to device(s)
    controller = PhoneControl()
    # read the serials
    serials = controller.read_serials()
    for i in range(len(serials)):
        logger.write_log(" Device {} = {}".format(i + 1, serials))
        # init device with serial
        controller.init_device(serials[i])
        # get the device parameters from the device version dictionary
        device_params = utils.get_device_data(serials[i])
        logger.write_log("Script Turn Wifi " + turn + " ---------")
        try:
            action(turn, logger, controller, device_params)
            time.sleep(3)
        except Exception as ex:
            logger.error_log(ex.message)
Code Example #8
File: test.py  Project: zyg11/houghnet
def test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    Detector = detector_factory[opt.task]

    split = 'val' if not opt.trainval else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind in range(num_iters):
        img_id = dataset.images[ind]
        img_info = dataset.coco.loadImgs(ids=[img_id])[0]
        img_path = os.path.join(dataset.img_dir, img_info['file_name'])

        if opt.task == 'ddd':
            ret = detector.run(img_path, img_info['calib'])
        else:
            ret = detector.run(img_path)

        results[img_id] = ret['results']

        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(
                t, avg_time_stats[t].avg)
        bar.next()
    bar.finish()
    dataset.run_eval(results, opt.save_dir)
Code Example #9
def run(a, op, b, filename="log_calculator.txt"):
    # path to save logs
    cwd_path = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(cwd_path, "..", "..", "qa", "reports", "")
    logger = Logger(filename, path)
    controller = PhoneControl(1)

    serials = controller.read_serials()
    for i in range(len(serials)):
        logger.write_log(" Device {} = {}".format(i + 1, serials))

        controller.init_device(serials[i])
        device_params = utils.get_device_data(serials[i])
        logger.write_log("Script UI Calculator -----------------")

        try:
            calculator = Calculator(a, op, b)
            if calculator.valid:
                logger.write_log("Operation: " + a + op + b)
                action(logger, controller, calculator, device_params)
            else:
                logger.error_log("Validation of Inputs Failed")
        except SyntaxError as ex:
            logger.end_log(ex.message)
        except ValueError as ex:
            logger.end_log(ex.message)
        except TypeError as ex:
            logger.end_log(ex.message)
        except ZeroDivisionError as ex:
            logger.end_log(ex.message)
        except Exception as ex:
            logger.error_log(ex.message)
Code Example #10
def run(number=None, stf_info=None, name="", filename="log_dialer.txt"):
    stf = False
    device_params = {}
    # path to save logs
    path = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(path, "..", "..", "qa", "reports", "")
    logger = Logger(filename, path, name)

    controller = PhoneControl()

    if stf_info is not None:
        serials = [stf_info['remote_serial']]
        device_params = utils.get_device_data(stf_info['device'])
        stf = True
    else:
        serials = controller.read_serials()
    for i in range(len(serials)):
        logger.write_log(" Device {} = {}".format(i + 1, serials))

        controller.init_device(serials[i])

        if not stf:
            device_params = utils.get_device_data(serials[i])

        logger.write_log("Script Dial Number---------")
        if number is None:
            n = raw_input("Enter the number to dial : ")
        else:
            n = number
        try:
            dialable, parsedNumber, msg = utils.validate_number(n)
            if dialable:
                logger.write_log(msg + " " + str(parsedNumber))
                action(logger, controller, parsedNumber, device_params)
            else:
                logger.end_log("Invalid phone number")
        except Exception as ex:
            logger.error_log(ex.message)
Code Example #11
def run(filename, path, number=None):

    logger = Logger(filename, path)

    controller = PhoneControl(3)

    serial = controller.read_serial()
    logger.write_log("1st Device on List = {}".format(serial))

    controller.init_device()

    logger.write_log("Script Dial Number---------")
    if number is None:
        n = raw_input("Enter the number to dial : ")
    else:
        n = number

    try:
        dialable, parsedNumber, msg = utils.validate_number(n)
        if dialable:
            logger.write_log(msg + " " + str(parsedNumber))
            action(logger, controller, parsedNumber)
        else:
            logger.error_log("Invalid phone number")
    except Exception as ex:
        logger.error_log(ex.message)
Code Example #12
File: main.py  Project: liweixue0215/houghnet
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    # torch.backends.cudnn.enabled = False
    Dataset = get_dataset(opt.dataset, opt.task)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv, opt.houghnet,
                         opt.region_num, opt.vote_field_size)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up data...')
    val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'),
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=1,
                                             pin_memory=True)

    if opt.test:
        _, preds = trainer.val(0, val_loader)
        val_loader.dataset.run_eval(preds, opt.save_dir)
        return

    train_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'train'),  #train
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True)

    print('Starting training...')
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'), epoch,
                           model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
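The training loops in this and the following examples apply a step-decay schedule: each time the epoch reaches an entry of opt.lr_step, the learning rate is multiplied by another factor of 0.1 and written back into the optimizer's parameter groups. The short sketch below, using made-up values for the base rate and step list, shows the cumulative effect of that formula over a run.

# Illustrative only: cumulative effect of the step-decay formula above,
# with hypothetical values base_lr = 1.25e-4 and lr_step = [90, 120].
base_lr = 1.25e-4
lr_step = [90, 120]

def lr_at(epoch):
    # Number of decay milestones already reached at this epoch; each one
    # multiplies the rate by 0.1, matching opt.lr * (0.1 ** (index + 1)).
    passed = sum(1 for step in lr_step if epoch >= step)
    return base_lr * (0.1 ** passed)

for epoch in (1, 89, 90, 119, 120, 140):
    print(epoch, lr_at(epoch))  # 1.25e-4 until epoch 90, then 1.25e-5, then 1.25e-6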
Code Example #13
def main(opt):
    torch.manual_seed(
        opt.seed
    )  # opt.seed: default=317; with torch.manual_seed set, the random numbers generated are identical on every run.
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(
        opt.dataset, opt.task
    )  # opt.dataset = coco, opt.task = ctdet (| ddd | multi_pose | exdet)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up data...')
    # val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'), batch_size=1, shuffle=False, num_workers=1,pin_memory=True)  # modified by zy
    val_loader = torch.utils.data.DataLoader(Dataset(opt, 'test'),
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=1,
                                             pin_memory=True)

    if opt.test:
        _, preds = trainer.val(0, val_loader)
        val_loader.dataset.run_eval(preds, opt.save_dir)
        return

    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    output_choice_log = '/home/zy/zy/2new_network/CenterNet-master/output_choice.log'
    if os.path.exists(output_choice_log):
        os.remove(output_choice_log)

    print('Starting training...')
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        try:
            log_dict_train, _ = trainer.train(
                epoch, train_loader
            )  # train() delegates to self.run_epoch('train', epoch, data_loader)
        except Exception as e:  # if training fails, log the traceback and skip to the next epoch
            print('Error_train!!!', e)
            print(traceback.format_exc())
            continue
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                try:
                    log_dict_val, preds = trainer.val(epoch, val_loader)
                except Exception as e:  # if validation fails, log the traceback and skip to the next epoch
                    print('Error_val!!!', e)
                    print(traceback.format_exc())
                    continue
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'), epoch,
                           model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
Code Example #14
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test

    print('Setting up data...')
    Dataset = get_dataset(opt.dataset, opt.task)
    f = open(opt.data_cfg)
    data_config = json.load(f)
    trainset_paths = data_config['train']
    dataset_root = data_config['root']
    f.close()
    transforms = T.Compose([T.ToTensor()])
    dataset = Dataset(opt,
                      dataset_root,
                      trainset_paths, (1088, 608),
                      augment=True,
                      transforms=transforms)
    opt = opts().update_dataset_info_and_set_heads(opt, dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0

    # Get dataloader

    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   trainer.optimizer,
                                                   opt.resume, opt.lr,
                                                   opt.lr_step)

    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))

        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        if epoch % 5 == 0 or epoch >= 25:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
    logger.close()
Code Example #15
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    if not opt.not_set_cuda_env:
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus_str
    opt.device = torch.device("cuda" if opt.gpus[0] >= 0 else "cpu")
    logger = Logger(opt)

    print("Creating model...")
    model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
    optimizer = get_optimizer(opt, model)
    start_epoch = 0
    if opt.load_model != "":
        model, optimizer, start_epoch = load_model(model, opt.load_model, opt, optimizer)

    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    if opt.val_intervals < opt.num_epochs or opt.test:
        print("Setting up validation data...")
        val_loader = torch.utils.data.DataLoader(
            Dataset(opt, "val"), batch_size=1, shuffle=False, num_workers=1, pin_memory=True
        )

        if opt.test:
            _, preds = trainer.val(0, val_loader)
            val_loader.dataset.run_eval(preds, opt.save_dir)
            return

    print("Setting up train data...")
    train_loader = torch.utils.data.DataLoader(
        Dataset(opt, "train"),
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True,
    )

    print("Starting training...")
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else "last"
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write("epoch: {} |".format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary("train_{}".format(k), v, epoch)
            logger.write("{} {:8f} | ".format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, "model_{}.pth".format(mark)), epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
                if opt.eval_val:
                    val_loader.dataset.run_eval(preds, opt.save_dir)
            for k, v in log_dict_val.items():
                logger.scalar_summary("val_{}".format(k), v, epoch)
                logger.write("{} {:8f} | ".format(k, v))
        else:
            save_model(os.path.join(opt.save_dir, "model_last.pth"), epoch, model, optimizer)
        logger.write("\n")
        if epoch in opt.save_point:
            save_model(os.path.join(opt.save_dir, "model_{}.pth".format(epoch)), epoch, model, optimizer)
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
            print("Drop LR to", lr)
            for param_group in optimizer.param_groups:
                param_group["lr"] = lr
    logger.close()