Example no. 1
def run(cfg_path=None):
    # check for a configuration file
    check_config(cfg_path)

    cmds = {
        'model': run_model,
        'rew': run_rew,
        'help': run_help,
        'commands': run_commands,
        'dataset': run_dataset
    }

    # run_dataset()
    # return

    welcome()

    if CLI_ENABLED:
        while True:
            cmd = raw_input('>>> ')
            try:
                func = cmds[cmd]
            except KeyError:
                continue

            func()
    else:
        run_model(cfg_path)
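
The snippet above only shows the call site. A minimal sketch of a compatible check_config (the name is from the snippet, the behavior here is an assumption, not the original implementation): accept a missing path as "use defaults", but fail fast on a path that points to no readable file.

import os

def check_config(cfg_path=None):
    # Hypothetical validator matching the call above.
    if cfg_path is None:
        return
    if not os.path.isfile(cfg_path):
        raise IOError("config file not found: %s" % cfg_path)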
Example no. 2
def __init__(self):
    # Check the system configuration
    config.check_config()
    # Instantiate the business objects
    # Icons
    print "start icons scan"
    self.icon_dict = IconDict()
    # Emerald
    print "start emerald scan"
    self.emerald_dict = EmeraldDict()
    # Metacity
    print "start metacity scan"
    self.metacity_dict = MetacityDict()
    # Mouse
    print "start mouse scan"
    self.mouse_dict = MouseDict()
    # Gtk
    print "start gtk scan"
    self.gtk_dict = GtkDict()
    # Wallpapers
    print "start wallpapers scan"
    self.wallpapers_dict = WallPaperDict()
    # Cubemodels
    print "start cubemodels scan"
    self.cubemodels_dict = CubemodelsDict()
    # Fullpacks
    print "start fullpacks scan"
    self.fullpack_dict = FullPackDict()

    # Instantiate the graphical interface
    self.gtk_gui = MainWindow()
    self.gtk_gui.initialisation_gui(self)

    gtk.main()
Example no. 3
def main():
    """
    Entry point
    """
    sys.stdout.write("\x1b]0;poezio\x07")
    sys.stdout.flush()
    import config
    config_path = config.check_create_config_dir()
    config.run_cmdline_args(config_path)
    config.create_global_config()
    config.check_create_log_dir()
    config.check_create_cache_dir()
    config.setup_logging()
    config.post_logging_setup()

    from config import options

    if options.check_config:
        config.check_config()
        sys.exit(0)

    import theming
    theming.update_themes_dir()

    import logger
    logger.create_logger()

    import roster
    roster.create_roster()

    import core

    log = logging.getLogger('')

    signal.signal(signal.SIGINT, signal.SIG_IGN) # ignore ctrl-c
    cocore = singleton.Singleton(core.Core)
    signal.signal(signal.SIGUSR1, cocore.sigusr_handler) # reload the config
    signal.signal(signal.SIGHUP, cocore.exit_from_signal)
    signal.signal(signal.SIGTERM, cocore.exit_from_signal)
    if options.debug:
        cocore.debug = True
    cocore.start()

    # Warning: asyncio must always be imported after the config. Otherwise
    # the asyncio logger will not follow our configuration and won't write
    # the tracebacks in the correct file, etc
    import asyncio
    loop = asyncio.get_event_loop()

    loop.add_reader(sys.stdin, cocore.on_input_readable)
    loop.add_signal_handler(signal.SIGWINCH, cocore.sigwinch_handler)
    cocore.xmpp.start()
    loop.run_forever()
    # We reach this point only when loop.stop() is called
    try:
        cocore.reset_curses()
    except:
        pass
Example no. 4
def main():
    args = parse_args()

    update_config(cfg, args)
    check_config(cfg)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, "valid")

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    _, dataset = make_test_dataloader(cfg)

    total_size = len(dataset)
    pred_queue = Queue(100)
    workers = []
    for i in range(args.world_size):
        indices = list(range(i, total_size, args.world_size))
        p = Process(target=worker,
                    args=(i, dataset, indices, cfg, logger, final_output_dir,
                          pred_queue))
        p.start()
        workers.append(p)
        logger.info("==>" +
                    " Worker {} Started, responsible for {} images".format(
                        i, len(indices)))

    all_preds = []
    for idx in range(args.world_size):
        all_preds += pred_queue.get()

    for p in workers:
        p.join()

    res_folder = os.path.join(final_output_dir, "results")
    if not os.path.exists(res_folder):
        os.makedirs(res_folder)
    res_file = os.path.join(res_folder,
                            "keypoints_%s_results.json" % dataset.dataset)

    with open(res_file, 'w') as f:
        json.dump(all_preds, f)

    info_str = dataset._do_python_keypoint_eval(res_file, res_folder)
    name_values = OrderedDict(info_str)

    if isinstance(name_values, list):
        for name_value in name_values:
            _print_name_value(logger, name_value, cfg.MODEL.NAME)
    else:
        _print_name_value(logger, name_values, cfg.MODEL.NAME)
Example no. 5
def main():
    args = parse_args()
    # args.world_size = 8

    update_config(cfg, args)
    check_config(cfg)

    logger, final_output_dir, tb_log_dir = create_logger(cfg, args.cfg, "inference")

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    img_dir = args.img_dir
    save_dir = args.save_dir
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    
    dataset = glob(os.path.join(img_dir, "*"))

    total_size = len(dataset)
    pred_queue = Queue(100)
    workers = []
    for i in range(args.world_size):
        sub_img_list = dataset[i::args.world_size]
        p = Process(
            target=worker,
            args=(
                i, sub_img_list, cfg, logger, final_output_dir, save_dir,
                pred_queue
            )
        )
        p.start()
        workers.append(p)
        logger.info("==>" + " Worker {} Started, responsible for {} images".format(i, len(sub_img_list)))
    
    all_preds = []
    for idx in range(args.world_size):
        all_preds += pred_queue.get()
    
    for p in workers:
        p.join()

    res_file = os.path.join(save_dir, "keypoints_results.json")

    with open(res_file, 'w') as f:
        json.dump(all_preds, f)
Example no. 6
def set_engine(uri, user, password, host, port, name):
    global engine
    config.check_config()
    connection_string = ''.join([uri,
                                 user,
                                 ':',
                                 password,
                                 '@',
                                 host,
                                 ':',
                                 port,
                                 '/',
                                 name])
    engine = create_engine(connection_string, isolation_level="AUTOCOMMIT")
    Base.metadata.bind = engine
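
Building the connection string by concatenation breaks as soon as the password contains characters such as '@' or ':'. A safer equivalent, assuming SQLAlchemy 1.4+ and reusing the snippet's config and Base globals (URL.create escapes the credential fields):

from sqlalchemy import create_engine
from sqlalchemy.engine import URL

def set_engine(drivername, user, password, host, port, name):
    global engine
    config.check_config()
    # URL.create quotes special characters in user/password, which the
    # plain ''.join() version above does not.
    url = URL.create(
        drivername=drivername,  # e.g. "postgresql", without the "://"
        username=user,
        password=password,
        host=host,
        port=int(port),
        database=name,
    )
    engine = create_engine(url, isolation_level="AUTOCOMMIT")
    Base.metadata.bind = engine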
Example no. 7
def execute_swift_client(args):
    """

    Argument:
        args: argument object
    """
    (auth_url, username, password,
     rotate_limit, verify) = config.check_config(args.config)
    b = backup.Backup(auth_url, username, password, verify=verify)
    if args.list:
        # listing backup data
        backup_l = b.retrieve_backup_data_list(args.verbose)
        utils.list_data(backup_l)
    elif args.path:
        # backup data to swift
        b.backup(args.path)
    elif args.retrieve:
        # retrieve backup data
        b.retrieve_backup_data(args.retrieve, args.output)
    elif args.delete:
        # delete backup data
        b.delete_backup_data(args.delete)
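
The call above implies that config.check_config(path) parses a config file and returns a 5-tuple. A minimal sketch of such a reader, assuming Python 3's configparser and an INI-style file with a [swift] section (the section and option names are guesses, not the original layout):

import configparser

def check_config(config_file):
    # Hypothetical reader matching the 5-tuple unpacked above.
    parser = configparser.ConfigParser()
    if not parser.read(config_file):
        raise IOError("cannot read config file: %s" % config_file)
    sect = parser["swift"]
    return (sect["auth_url"],
            sect["username"],
            sect["password"],
            sect.getint("rotate_limit"),
            sect.getboolean("verify"))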
Example no. 9
import os
import sys
import glob

from os.path import abspath, join

from configobj import ConfigObj

LAUNCH_DIR = abspath(sys.path[0])
LIB_DIR = join(LAUNCH_DIR, '..', 'lib')
sys.path.insert(0, LIB_DIR)

from log import Logger
from mail import send_mail
from dpkg_control import DebianControlFile
from lockfile import LockFile
from config import check_config


config_file = "%s/debfactory/etc/debfactory.conf" % os.environ['HOME']
config = ConfigObj(config_file)

# Check for required configuration
check_config(config, ['sender_email'])

# Clean up all files older than 24h
CLEANUP_TIME = 24*3600

Log = Logger()

def check_incoming_dir():
    """
    Check the ftp incoming directory for release directories
    """
    global options
    file_list = glob.glob("%s/*" % options.input_dir)
    for file in file_list:
        if os.path.isdir(file):
            release = os.path.basename(file)
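
The debfactory snippets in this collection call check_config(config, required_keys) right after loading the ConfigObj. A minimal sketch consistent with that usage (an assumption, not the original implementation):

import sys

def check_config(config, required_keys):
    # Hypothetical validator: abort early if any required option
    # is missing from the loaded ConfigObj.
    missing = [key for key in required_keys if key not in config]
    if missing:
        sys.exit("Missing required config option(s): %s" % ", ".join(missing))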
Example no. 10
def main():
    args = parse_args()
    update_config(cfg, args)
    check_config(cfg)
    # pose_dir = prepare_output_dirs(args.outputDir)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    dump_input = torch.rand(
        (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE))
    logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE))

    if cfg.FP16.ENABLED:
        model = network_to_half(model)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True)
    else:
        model_state_file = os.path.join(final_output_dir, 'model_best.pth.tar')
        logger.info('=> loading model from {}'.format(model_state_file))
        pretrained_model_state = torch.load(model_state_file)

        # the checkpoint keys carry a '1.' prefix; copy them in one by one
        for name, param in model.state_dict().items():
            model.state_dict()[name].copy_(pretrained_model_state['1.' + name])
        # model.load_state_dict(torch.load(model_state_file))

    # model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    model.eval()

    # Input to the model
    # batch_size = 1
    x = torch.randn(1, 3, 256, 256, requires_grad=True)
    torch_out = model(x)

    # Export the model
    torch.onnx.export(
        model,                     # model being run
        x,                         # model input (or a tuple for multiple inputs)
        args.output_onnx,          # where to save the model (a file or file-like object)
        export_params=True,        # store the trained parameter weights inside the model file
        opset_version=11,          # the ONNX version to export the model to
        do_constant_folding=True,  # whether to execute constant folding for optimization
        input_names=['input'],     # the model's input names
        output_names=['output1']   # the model's output names
    )
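
After exporting, it is common to sanity-check the ONNX file against the PyTorch output. A sketch using onnx and onnxruntime (both assumed installed; note these pose networks may return a list of tensors, which the comparison below accounts for):

import numpy as np
import onnx
import onnxruntime

onnx_model = onnx.load(args.output_onnx)
onnx.checker.check_model(onnx_model)

session = onnxruntime.InferenceSession(args.output_onnx)
onnx_out = session.run(None, {'input': x.detach().numpy()})[0]

# compare against the first output head; loosen tolerances for FP16 models
torch_ref = torch_out[0] if isinstance(torch_out, (list, tuple)) else torch_out
np.testing.assert_allclose(torch_ref.detach().numpy(), onnx_out,
                           rtol=1e-3, atol=1e-5)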
Example no. 11
import os
import sys

from configobj import ConfigObj

from os.path import join, dirname, exists, realpath, abspath
LAUNCH_DIR = abspath(sys.path[0])
LIB_DIR = join(LAUNCH_DIR, '..', 'lib')
sys.path.insert(0, LIB_DIR)

from log import Logger
from dpkg_control import DebianControlFile
from lockfile import LockFile
from config import check_config

config_file = "%s/debfactory/etc/debfactory.conf" % os.environ['HOME']
config = ConfigObj(config_file)

# Check for required configuration
check_config(config, ['sender_email'])

Log = Logger()


def extract_changelog(changes_file, component, pool_dir):
    """ 
    Extract the changelog according to the changes_file
    If handling a _source.changes, extract the real changelog
    If handling binary .changes just create a link for each of the binaries
    """
    global config, options

    extract_dir = '/tmp/changelog_extract'
    control_file = DebianControlFile(changes_file)
    name = control_file['Source']
Example no. 12
def main():
    args = parse_args()
    update_config(cfg, args)
    check_config(cfg)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'test'
    )

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg, is_train=False)

    if cfg.FP16.ENABLED:
        model = network_to_half(model)

    if cfg.TEST.MODEL_FILE:
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True)
    else:
        model_state_file = os.path.join(final_output_dir, 'model_best.pth.tar')
        model.load_state_dict(torch.load(model_state_file))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    model.eval()

    if cfg.MODEL.NAME == 'pose_hourglass':
        transforms = torchvision.transforms.Compose(
            [
                torchvision.transforms.ToTensor(),
            ]
        )
    else:
        transforms = torchvision.transforms.Compose(
            [
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]
                )
            ]
        )

    HMparser = HeatmapParser(cfg)  # ans, scores

    res_folder = os.path.join(args.outdir, 'results')
    if not os.path.exists(res_folder):
        os.makedirs(res_folder)
    video_name = args.video_path.split('/')[-1].split('.')[0]
    res_file = os.path.join(res_folder, '{}.json'.format(video_name))

    # read frames in video
    stream = cv2.VideoCapture(args.video_path)
    assert stream.isOpened(), 'Cannot capture source'


    # fourcc = int(stream.get(cv2.CAP_PROP_FOURCC))
    fps = stream.get(cv2.CAP_PROP_FPS)
    frameSize = (int(stream.get(cv2.CAP_PROP_FRAME_WIDTH)),
                      int(stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    video_dir = os.path.join(args.outdir, 'video', args.data)
    if not os.path.exists(video_dir):
        os.makedirs(video_dir)

    image_dir = os.path.join(args.outdir, 'images', args.data)
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)

    if args.video_format == 'mp4':
        fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        video_path = os.path.join(video_dir, '{}.mp4'.format(video_name))
    else:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        video_path = os.path.join(video_dir, '{}.avi'.format(video_name))

    if args.save_video:
        out = cv2.VideoWriter(video_path, fourcc, fps, frameSize)

    num = 0
    annolist = []
    while True:
        ret, image = stream.read()
        print("num:", num)

        if not ret:
            break

        all_preds = []
        all_scores = []

        # size at scale 1.0
        base_size, center, scale = get_multi_scale_size(
            image, cfg.DATASET.INPUT_SIZE, 1.0, min(cfg.TEST.SCALE_FACTOR)
        )

        with torch.no_grad():
            final_heatmaps = None
            tags_list = []
            for idx, s in enumerate(sorted(cfg.TEST.SCALE_FACTOR, reverse=True)):
                input_size = cfg.DATASET.INPUT_SIZE
                image_resized, center, scale = resize_align_multi_scale(
                    image, input_size, s, min(cfg.TEST.SCALE_FACTOR)
                )
                image_resized = transforms(image_resized)
                image_resized = image_resized.unsqueeze(0).cuda()

                outputs, heatmaps, tags = get_multi_stage_outputs(
                    cfg, model, image_resized, cfg.TEST.FLIP_TEST,
                    cfg.TEST.PROJECT2IMAGE, base_size
                )

                final_heatmaps, tags_list = aggregate_results(
                    cfg, s, final_heatmaps, tags_list, heatmaps, tags
                )

            final_heatmaps = final_heatmaps / float(len(cfg.TEST.SCALE_FACTOR))
            tags = torch.cat(tags_list, dim=4)
            grouped, scores = HMparser.parse(
                final_heatmaps, tags, cfg.TEST.ADJUST, cfg.TEST.REFINE
            )

            final_results = get_final_preds(  # joints for all persons in an image
                grouped, center, scale,
                [final_heatmaps.size(3), final_heatmaps.size(2)]
            )

        image = draw_image(image, final_results, dataset=args.data)
        all_preds.append(final_results)
        all_scores.append(scores)

        img_id = num
        num += 1
        file_name = '{}.jpg'.format(str(img_id).zfill(6))
        annorect = person_result(all_preds, scores, img_id)

        annolist.append({
            'annorect': annorect,
            'ignore_regions': [],
            'image': [{'name': file_name}]
        })
        # print(annorect)

        if args.save_video:
            out.write(image)

        if args.save_img:
            img_path = os.path.join(image_dir, file_name)
            cv2.imwrite(img_path, image)

    final_results = {'annolist': annolist}
    with open(res_file, 'w') as f:
        json.dump(final_results, f)
    print('=> create test json finished!')

    # print('=> finished! you can check the output video on {}'.format(save_path))
    stream.release()
    if args.save_video:
        out.release()
    cv2.destroyAllWindows()
Example no. 13
def main():
    args = parse_args()
    update_config(cfg, args)
    check_config(cfg)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid'
    )

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
        cfg, is_train=False
    )

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True)
    else:
        model_state_file = os.path.join(
            final_output_dir, 'model_best.pth.tar'
        )
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    #dump_input = torch.rand(
    #    (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE)
    #)
    #logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    model.eval()

    data_loader, test_dataset = make_test_dataloader(cfg)

    if cfg.MODEL.NAME == 'pose_hourglass':
        transforms = torchvision.transforms.Compose(
            [
                torchvision.transforms.ToTensor(),
            ]
        )
    else:
        transforms = torchvision.transforms.Compose(
            [
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]
                )
            ]
        )

    parser = HeatmapRegParser(cfg)

    # for only kpts
    all_reg_preds = []
    all_reg_scores = []

    # for pred kpts and pred heat
    all_preds = []
    all_scores = []

    pbar = tqdm(total=len(test_dataset)) if cfg.TEST.LOG_PROGRESS else None
    for i, (images, joints, masks, areas) in enumerate(data_loader):
        assert 1 == images.size(0), 'Test batch size should be 1'

        image = images[0].cpu().numpy()
        joints = joints[0].cpu().numpy()
        mask = masks[0].cpu().numpy()
        area = areas[0].cpu().numpy()
        # size at scale 1.0
        base_size, center, scale = get_multi_scale_size(
            image, cfg.DATASET.INPUT_SIZE, 1.0, 1.0
        )

        with torch.no_grad():
            heatmap_fuse = 0
            final_heatmaps = None
            final_kpts = None
            input_size = cfg.DATASET.INPUT_SIZE

            for idx, s in enumerate(sorted(cfg.TEST.SCALE_FACTOR, reverse=True)):

                image_resized, joints_resized, _, center, scale = resize_align_multi_scale(
                    image, joints, mask, input_size, s, 1.0
                )

                image_resized = transforms(image_resized)
                image_resized = image_resized.unsqueeze(0).cuda()

                outputs, heatmaps, kpts = get_multi_stage_outputs(
                    cfg, model, image_resized, cfg.TEST.FLIP_TEST
                )
                final_heatmaps, final_kpts = aggregate_results(
                    cfg, final_heatmaps, final_kpts, heatmaps, kpts
                )

            for heatmap in final_heatmaps:
                heatmap_fuse += up_interpolate(
                    heatmap,
                    size=(base_size[1], base_size[0]),
                    mode='bilinear'
                )
            heatmap_fuse = heatmap_fuse / float(len(final_heatmaps))

            # for only pred kpts
            grouped, scores = parser.parse(
                final_heatmaps, final_kpts, heatmap_fuse[0], use_heatmap=False
            )

            if len(scores) == 0:
                all_reg_preds.append([])
                all_reg_scores.append([])
            else:
                final_results = get_final_preds(
                    grouped, center, scale,
                    [heatmap_fuse.size(-1), heatmap_fuse.size(-2)]
                )
                if cfg.RESCORE.USE:
                    scores = rescore_valid(cfg, final_results, scores)
                all_reg_preds.append(final_results)
                all_reg_scores.append(scores)

            # for pred kpts and pred heatmaps
            grouped, scores = parser.parse(
                final_heatmaps, final_kpts, heatmap_fuse[0], use_heatmap=True
            )
            if len(scores) == 0:
                all_preds.append([])
                all_scores.append([])
                if cfg.TEST.LOG_PROGRESS:
                    pbar.update()
                continue

            final_results = get_final_preds(
                grouped, center, scale,
                [heatmap_fuse.size(-1), heatmap_fuse.size(-2)]
            )

            if cfg.RESCORE.USE:
                scores = rescore_valid(cfg, final_results, scores)

            all_preds.append(final_results)
            all_scores.append(scores)

        if cfg.TEST.LOG_PROGRESS:
            pbar.update()
    
    sv_all_preds = [all_reg_preds, all_preds]
    sv_all_scores = [all_reg_scores, all_scores]
    sv_all_name = ['regression', 'final']

    if cfg.TEST.LOG_PROGRESS:
        pbar.close()

    for i in range(len(sv_all_preds)):
        print('Testing '+sv_all_name[i])
        preds = sv_all_preds[i]
        scores = sv_all_scores[i]
        name_values, _ = test_dataset.evaluate(
            cfg, preds, scores, final_output_dir, sv_all_name[i]
        )

        if isinstance(name_values, list):
            for name_value in name_values:
                _print_name_value(logger, name_value, cfg.MODEL.NAME)
        else:
            _print_name_value(logger, name_values, cfg.MODEL.NAME)
Example no. 15
import os
import sys
import glob

from configobj import ConfigObj

from os.path import join, dirname, exists, realpath, abspath
LAUNCH_DIR = abspath(sys.path[0])
LIB_DIR = join(LAUNCH_DIR, '..', 'lib')
sys.path.insert(0, LIB_DIR)

from log import Logger
from mail import send_mail
from dpkg_control import DebianControlFile
from lockfile import LockFile
from config import check_config

config_file = "%s/debfactory/etc/debfactory.conf" % os.environ['HOME']
config = ConfigObj(config_file)

# Check for required configuration
check_config(config, ['sender_email', 'base_url'])

Log = Logger()

def check_pre_build_dir():
    """
    Check the pre build directory for release directories
    """
    global options
    file_list = glob.glob("%s/*" \
        % (options.input_dir))
    for file in file_list:
        if os.path.isdir(file):
            release = os.path.basename(file)
            check_release_dir(release)
Example no. 16
import os
import sys
import menus
import config
import login
import functions

functions.clear()

config.check_config()
config.check_folders()
config.check_services()

if config.get_value("ftp", "enabled") == "true":
    print("Logging into FTP")
    login.ftp_login()

if config.get_value("dropbox", "enabled") == "true":
    print("Logging into Dropbox")
    login.dropbox_login()

if config.get_value("googledrive", "enabled") == "true":
    print("Logging into Google Drive")
    login.googledrive_login()

print("\n")

while True:
    menus.start_menu()
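
The string comparisons against "true" above suggest config.get_value returns raw strings from an INI-style file. A minimal sketch of such an accessor (the file name and layout are assumptions):

import configparser

_parser = configparser.ConfigParser()
_parser.read("config.ini")  # hypothetical config file name

def get_value(section, option):
    # Returns the raw string (e.g. "true"/"false"), which is why the
    # caller compares the result against the literal "true".
    return _parser.get(section, option, fallback="false")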
Example no. 17
def main():
    args = parse_args()
    update_config(cfg, args)
    check_config(cfg)
    pose_dir = prepare_output_dirs(args.outputDir)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    dump_input = torch.rand(
        (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE))
    logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE))

    if cfg.FP16.ENABLED:
        model = network_to_half(model)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True)
    else:
        model_state_file = os.path.join(final_output_dir, 'model_best.pth.tar')
        logger.info('=> loading model from {}'.format(model_state_file))
        # model.load_state_dict(torch.load(model_state_file))
        pretrained_model_state = torch.load(model_state_file)

        # the checkpoint keys carry a '1.' prefix; copy them in one by one
        for name, param in model.state_dict().items():
            model.state_dict()[name].copy_(pretrained_model_state['1.' + name])

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    model.eval()

    # data_loader, test_dataset = make_test_dataloader(cfg)
    if cfg.MODEL.NAME == 'pose_hourglass':
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
        ])
    else:
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                             std=[0.229, 0.224, 0.225])
        ])

    parser = HeatmapParser(cfg)
    # Loading a video
    vidcap = cv2.VideoCapture(args.videoFile)
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    if fps < args.inferenceFps:
        print('desired inference fps is ' + str(args.inferenceFps) +
              ' but video fps is ' + str(fps))
        exit()
    skip_frame_cnt = round(fps / args.inferenceFps)
    frame_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    outcap = cv2.VideoWriter(
        '{}/{}_pose.avi'.format(
            args.outputDir,
            os.path.splitext(os.path.basename(args.videoFile))[0]),
        cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), int(skip_frame_cnt),
        (frame_width, frame_height))

    count = 0
    while vidcap.isOpened():
        total_now = time.time()
        ret, image_bgr = vidcap.read()
        count += 1

        if not ret:
            # the stream is exhausted; continuing here would loop forever
            break

        if count % skip_frame_cnt != 0:
            continue

        image_debug = image_bgr.copy()
        now = time.time()
        image = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
        # image = image_rgb.cpu().numpy()
        # size at scale 1.0
        base_size, center, scale = get_multi_scale_size(
            image, cfg.DATASET.INPUT_SIZE, 1.0, min(cfg.TEST.SCALE_FACTOR))
        with torch.no_grad():
            final_heatmaps = None
            tags_list = []
            for idx, s in enumerate(sorted(cfg.TEST.SCALE_FACTOR,
                                           reverse=True)):
                input_size = cfg.DATASET.INPUT_SIZE
                image_resized, center, scale = resize_align_multi_scale(
                    image, input_size, s, min(cfg.TEST.SCALE_FACTOR))
                image_resized = transforms(image_resized)
                image_resized = image_resized.unsqueeze(0).cuda()

                outputs, heatmaps, tags = get_multi_stage_outputs(
                    cfg, model, image_resized, cfg.TEST.FLIP_TEST,
                    cfg.TEST.PROJECT2IMAGE, base_size)

                final_heatmaps, tags_list = aggregate_results(
                    cfg, s, final_heatmaps, tags_list, heatmaps, tags)

            final_heatmaps = final_heatmaps / float(len(cfg.TEST.SCALE_FACTOR))
            tags = torch.cat(tags_list, dim=4)
            grouped, scores = parser.parse(final_heatmaps, tags,
                                           cfg.TEST.ADJUST, cfg.TEST.REFINE)

            final_results = get_final_preds(
                grouped, center, scale,
                [final_heatmaps.size(3),
                 final_heatmaps.size(2)])
        for person_joints in final_results:
            for joint in person_joints:
                x, y = int(joint[0]), int(joint[1])
                cv2.circle(image_debug, (x, y), 4, (255, 0, 0), 2)
        then = time.time()
        print("Find person pose in: {} sec".format(then - now))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        img_file = os.path.join(pose_dir, 'pose_{:08d}.jpg'.format(count))
        cv2.imwrite(img_file, image_debug)
        outcap.write(image_debug)

    vidcap.release()
    outcap.release()
Example no. 18
        if post_content:
            post_text_nodes = post_content.select("p")
            post_text = [node.get_text() for node in post_text_nodes]
            if post_text:
                post_text_joined = " ".join(post_text)
                if test:
                    notification_trigger = test_mention(post_text_joined)
                else:
                    notification_trigger = is_ticket_mentioned(post_text_joined)
                if notification_trigger:
                    notify(author_name)


if __name__ == "__main__":

    if check_config():

        args = arg_parser.parse_args()
        driver = webdriver.Chrome()
        login_facebook()
        time.sleep(10)

        while True:
            parse_feed(test=args.test)
            time.sleep(random.randint(10, 30))
            simulate_activity()
            time.sleep(random.randint(30, 60))
            driver.refresh()
            time.sleep(10) # wait for page to reload
    else:
        print "Please fill out the config properties correctly!"
Example no. 19
def main():
    args = parse_args()
    update_config(cfg, args)
    check_config(cfg)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid'
    )

    logger.info(pprint.pformat(args))

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
        cfg, is_train=False
    )

    if cfg.FP16.ENABLED:
        model = network_to_half(model)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True)
    else:
        model_state_file = os.path.join(
            final_output_dir, 'model_best.pth.tar'
        )
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    model.eval()

    data_loader, test_dataset = make_test_dataloader(cfg)

    if cfg.MODEL.NAME == 'pose_hourglass':
        transforms = torchvision.transforms.Compose(
            [
                torchvision.transforms.ToTensor(),
            ]
        )
    else:
        transforms = torchvision.transforms.Compose(
            [
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]
                )
            ]
        )

    parser = HeatmapParser(cfg)

    vid_file = 0 # Or video file path
    print("Opening Camera " + str(vid_file))
    cap = cv2.VideoCapture(vid_file)

    while True:
        ret, image = cap.read()
        if not ret:
            break

        a = datetime.datetime.now()

        # size at scale 1.0
        base_size, center, scale = get_multi_scale_size(
            image, cfg.DATASET.INPUT_SIZE, 1.0, min(cfg.TEST.SCALE_FACTOR)
        )

        with torch.no_grad():
            final_heatmaps = None
            tags_list = []
            for idx, s in enumerate(sorted(cfg.TEST.SCALE_FACTOR, reverse=True)):
                input_size = cfg.DATASET.INPUT_SIZE
                image_resized, center, scale = resize_align_multi_scale(
                    image, input_size, s, min(cfg.TEST.SCALE_FACTOR)
                )
                image_resized = transforms(image_resized)
                image_resized = image_resized.unsqueeze(0).cuda()

                outputs, heatmaps, tags = get_multi_stage_outputs(
                    cfg, model, image_resized, cfg.TEST.FLIP_TEST,
                    cfg.TEST.PROJECT2IMAGE, base_size
                )

                final_heatmaps, tags_list = aggregate_results(
                    cfg, s, final_heatmaps, tags_list, heatmaps, tags
                )

            final_heatmaps = final_heatmaps / float(len(cfg.TEST.SCALE_FACTOR))
            tags = torch.cat(tags_list, dim=4)
            grouped, scores = parser.parse(
                final_heatmaps, tags, cfg.TEST.ADJUST, cfg.TEST.REFINE
            )

            final_results = get_final_preds(
                grouped, center, scale,
                [final_heatmaps.size(3), final_heatmaps.size(2)]
            )

        b = datetime.datetime.now()
        inf_time = (b - a).total_seconds()*1000
        print("Inf time {} ms".format(inf_time))

        # Display the resulting frame
        for person in final_results:
            color = np.random.randint(0, 255, size=3)
            color = [int(i) for i in color]
            add_joints(image, person, color, test_dataset.name, cfg.TEST.DETECTION_THRESHOLD)

        image = cv2.putText(image, "{:.2f} ms / frame".format(inf_time), (40, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.imshow('frame', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example no. 20
def main():
    args = parse_args()
    update_config(cfg, args)
    check_config(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
        cfg, is_train=False
    )

    if cfg.FP16.ENABLED:
        model = network_to_half(model)

    print("Initilaized.")

    if cfg.TEST.MODEL_FILE:
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True)
    else:
        raise Exception("No weight file. Would you like to test with your hammer?")

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    model.eval()

    # data_loader, test_dataset = make_test_dataloader(cfg)

    if cfg.MODEL.NAME == 'pose_hourglass':
        transforms = torchvision.transforms.Compose(
            [
                torchvision.transforms.ToTensor(),
            ]
        )
    else:
        # this branch is used by default
        transforms = torchvision.transforms.Compose(
            [
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]
                )
            ]
        )

    parser = HeatmapParser(cfg)

    print("Load model successfully.")

    ENABLE_CAMERA = 1
    ENABLE_VIDEO = 1

    VIDEO_ROTATE = 0
    
    if ENABLE_CAMERA:
        # read the video stream
        cap = cv2.VideoCapture(-1)
        ret, image = cap.read()
        x, y = image.shape[0:2]
        print((x, y))
        # create the output video file
        fourcc = cv2.VideoWriter_fourcc(*'I420')
        # fourcc = cv2.VideoWriter_fourcc(*'X264')
        out = cv2.VideoWriter('./result.avi', fourcc, 24, (y, x), True)
        while ret:
            ret, image = cap.read()
            if not ret:
                break
            # scale search is automatically disabled for real-time video
            base_size, center, scale = get_multi_scale_size(
                image, cfg.DATASET.INPUT_SIZE, 1.0, 1.0
            )
            with torch.no_grad():
                final_heatmaps = None
                tags_list = []
                input_size = cfg.DATASET.INPUT_SIZE
                image_resized, center, scale = resize_align_multi_scale(
                    image, input_size, 1.0, 1.0
                )
                image_resized = transforms(image_resized)
                image_resized = image_resized.unsqueeze(0).cuda()

                outputs, heatmaps, tags = get_multi_stage_outputs(
                    cfg, model, image_resized, cfg.TEST.FLIP_TEST,
                    cfg.TEST.PROJECT2IMAGE, base_size
                )

                final_heatmaps, tags_list = aggregate_results(
                    cfg, 1.0, final_heatmaps, tags_list, heatmaps, tags
                )

                final_heatmaps = final_heatmaps / float(len(cfg.TEST.SCALE_FACTOR))
                tags = torch.cat(tags_list, dim=4)
                grouped, scores = parser.parse(
                    final_heatmaps, tags, cfg.TEST.ADJUST, cfg.TEST.REFINE
                )

                final_results = get_final_preds(
                    grouped, center, scale,
                    [final_heatmaps.size(3), final_heatmaps.size(2)]
                )

            detection = save_demo_image(image, final_results, mode=1)

            detection = cv2.cvtColor(detection, cv2.COLOR_BGR2RGB)
            cv2.imshow("Pose Estimation", detection)
            out.write(detection)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        out.release()
        os.system("ffmpeg -i result.avi -c:v libx265 camera.mp4")
        cv2.destroyAllWindows()
    elif ENABLE_VIDEO:
        # read the video stream
        video_name = "./videos/test04.mp4"
        cap = cv2.VideoCapture(video_name)
        # create the output video file
        fourcc = cv2.VideoWriter_fourcc(*'I420')
        out = cv2.VideoWriter('./result.avi', fourcc, 24, (704, 576), True)
        while cap.isOpened():
            ret, image = cap.read()
            if not ret:
                break
            if VIDEO_ROTATE:  # only for the medicine-ball-throw footage
                image = cv2.resize(image, (960, 540)).transpose((1, 0, 2))
            # scale search is automatically disabled for real-time video
            base_size, center, scale = get_multi_scale_size(
                image, cfg.DATASET.INPUT_SIZE, 1.0, 1.0
            )
            with torch.no_grad():
                final_heatmaps = None
                tags_list = []
                input_size = cfg.DATASET.INPUT_SIZE
                image_resized, center, scale = resize_align_multi_scale(
                    image, input_size, 1.0, 1.0
                )
                image_resized = transforms(image_resized)
                image_resized = image_resized.unsqueeze(0).cuda()

                outputs, heatmaps, tags = get_multi_stage_outputs(
                    cfg, model, image_resized, cfg.TEST.FLIP_TEST,
                    cfg.TEST.PROJECT2IMAGE, base_size
                )

                final_heatmaps, tags_list = aggregate_results(
                    cfg, 1.0, final_heatmaps, tags_list, heatmaps, tags
                )

                final_heatmaps = final_heatmaps / float(len(cfg.TEST.SCALE_FACTOR))
                tags = torch.cat(tags_list, dim=4)
                grouped, scores = parser.parse(
                    final_heatmaps, tags, cfg.TEST.ADJUST, cfg.TEST.REFINE
                )

                final_results = get_final_preds(
                    grouped, center, scale,
                    [final_heatmaps.size(3), final_heatmaps.size(2)]
                )

            detection = save_demo_image(image, final_results, mode=1)

            detection = cv2.cvtColor(detection, cv2.COLOR_BGR2RGB)
            cv2.imshow("Pose Estimation", detection)
            out.write(detection)
            # print("frame")
            cv2.waitKey(1)

        cap.release()
        out.release()
        os.system("ffmpeg -i result.avi -c:v libx265 det04.mp4")
        cv2.destroyAllWindows()
    else:
        img_name = "./test.jpg"
        images = cv2.imread(img_name)
        image = images
        # size at scale 1.0
        base_size, center, scale = get_multi_scale_size(
            image, cfg.DATASET.INPUT_SIZE, 1.0, min(cfg.TEST.SCALE_FACTOR)
        )

        with torch.no_grad():
            final_heatmaps = None
            tags_list = []
            print(cfg.TEST.SCALE_FACTOR)
            for idx, s in enumerate(sorted(cfg.TEST.SCALE_FACTOR, reverse=True)):
                input_size = cfg.DATASET.INPUT_SIZE
                image_resized, center, scale = resize_align_multi_scale(
                    image, input_size, s, min(cfg.TEST.SCALE_FACTOR)
                )
                image_resized = transforms(image_resized)
                image_resized = image_resized.unsqueeze(0).cuda()

                outputs, heatmaps, tags = get_multi_stage_outputs(
                    cfg, model, image_resized, cfg.TEST.FLIP_TEST,
                    cfg.TEST.PROJECT2IMAGE, base_size
                )

                final_heatmaps, tags_list = aggregate_results(
                    cfg, s, final_heatmaps, tags_list, heatmaps, tags
                )

            final_heatmaps = final_heatmaps / float(len(cfg.TEST.SCALE_FACTOR))
            tags = torch.cat(tags_list, dim=4)
            grouped, scores = parser.parse(
                final_heatmaps, tags, cfg.TEST.ADJUST, cfg.TEST.REFINE
            )

            final_results = get_final_preds(
                grouped, center, scale,
                [final_heatmaps.size(3), final_heatmaps.size(2)]
            )

        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        save_demo_image(image, final_results, file_name="./result.jpg")
Example no. 21
import os
import sys

from configobj import ConfigObj

from os.path import join, dirname, exists, realpath, abspath

LAUNCH_DIR = abspath(sys.path[0])
LIB_DIR = join(LAUNCH_DIR, "..", "lib")
sys.path.insert(0, LIB_DIR)

from log import Logger
from dpkg_control import DebianControlFile
from lockfile import LockFile
from config import check_config

config_file = "%s/debfactory/etc/debfactory.conf" % os.environ["HOME"]
config = ConfigObj(config_file)

# Check for required configuration
check_config(config, ["sender_email"])

Log = Logger()


def extract_changelog(changes_file, component, pool_dir):
    """ 
    Extract the changelog according to the changes_file
    If handling a _source.changes, extract the real changelog
    If handling binary .changes just create a link for each of the binaries
    """
    global config, options

    extract_dir = "/tmp/changelog_extract"
    control_file = DebianControlFile(changes_file)
    name = control_file["Source"]
Example no. 22
def main():
    args = parse_args()
    update_config(cfg, args)
    check_config(cfg)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    dump_input = torch.rand(
        (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE))
    logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE))

    if cfg.FP16.ENABLED:
        model = network_to_half(model)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True)
    else:
        model_state_file = os.path.join(final_output_dir, 'model_best.pth.tar')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    model.eval()
    if cfg.MODEL.NAME == 'pose_hourglass':
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
        ])
    else:
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                             std=[0.229, 0.224, 0.225])
        ])
    transforms_pre = torchvision.transforms.Compose([
        ToNumpy(),
    ])
    # iterate over all datasets
    datasets_root_path = "/media/jld/DATOS_JLD/datasets"
    datasets = ["cityscapes", "kitti", "tsinghua"]
    # the test sets of cityscapes and kitti do not have ground truth --> processing not required
    datasplits = [["train", "val"], ["train"], ["train", "val", "test"]]
    keypoints_output_root_path = "/media/jld/DATOS_JLD/git-repos/paper-revista-keypoints/results"
    model_name = osp.basename(
        cfg.TEST.MODEL_FILE).split('.')[0]  # Model name + configuration
    for dsid, dataset in enumerate(datasets):
        dataset_root_path = osp.join(datasets_root_path, dataset)
        output_root_path = osp.join(keypoints_output_root_path, dataset)
        for datasplit in datasplits[dsid]:
            loggur.info(f"Processing split {datasplit} of {dataset}")
            input_img_dir = osp.join(dataset_root_path, datasplit)
            output_kps_json_dir = osp.join(output_root_path, datasplit,
                                           model_name)
            loggur.info(f"Input image dir: {input_img_dir}")
            loggur.info(f"Output pose JSON dir: {output_kps_json_dir}")
            # test_dataset = torchvision.datasets.ImageFolder("/media/jld/DATOS_JLD/git-repos/paper-revista-keypoints/test_images/", transform=transforms_pre)
            test_dataset = dsjld.BaseDataset(input_img_dir,
                                             output_kps_json_dir,
                                             transform=transforms_pre)
            test_dataset.generate_io_samples_pairs()
            # Establish the keypoint score weights (like openpifpaf in https://github.com/vita-epfl/openpifpaf/blob/master/openpifpaf/decoder/annotation.py#L44)
            n_keypoints = 17
            kps_score_weights = numpy.ones((n_keypoints,))
            kps_score_weights[:3] = 3.0
            # Normalize weights to sum 1
            kps_score_weights /= numpy.sum(kps_score_weights)
            data_loader = torch.utils.data.DataLoader(test_dataset,
                                                      batch_size=1,
                                                      shuffle=False,
                                                      num_workers=0,
                                                      pin_memory=False)
            parser = HeatmapParser(cfg)
            all_preds = []
            all_scores = []

            pbar = tqdm(
                total=len(test_dataset))  # if cfg.TEST.LOG_PROGRESS else None
            for i, (img, imgidx) in enumerate(data_loader):
                assert 1 == img.size(0), 'Test batch size should be 1'

                img = img[0].cpu().numpy()
                # size at scale 1.0
                base_size, center, scale = get_multi_scale_size(
                    img, cfg.DATASET.INPUT_SIZE, 1.0,
                    min(cfg.TEST.SCALE_FACTOR))

                with torch.no_grad():
                    final_heatmaps = None
                    tags_list = []
                    for idx, s in enumerate(
                            sorted(cfg.TEST.SCALE_FACTOR, reverse=True)):
                        input_size = cfg.DATASET.INPUT_SIZE
                        image_resized, center, scale = resize_align_multi_scale(
                            img, input_size, s, min(cfg.TEST.SCALE_FACTOR))
                        image_resized = transforms(image_resized)
                        image_resized = image_resized.unsqueeze(0).cuda()

                        outputs, heatmaps, tags = get_multi_stage_outputs(
                            cfg, model, image_resized, cfg.TEST.FLIP_TEST,
                            cfg.TEST.PROJECT2IMAGE, base_size)

                        final_heatmaps, tags_list = aggregate_results(
                            cfg, s, final_heatmaps, tags_list, heatmaps, tags)

                    final_heatmaps = final_heatmaps / float(
                        len(cfg.TEST.SCALE_FACTOR))
                    tags = torch.cat(tags_list, dim=4)
                    grouped, scores = parser.parse(final_heatmaps, tags,
                                                   cfg.TEST.ADJUST,
                                                   cfg.TEST.REFINE)

                    final_results = get_final_preds(
                        grouped, center, scale,
                        [final_heatmaps.size(3),
                         final_heatmaps.size(2)])

                # if cfg.TEST.LOG_PROGRESS:
                pbar.update()
                # Save all keypoints in a JSON dict
                final_json_results = []
                for kps in final_results:
                    kpsdict = {}
                    x = kps[:, 0]
                    y = kps[:, 1]
                    kps_scores = kps[:, 2]
                    kpsdict['keypoints'] = kps[:, 0:3].tolist()
                    # bounding box by means of minmax approach (without zero elements)
                    xmin = numpy.float64(numpy.min(x[numpy.nonzero(x)]))
                    xmax = numpy.float64(numpy.max(x))
                    width = numpy.float64(xmax - xmin)
                    ymin = numpy.float64(numpy.min(y[numpy.nonzero(y)]))
                    ymax = numpy.float64(numpy.max(y))
                    height = numpy.float64(ymax - ymin)
                    kpsdict['bbox'] = [xmin, ymin, width, height]
                    # Calculate pose score as a weighted mean of keypoints scores
                    kpsdict['score'] = numpy.float64(
                        numpy.sum(kps_score_weights *
                                  numpy.sort(kps_scores)[::-1]))
                    final_json_results.append(kpsdict)

                with open(test_dataset.output_json_files_list[imgidx],
                          "w") as f:
                    json.dump(final_json_results, f)

                all_preds.append(final_results)
                all_scores.append(scores)

            if cfg.TEST.LOG_PROGRESS:
                pbar.close()
Example no. 23
def evaluate(file,
             error_rates,
             error_mode,
             n_games,
             n_jobs,
             verbose,
             file_suffix='',
             transfer_to_distance=None):
    time_id = datetime.now()

    # Load the corresponding config files
    savedir = file[:file.rfind("/")]

    if not os.path.exists("%s/config.json" % savedir):
        raise ValueError("Configuration file does not exist (%s)." %
                         ("%s/config.json" % savedir))

    with open("%s/config.json" % savedir) as f:
        config = json.load(f)

    config = check_config(config)

    # Load the genome to be evaluated
    if not os.path.exists(file):
        raise ValueError("Genome file does not exist.")

    with open(file, "rb") as f:
        genome = pickle.load(f)

    if not os.path.exists("%s/population-config" % savedir):
        raise ValueError("Population configuration file does not exist.")

    population_config = neat.Config(neat.DefaultGenome,
                                    neat.DefaultReproduction,
                                    neat.DefaultSpeciesSet,
                                    neat.DefaultStagnation,
                                    "%s/population-config" % savedir)

    if config["Training"]["network_type"] == 'ffnn':
        if transfer_to_distance is None:
            net = SimpleFeedForwardNetwork.create(genome, population_config)
            code_distance = config["Physics"]["distance"]
        elif transfer_to_distance > config["Physics"]["distance"]:

            generate_config_file(savedir, config, transfer_to_distance)

            pop_config_transferred = neat.Config(
                neat.DefaultGenome, neat.DefaultReproduction,
                neat.DefaultSpeciesSet, neat.DefaultStagnation, savedir +
                "/population-config-temp-d" + str(transfer_to_distance))

            new_genome = pop_config_transferred.genome_type(0)
            new_genome.configure_new(pop_config_transferred.genome_config)
            new_genome.connections = {}
            new_genome.nodes = {}

            transplantate(pop_config_transferred.genome_config, new_genome,
                          transfer_to_distance, genome,
                          config["Physics"]["distance"])
            net = SimpleFeedForwardNetwork.create(new_genome,
                                                  pop_config_transferred)
            code_distance = transfer_to_distance

    elif config["Training"]["network_type"] == 'cppn':
        # HyperNEAT: possibility of evaluating a CPPN trained on d=3 data on d>3 data
        if transfer_to_distance is None:
            code_distance = config["Physics"]["distance"]
            connection_weight_scale = 1
        elif transfer_to_distance > config["Physics"]["distance"]:
            code_distance = transfer_to_distance
            # As there are more connections in larger codes, we need to scale down the connection weights by this factor
            connection_weight_scale = config["Physics"][
                "distance"]**2 / transfer_to_distance**2
            #connection_weight_scale = 0.01
        else:
            raise ValueError(
                "Knowledge transfer can only be done to higher-distance codes."
            )

        if config["Training"]["substrate_type"] == 0:
            substrate = SubstrateType0(
                code_distance,
                config["Training"]["rotation_invariant_decoder"])
        elif config["Training"]["substrate_type"] == 1:
            substrate = SubstrateType1(code_distance)

        #print(code_distance, connection_weight_scale)
        cppn_network = FeedForwardNetwork.create(genome, population_config)
        net = PhenotypeNetwork.create(cppn_network, substrate,
                                      connection_weight_scale)

    # DIRTY: To ensure that samples are generated according to transfer_to_distance
    config["Physics"]["distance"] = code_distance

    ## (PARALLEL) EVALUATION LOOP
    fitness = []
    results = {
        "fitness": [],
        "error_rate": [],
        "outcome": [],
        "nsteps": [],
        "initial_qubits_flips": []
    }

    # with statement to close properly the parallel processes
    with Pool(n_jobs) as pool:
        # Game evaluation
        for error_rate in error_rates:
            fitness.append(0)

            jobs = []
            for i in range(n_games):
                jobs.append(
                    pool.apply_async(get_fitness,
                                     (net, config, error_rate, error_mode)))

            for job in jobs:
                output, errors_id = job.get(timeout=None)

                fitness[-1] += output["fitness"]
                for k, v in output.items():
                    results[k].append(v)
                results["initial_qubits_flips"].append(errors_id)

            fitness[-1] /= n_games
            print("Evaluation on error_rate=%.2f is done, %.2f success." %
                  (error_rate, fitness[-1]))

        elapsed = datetime.now() - time_id
        print("Total running time: %d.%06d s" %
              (elapsed.seconds, elapsed.microseconds))

        # Always overwrite the result of evaluation
        # Synthesis report
        if transfer_to_distance is not None:
            file_suffix += ".transfered_distance%i" % transfer_to_distance

        savefile = "%s_evaluation.ngames=%i.errormode=%i.%s.csv" % (
            file.replace(".pkl", ""), n_games, error_mode, file_suffix)
        if os.path.exists(savefile):
            print("Deleting evaluation file %s" % savefile)
            os.remove(savefile)

        print([error_rates, fitness])
        df = pd.DataFrame(list(zip(error_rates, fitness)),
                          columns=["error_rate", "mean_fitness"])
        df.to_csv(savefile)

        # Detailed report
        savefile = "%s_detailed_results_evaluation.ngames=%i.%s.csv" % (
            file.replace(".pkl", ""), n_games, file_suffix)
        if os.path.exists(savefile):
            print("Deleting evaluation file %s" % savefile)
            os.remove(savefile)

        pd.DataFrame.from_dict(results).to_csv(savefile)

    return error_rates, fitness
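
The evaluation loop above fans out n_games calls per error rate with Pool.apply_async and averages the returned fitness. A minimal, self-contained sketch of that pattern, with a trivial stand-in for get_fitness (the real one plays a full decoding game):

from multiprocessing import Pool

def get_fitness(error_rate):
    # trivial stand-in: pretend the success probability is 1 - error_rate
    return 1.0 - error_rate

if __name__ == "__main__":
    error_rates = [0.01, 0.05, 0.10]
    n_games = 100
    fitness = []
    with Pool(4) as pool:
        for error_rate in error_rates:
            jobs = [pool.apply_async(get_fitness, (error_rate,))
                    for _ in range(n_games)]
            # block on each job and average the n_games results
            fitness.append(sum(job.get() for job in jobs) / n_games)
    print(list(zip(error_rates, fitness)))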
Example n. 24
def main():

    # ---------- Your code here ---------- #
    async def trader_bot(data):
        """
        User trade function, fill in with trade logic.

        Recieves blockchain data from Token Analyst websocket.  

        
        """

        # EXAMPLE
        # - checks for outflow above a threshold to Bitmex
        # - checks that an API call would stay within the rate limit
        # - makes a limit buy order
        # - places the order on Bitmex
        # - logs the order and the order response

        last_trade_price = bitmex.get_last_trade_price()

        outflow_threshold = 1000
        outflow_result = token_analyst.check_for_outflow(
            data=data, threshold=outflow_threshold)

        if outflow_result:
            rate_limit.check()

            price = int(last_trade_price - 100)
            my_order = trade.limit_buy(quantity=10, price=price)
            order_response = await bitmex.place_order(order=my_order)

            my_orders.append(order_response)
            order_logger.info("outflow trade - %s - response - %s" %
                              (my_order, order_response))

    # ----------------- end trader_bot ------------------- #

    logging.debug("---------------- New Start -------------------")
    order_logger.info("---------------- New Start -------------------")

    (TOKEN_ANALYST_API_KEY, BITMEX_API_KEY, BITMEX_API_SECRET,
     DEFAULT_BITMEX_SYMBOL, BITMEX_BASE_URL, BITMEX_WS_URL) = check_config()

    token_analyst = TokenAnalyst(key=TOKEN_ANALYST_API_KEY)

    bitmex = BitMEX(key=BITMEX_API_KEY,
                    secret=BITMEX_API_SECRET,
                    symbol=DEFAULT_BITMEX_SYMBOL,
                    base_url=BITMEX_BASE_URL,
                    ws_url=BITMEX_WS_URL)

    trade = Trade(symbol=DEFAULT_BITMEX_SYMBOL, orderIDPrefex="traderbot_")

    rate_limit = RateLimitTracker(limit=30, timeframe=60)

    my_orders = []

    # Below creates an event loop and 2 main tasks,
    # reading the Bitmex and Token Analyst websockets.
    #
    # Connecting to Bitmex gets us updates on
    # position, margin, order, wallet, execution and trade data.
    #
    # Connecting to the Token Analyst websocket
    # yields us on-chain data we can use to make trades.
    # When that data is received it is sent to the
    # trader_bot function above for us to act on.

    loop = asyncio.get_event_loop()

    async def token_analyst_ws_loop():
        """
        Connects to the Token Analyst websocket,
        receives on-chain data, and sends it to trader_bot.
        """
        async for data in token_analyst.connect(
                channel="btc_confirmed_exchange_flows"):
            if data is None:
                continue
            await trader_bot(data)

    async def bitmex_ws_loop():
        """
        Connects to bitmex websocket to get updates on 
        position, margin, order, wallet, and trade data.
        
        """
        await bitmex.connect()

    try:
        loop.create_task(bitmex_ws_loop())
        loop.create_task(token_analyst_ws_loop())

        loop.run_forever()
    finally:
        loop.stop()
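
RateLimitTracker(limit=30, timeframe=60) is constructed above but its implementation is not part of this excerpt. A plausible sliding-window sketch of such a tracker (the class body and the blocking behaviour of check() are assumptions, not the bot's actual code):

import time
from collections import deque

class RateLimitTracker:
    """Hypothetical sliding-window limiter matching the usage above."""

    def __init__(self, limit=30, timeframe=60):
        self.limit = limit          # max calls per window
        self.timeframe = timeframe  # window length in seconds
        self.calls = deque()        # timestamps of recent calls

    def check(self):
        # drop timestamps that fell out of the window
        now = time.monotonic()
        while self.calls and now - self.calls[0] > self.timeframe:
            self.calls.popleft()
        # if the window is full, wait until the oldest call expires
        if len(self.calls) >= self.limit:
            time.sleep(self.timeframe - (now - self.calls[0]))
        self.calls.append(time.monotonic())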
Example n. 25
            interval = 5
        # await asyncio.sleep(interval)
        time.sleep(interval)


def ask_exit(_sig_name):
    logging.warning('got signal {}: exit'.format(_sig_name))
    loop.stop()


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)-8s : %(message)s')
    logging.info('start...')
    read_config()
    check_config()
    cfg['record_id'] = get_record_id(cfg['domain'], cfg['sub_domain'])
    logging.info("get record_id: %s" % str(cfg['record_id']))
    logging.info("watching ip for ddns: %s.%s" %
                 (cfg['sub_domain'], cfg['domain']))

    loop = asyncio.get_event_loop()
    for sig_name in ('SIGINT', 'SIGTERM'):
        try:
            loop.add_signal_handler(getattr(signal, sig_name),
                                    functools.partial(ask_exit, sig_name))
        except NotImplementedError:
            pass  # for compatibility with Windows
    try:
        loop.run_until_complete(main())
    except (KeyboardInterrupt, RuntimeError):
        pass  # graceful shutdown (body assumed; the excerpt ends here)
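
The head of this excerpt's main() is cut off, but it evidently polls the current IP and sleeps interval seconds between checks; the commented-out await asyncio.sleep(interval) hints at the non-blocking variant. A minimal sketch of such a watch loop, with hypothetical get_ip/update_record coroutines supplied by the caller:

import asyncio

async def watch_ip(get_ip, update_record, interval=5):
    """Poll the public IP and update the DNS record when it changes."""
    last_ip = None
    while True:
        ip = await get_ip()               # hypothetical coroutine
        if ip and ip != last_ip:
            await update_record(ip)       # hypothetical coroutine
            last_ip = ip
        await asyncio.sleep(interval)     # non-blocking, unlike time.sleep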
Example n. 26
def main():
    """
    Enter point
    """
    sys.stdout.write("\x1b]0;poezio\x07")
    sys.stdout.flush()
    import config
    config_path = config.check_create_config_dir()
    config.run_cmdline_args(config_path)
    config.create_global_config()
    config.check_create_log_dir()
    config.check_create_cache_dir()
    config.setup_logging()
    config.post_logging_setup()

    from config import options

    if options.check_config:
        config.check_config()
        sys.exit(0)

    import theming
    theming.update_themes_dir()

    import logger
    logger.create_logger()

    import roster
    roster.create_roster()

    import core

    log = logging.getLogger('')

    signal.signal(signal.SIGINT, signal.SIG_IGN) # ignore ctrl-c
    cocore = singleton.Singleton(core.Core)
    signal.signal(signal.SIGUSR1, cocore.sigusr_handler) # reload the config
    signal.signal(signal.SIGHUP, cocore.exit_from_signal)
    signal.signal(signal.SIGTERM, cocore.exit_from_signal)
    if options.debug:
        cocore.debug = True
    cocore.start()

    from slixmpp.exceptions import IqError, IqTimeout
    def swallow_iqerrors(loop, context):
        """Do not log unhandled iq errors and timeouts"""
        if not isinstance(context['exception'], (IqError, IqTimeout)):
            loop.default_exception_handler(context)

    # Warning: asyncio must always be imported after the config. Otherwise
    # the asyncio logger will not follow our configuration and won't write
    # the tracebacks in the correct file, etc
    import asyncio
    loop = asyncio.get_event_loop()
    loop.set_exception_handler(swallow_iqerrors)

    loop.add_reader(sys.stdin, cocore.on_input_readable)
    loop.add_signal_handler(signal.SIGWINCH, cocore.sigwinch_handler)
    cocore.xmpp.start()
    loop.run_forever()
    # We reach this point only when loop.stop() is called
    try:
        cocore.reset_curses()
    except:
        pass
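
The swallow_iqerrors handler above is an instance of a general asyncio pattern: install a custom exception handler that filters unhandled-exception reports by type and delegates the rest to the default handler. A standalone sketch of the same pattern, with a stand-in exception class instead of slixmpp's IqError/IqTimeout:

import asyncio

class IgnorableError(Exception):
    """Stand-in for slixmpp's IqError/IqTimeout pair."""

def selective_handler(loop, context):
    # forward everything except IgnorableError to the default handler
    if not isinstance(context.get('exception'), IgnorableError):
        loop.default_exception_handler(context)

loop = asyncio.new_event_loop()
loop.set_exception_handler(selective_handler)
# this report is swallowed; any other exception type would be logged
loop.call_exception_handler({'message': 'demo', 'exception': IgnorableError()})
loop.close()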
Example n. 27
    parser = argparse.ArgumentParser()
    parser.add_argument("--config",
                        default="coco_hrnet_w32_512.yaml",
                        help="config file (.yaml)")
    parser.add_argument(
        "--image",
        default=
        "https://miro.medium.com/max/1200/1*56MtNM2fh_mdG3iGnD7_ZQ.jpeg",
        help="pass any image URL")
    parser.add_argument('--model_path',
                        default="weights/pose_higher_hrnet_w32_512.pth",
                        help="path to model file")
    args = parser.parse_args()

    update_config(cfg, args)
    check_config(cfg)

    print("-" * 70)
    print(":: Loading the model")
    model = PoseHigherResolutionNet(cfg)
    map_location = "cpu"
    if CUDA_IS_AVAILABLE:
        model = model.cuda()
        model = nn.DataParallel(model)
        map_location = "cuda:0"
    state_dict = torch.load(args.model_path, map_location=map_location)
    model.load_state_dict(state_dict)
    model.eval()

    # define the image transformations to apply to each image
    # (the excerpt ends mid-statement; ImageNet normalization, as used in
    # the later examples here, is the usual continuation)
    image_transformations = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
Example n. 28
def main():
    args = parse_args()
    update_config(cfg, args)
    check_config(cfg)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    dump_input = torch.rand(
        (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE))
    logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE))

    if cfg.FP16.ENABLED:
        model = network_to_half(model)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True)
    else:
        model_state_file = os.path.join(final_output_dir, 'model_best.pth.tar')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    model.eval()

    test_dataset = HIEDataset(DATA_PATH)
    data_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=0,
                                              pin_memory=False)

    if cfg.MODEL.NAME == 'pose_hourglass':
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
        ])
    else:
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(
                # mean=[0.485, 0.456, 0.406],
                # std=[0.229, 0.224, 0.225]
                mean=[0.5, 0.5, 0.5],
                std=[0.5, 0.5, 0.5])
        ])

    parser = HeatmapParser(cfg)
    all_preds = []
    all_scores = []

    pbar = tqdm(total=len(test_dataset)) if cfg.TEST.LOG_PROGRESS else None
    for i, images in enumerate(data_loader):
        # for i, (images, annos) in enumerate(data_loader):
        assert 1 == images.size(0), 'Test batch size should be 1'

        image = images[0].cpu().numpy()
        # size at scale 1.0
        if i % 100 == 0:
            print("Processing image %d" % i)
        base_size, center, scale = get_multi_scale_size(
            image, cfg.DATASET.INPUT_SIZE, 1.0, min(cfg.TEST.SCALE_FACTOR))
        # print("Multi-scale end")

        with torch.no_grad():
            final_heatmaps = None
            tags_list = []
            for idx, s in enumerate(sorted(cfg.TEST.SCALE_FACTOR,
                                           reverse=True)):
                input_size = cfg.DATASET.INPUT_SIZE
                image_resized, center, scale = resize_align_multi_scale(
                    image, input_size, s, min(cfg.TEST.SCALE_FACTOR))
                image_resized = transforms(image_resized)
                image_resized = image_resized.unsqueeze(0).cuda()

                outputs, heatmaps, tags = get_multi_stage_outputs(
                    cfg, model, image_resized, cfg.TEST.FLIP_TEST,
                    cfg.TEST.PROJECT2IMAGE, base_size)

                final_heatmaps, tags_list = aggregate_results(
                    cfg, s, final_heatmaps, tags_list, heatmaps, tags)

            final_heatmaps = final_heatmaps / float(len(cfg.TEST.SCALE_FACTOR))
            tags = torch.cat(tags_list, dim=4)
            grouped, scores = parser.parse(final_heatmaps, tags,
                                           cfg.TEST.ADJUST, cfg.TEST.REFINE)

            final_results = get_final_preds(
                grouped, center, scale,
                [final_heatmaps.size(3),
                 final_heatmaps.size(2)])

        if cfg.TEST.LOG_PROGRESS:
            pbar.update()

        if i % cfg.PRINT_FREQ == 0:
            prefix = '{}_{}'.format(
                os.path.join(final_output_dir, 'result_valid'), i)
            # logger.info('=> write {}'.format(prefix))
            # save_valid_image(image, final_results, '{}.jpg'.format(prefix), dataset=test_dataset.name)
            # save_valid_image(image, final_results, '{}.jpg'.format(prefix),dataset='HIE20')
            # save_debug_images(cfg, image_resized, None, None, outputs, prefix)

        all_preds.append(final_results)
        all_scores.append(scores)

    if cfg.TEST.LOG_PROGRESS:
        pbar.close()

    # save preds and scores as json
    test_dataset.save_json(all_preds, all_scores)
    print('Save finished!')
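
Both this example and Example n. 31 below use the same multi-scale test-time scheme: run the model at each scale in cfg.TEST.SCALE_FACTOR, accumulate the already size-aligned heatmaps, then divide by the number of scales. A toy sketch of just that aggregation step (shapes are illustrative):

import torch

def aggregate_heatmaps(heatmaps_per_scale):
    # heatmaps_per_scale: list of (1, C, H, W) tensors already projected
    # to a common size; test-time augmentation simply averages them
    final = None
    for hm in heatmaps_per_scale:
        final = hm if final is None else final + hm
    return final / float(len(heatmaps_per_scale))

if __name__ == "__main__":
    hms = [torch.rand(1, 17, 128, 128) for _ in range(3)]
    print(aggregate_heatmaps(hms).shape)  # torch.Size([1, 17, 128, 128])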
Example n. 29
def downloadQuery(queryString, configFileName, pcdbFileName, outputDir, download_division_id):
    pcdbFileName = helper.removeQuotes(pcdbFileName)
    outputDir = helper.removeQuotes(outputDir)    
    
    # Check that the first three arguments specify valid paths.
    if not os.path.exists(pcdbFileName):
        print('ERROR: could not find PCDB file "' + pcdbFileName + '"')
        sys.exit(1)
    if not os.path.exists(outputDir):
        print('ERROR: could not find output directory "' + outputDir + '"')
        sys.exit(1)
    
    if not os.path.exists(configFileName):
        print('ERROR: could not find config file "' + configFileName + '"')
        sys.exit(1)
    else:
        # Read the config file.
        configFile = open(configFileName, 'r')
    
        for line in configFile:
            line = line.strip()
            if len(line) == 0:
                continue
            
            # Skip over comments.
            if line[0] == '#':
                continue
            
            splitLine = line.split(':')
            if len(splitLine) != 2:
                continue
            
            tag = splitLine[0].strip()
            value = splitLine[1].strip()
            
            if len(tag) == 0 or len(value) == 0:
                continue
            
            if tag == 'startTime':
                config.startTime = int(value)
                print('startTime read from config file...')
            elif tag == 'endTime':
                config.endTime = int(value)
                print('endTime read from config file...')
            elif tag == 'numSearchDivisions':
                config.numSearchDivisions = int(value)
                print('numSearchDivisions read from config file...')
            elif tag == 'numSearchThreads':
                config.numSearchThreads = int(value)
                print('numSearchThreads read from config file...')
            elif tag == 'thumbnailSize':
                config.thumbnailSize = int(value)
                print('thumbnailSize read from config file...')
            elif tag == 'imagesPerFolder':
                config.imagesPerFolder = int(value)
                print('imagesPerFolder read from config file...')
            elif tag == 'getExifEnabled':
                config.getExifEnabled = (value.lower() == 'true')
                print('getExifEnabled read from config file...')
            elif tag == 'createThumbnailEnabled':
                config.createThumbnailEnabled = (value.lower() == 'true')
                print('createThumbnailEnabled read from config file...')
            elif tag == 'computeFocalEnabled':
                config.computeFocalEnabled = (value.lower() == 'true')
                print('computeFocalEnabled read from config file...')
            elif tag == 'maxImages':
                config.maxImages = int(value)
            
        configFile.close()
        config.check_config()
   
    
    if len(queryString) == 0:
        print('ERROR: no query string found in query file')
        sys.exit(1)
    
    basePathName = outputDir + '/' + helper.replaceSpaces(queryString)
    metaPath = basePathName + config.metaExtension
    completedPath = basePathName + config.completedExtension
    logPath = basePathName + config.logExtension
    indicesPath = logPath + '/' + config.indicesFolderName
    
    # Create the meta/log/completed folders if they don't already exist.
    if not os.path.exists(metaPath):
        os.mkdir(metaPath)
    if not os.path.exists(completedPath):
        os.mkdir(completedPath)
    if not os.path.exists(logPath):
        os.mkdir(logPath)
    if not os.path.exists(indicesPath):
        os.mkdir(indicesPath)
    
    pcdb = PCDB(pcdbFileName)
    
    settingsFileName = logPath + '/settings.txt'
    
    # Check to see if existing settings have been written to file.
    if os.path.exists(settingsFileName):
        settingsFile = open(settingsFileName, 'r')
        
        # Iterate through the existing settings and check to see if any have changed.
        settingsChanged = False
        pcdbChanged = False
        for line in settingsFile:
            line = line.strip()
            if len(line) == 0:
                continue
            
            colon = line.find(':')
            if colon == -1:
                continue
            
            tag = line[:colon].strip()
            value = line[colon+1:].strip()
            
            if len(tag) == 0 or len(value) == 0:
                continue
            
            if tag == 'queryString':
                if value != queryString:
                    print('')
                    print('WARNING: queryString value has changed!')
                    settingsChanged = True
            elif tag == 'startTime':
                if int(value) != config.startTime:
                    print('')
                    print('WARNING: startTime value has changed!')
                    settingsChanged = True
            elif tag == 'endTime':
                if int(value) != config.endTime:
                    print('')
                    print('WARNING: endTime value has changed!')
                    settingsChanged = True
            elif tag == 'numSearchDivisions':
                if int(value) != config.numSearchDivisions:
                    print('')
                    print('WARNING: numSearchDivisions value has changed!')
                    settingsChanged = True
            elif tag == 'thumbnailSize':
                if int(value) != config.thumbnailSize:
                    print('')
                    print('WARNING: thumbnailSize value has changed!')
                    settingsChanged = True
            elif tag == 'imagesPerFolder':
                if int(value) != config.imagesPerFolder:
                    print('')
                    print('WARNING: imagesPerFolder value has changed!')
                    settingsChanged = True
            elif tag == 'getExifEnabled':
                if value != str(config.getExifEnabled):
                    print('')
                    print('WARNING: getExifEnabled value has changed!')
                    settingsChanged = True
            elif tag == 'createThumbnailEnabled':
                if value != str(config.createThumbnailEnabled):
                    print('')
                    print('WARNING: createThumbnailEnabled value has changed!')
                    settingsChanged = True
            elif tag == 'computeFocalEnabled':
                if value != str(config.computeFocalEnabled):
                    print('')
                    print('WARNING: computeFocalEnabled value has changed!')
                    settingsChanged = True
            elif tag == 'pcdb' and not pcdbChanged:
                dot = value.find('.')
                dirName = value[:dot]
                value = value[dot+1:]
                
                equal = value.find('=')
                varName = value[:equal]
                value = value[equal+1:]
                
                if json.loads(value) != getattr(pcdb.dirs[dirName], varName):
                    print('')
                    print('WARNING: pcdb has changed!')
                    settingsChanged = True
                    pcdbChanged = True
        
        settingsFile.close()
        
        if settingsChanged:
            print('Continue? (y/n)')
            answer = input().strip().lower()
            if answer not in ('y', 'yes'):
                sys.exit(0)
            print('')
    
    # Write the current settings to file.
    settingsFile = open(settingsFileName, 'w')
    settingsFile.write('queryString: ' + queryString + '\n')
    settingsFile.write('startTime: ' + str(config.startTime) + '\n')
    settingsFile.write('endTime: ' + str(config.endTime) + '\n')
    settingsFile.write('numSearchDivisions: ' + str(config.numSearchDivisions) + '\n')
    settingsFile.write('thumbnailSize: ' + str(config.thumbnailSize) + '\n')
    settingsFile.write('imagesPerFolder: ' + str(config.imagesPerFolder) + '\n')
    settingsFile.write('getExifEnabled: ' + str(config.getExifEnabled) + '\n')
    settingsFile.write('createThumbnailEnabled: ' + str(config.createThumbnailEnabled) + '\n')
    settingsFile.write('computeFocalEnabled: ' + str(config.computeFocalEnabled) + '\n')
    for key in pcdb.dirs:
        settingsFile.write('pcdb: ' + key + '.extension=' + json.dumps(pcdb.dirs[key].extension) + '\n')
        settingsFile.write('pcdb: ' + key + '.divisions=' + json.dumps(pcdb.dirs[key].divisions) + '\n')
        settingsFile.write('pcdb: ' + key + '.folders=' + json.dumps(pcdb.dirs[key].folders) + '\n')
    settingsFile.close()
    
    print('Query String: ' + queryString)
    print('')
    
    pcdb.checkDirExistence(PCDB.imageKey)
    if config.createThumbnailEnabled:
        pcdb.checkDirExistence(PCDB.thumbnailKey)
    
    # Create the folder structures for the image and thumbnail downloads.
    print('Creating folders...')
    numImageFoldersCreated = pcdb.createFolders(PCDB.imageKey)
    print(str(numImageFoldersCreated) + ' image folders created')
    numMetaFoldersCreated = pcdb.createFolders(PCDB.meta)
    print(str(numMetaFoldersCreated) + ' meta folders created')
    if config.createThumbnailEnabled:
        numThumbnailFoldersCreated = pcdb.createFolders(PCDB.thumbnailKey)
        print(str(numThumbnailFoldersCreated) + ' thumbnail folders created')
    print('')
    
    maxImageIndex = pcdb.getNumFolders(PCDB.imageKey)
    if config.createThumbnailEnabled:
        maxImageIndex = min(maxImageIndex, pcdb.getNumFolders(PCDB.thumbnailKey))
    maxImageIndex *= config.imagesPerFolder
    FlickrDownloaderThread.globalMaxImageIndex = maxImageIndex
    print('Maximum image index: ' + str(FlickrDownloaderThread.globalMaxImageIndex))
    print('')

    if (config.maxImages != -1):
        print('Set max images as ' + str(config.maxImages))
        FlickrDownloaderThread.globalMaxImageIndex = config.maxImages

    global_image_index = computeStartingIndex(indicesPath)
    if global_image_index >= FlickrDownloaderThread.globalMaxImageIndex:
        print('MAXIMUM IMAGE INDEX REACHED')
        sys.exit(0)
        
    try:
        next_search_thread_id = download_division_id * config.numThreadPerDivisions
        activeThreads = [None] * config.numSearchThreads

        FlickrDownloaderThread.globalImageIndex = global_image_index
        print('Starting at image index: ' + str(FlickrDownloaderThread.globalImageIndex))

        for i in range(0, config.numSearchThreads):
            processNum = next_search_thread_id
            next_search_thread_id += 1
            print('Starting thread: ' + str(processNum))
            print('')
            activeThreads[i] = createThread(processNum, queryString, pcdbFileName, outputDir)
        
        for i in range(0, config.numSearchThreads):
            activeThreads[i].start()
            time.sleep(1)

    
        numCompleted = 0
        numStarted = config.numSearchThreads
        # While all of the processes have not finished
        while numCompleted < config.numThreadPerDivisions:
            # Iterate over the active processes
            for processNum in range(0, config.numSearchThreads):
                # If a process actually exists
                if activeThreads[processNum] is not None:
                    # If the process is finished
                    if not activeThreads[processNum].is_alive():
                        numCompleted += 1
                        activeThreads[processNum] = None
                        if numStarted < config.numSearchDivisions:
                            print('Starting thread: ' + str(numStarted))
                            print('')
                            activeThreads[processNum] = createThread(next_search_thread_id, queryString, pcdbFileName, outputDir)
                            activeThreads[processNum].start()
                            next_search_thread_id += 1
                            numStarted += 1
                            time.sleep(1)
                    
            time.sleep(5)
            # poll progress from a live thread; thread 0 may already be done and cleared
            liveThreads = [t for t in activeThreads if t is not None]
            if not liveThreads:
                break
            index = liveThreads[0].getImageIndex()
            if index >= FlickrDownloaderThread.globalMaxImageIndex:
                print('')
                print('MAXIMUM IMAGE INDEX REACHED')
                print('Stopping threads...')
                waitForThreads(activeThreads, 2)
                stopThreads(activeThreads)
                break
                
    except KeyboardInterrupt:
        print('')
        print('KEYBOARD INTERRUPT')
        print('Stopping threads...')
        print('')
        stopThreads(activeThreads)
        subprocess.call([config.PythonCommand,
                         config.CreateImageListPath,
                         completedPath])
        sys.exit(1)
    
    subprocess.call([config.PythonCommand,
                     config.CreateImageListPath,
                     completedPath])
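
The long if/elif chain that reads the config file near the top of this example maps each tag onto an attribute one case at a time. A table-driven sketch of the same idea (cfg here is any object or module with those attributes; this is meant to mirror, not replace, the original behaviour):

# tag -> conversion, mirroring the cases handled in the chain above
CONVERTERS = {
    'startTime': int, 'endTime': int, 'numSearchDivisions': int,
    'numSearchThreads': int, 'thumbnailSize': int, 'imagesPerFolder': int,
    'maxImages': int,
    'getExifEnabled': lambda v: v.lower() == 'true',
    'createThumbnailEnabled': lambda v: v.lower() == 'true',
    'computeFocalEnabled': lambda v: v.lower() == 'true',
}

def apply_config_line(cfg, tag, value):
    # set the attribute only for known tags, using the matching converter
    if tag in CONVERTERS:
        setattr(cfg, tag, CONVERTERS[tag](value))
        print(tag + ' read from config file...')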
Example n. 30
def main():
    args = parse_args()
    update_config(cfg, args)
    check_config(cfg)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    dump_input = torch.rand(
        (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE))
    logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE))

    if cfg.FP16.ENABLED:
        model = network_to_half(model)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True)
    else:
        model_state_file = os.path.join(final_output_dir, 'model_best.pth.tar')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    model.eval()

    data_loader, test_dataset = make_test_dataloader(cfg)

    if cfg.MODEL.NAME == 'pose_hourglass':
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
        ])
    else:
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                             std=[0.229, 0.224, 0.225])
        ])

    parser = HeatmapParser(cfg)
    all_preds = []
    all_scores = []

    pbar = tqdm(total=len(test_dataset)) if cfg.TEST.LOG_PROGRESS else None
    for i, (images, annos) in enumerate(data_loader):
        assert 1 == images.size(0), 'Test batch size should be 1'

        image = images[0].cpu().numpy()
        # size at scale 1.0
        base_size, center, scale = get_multi_scale_size(
            image, cfg.DATASET.INPUT_SIZE, 1.0, min(cfg.TEST.SCALE_FACTOR))

        with torch.no_grad():
            final_heatmaps = None
            tags_list = []
            for idx, s in enumerate(sorted(cfg.TEST.SCALE_FACTOR,
                                           reverse=True)):
                input_size = cfg.DATASET.INPUT_SIZE
                image_resized, center, scale = resize_align_multi_scale(
                    image, input_size, s, min(cfg.TEST.SCALE_FACTOR))
                image_resized = transforms(image_resized)
                image_resized = image_resized.unsqueeze(0).cuda()

                outputs, heatmaps, tags = get_multi_stage_outputs(
                    cfg, model, image_resized, cfg.TEST.FLIP_TEST,
                    cfg.TEST.PROJECT2IMAGE, base_size)

                final_heatmaps, tags_list = aggregate_results(
                    cfg, s, final_heatmaps, tags_list, heatmaps, tags)

            final_heatmaps = final_heatmaps / float(len(cfg.TEST.SCALE_FACTOR))
            tags = torch.cat(tags_list, dim=4)
            grouped, scores = parser.parse(final_heatmaps, tags,
                                           cfg.TEST.ADJUST, cfg.TEST.REFINE)

            final_results = get_final_preds(
                grouped, center, scale,
                [final_heatmaps.size(3),
                 final_heatmaps.size(2)])
            if cfg.RESCORE.USE:
                try:
                    scores = rescore_valid(cfg, final_results, scores)
                except Exception:
                    print('rescore_valid failed; keeping the original scores')
        if cfg.TEST.LOG_PROGRESS:
            pbar.update()

        if i % cfg.PRINT_FREQ == 0:
            prefix = '{}_{}'.format(
                os.path.join(final_output_dir, 'result_valid'), i)
            # logger.info('=> write {}'.format(prefix))
            save_valid_image(image,
                             final_results,
                             '{}.jpg'.format(prefix),
                             dataset=test_dataset.name)
            # for scale_idx in range(len(outputs)):
            #     prefix_scale = prefix + '_output_{}'.format(
            #         # cfg.DATASET.OUTPUT_SIZE[scale_idx]
            #         scale_idx
            #     )
            #     save_debug_images(
            #         cfg, images, None, None,
            #         outputs[scale_idx], prefix_scale
            #     )
        all_preds.append(final_results)
        all_scores.append(scores)

    if cfg.TEST.LOG_PROGRESS:
        pbar.close()

    name_values, _ = test_dataset.evaluate(cfg, all_preds, all_scores,
                                           final_output_dir)

    if isinstance(name_values, list):
        for name_value in name_values:
            _print_name_value(logger, name_value, cfg.MODEL.NAME)
    else:
        _print_name_value(logger, name_values, cfg.MODEL.NAME)
Example n. 31
def main():
    args = get_args()

    # process the arguments
    cDNA = args.cDNA
    genome = args.genome
    threads = args.threads
    if args.transcriptome:
        trans_mode = True
    else:
        trans_mode = False
    if args.db_dir:
        dbDflag = True
        dbDir = args.db_dir
    else:
        dbDflag = False
        dbDir = ''.join([os.getcwd(), '/', args.prefix, '-gmap-index-dir'])
    if args.db_name:
        dbNflag = True
        dbName = args.db_name
    else:
        dbNflag = False
        dbName = '-'.join([args.prefix, 'gmap-index'])
    if args.genetic_map:
        check_gm = True
        gmfile = args.genetic_map
    else:
        check_gm = False
    if args.identity < 1:
        ident_thold = args.identity
    else:
        ident_thold = float(args.identity) / 100.0
    if args.coverage < 1:
        cov_thold = args.coverage
    else:
        cov_thold = float(args.coverage) / 100.0

    # check what needs to be done
    checkU, checkM, checkD = preflight(args.prefix, 'pre')

    # load the alignments if pre-existing ones were found
    if checkU and checkM and checkD:
        uniqDat, duplDat, tlocDat = load_data(checkU, checkM, checkD,
                                              args.prefix)
    # if not, then proceed with alignments
    else:
        # generate the gmap config file if it doesn't exist yet
        if config.check_config():
            pass  # found gmap, don't panic
        else:
            print ''.join([
                'ERROR: Failed to locate gmap binaries. Please install',
                ' or specify path in gmap_config.txt manually.\n',
                'e.g. export PATH=/home/myuser/gmap/bin:$PATH'
            ])
            sys.exit(1)
        # run the alignments
        alignment.run_gmap(args.prefix, dbDir, dbDflag, dbName, dbNflag,
                           threads, cDNA, genome, trans_mode)
        # re-check what alignments we have
        checkU, checkM, checkD = preflight(args.prefix)

    # setup results dict
    cDNA_dict = {
        'Complete': [],
        'Duplicated': [],
        'Partial': [],
        'Fragmented': [],
        'Poorly mapped': []
    }

    # load the fresh alignments
    uniqDat, duplDat, tlocDat = load_data(checkU, checkM, checkD, args.prefix)

    # run assessment
    print '\n=== Evaluating alignments ==='
    cDNA_res = assess(checkU, checkM, checkD, uniqDat, tlocDat, duplDat,
                      cDNA_dict, ident_thold, cov_thold)
    print 'Done!'
    print util.report_time()

    # count total number of query sequences
    check_missing = reporting.find_missing(cDNA, cDNA_res)
    TOT = check_missing[1]
    cDNA_res['Missing'] = check_missing[0]

    # load genetic map data
    if check_gm:
        if not checkU:
            print ''.join([
                'WARNING: There were no uniquely-aligned cDNAs',
                ' detected, so the genetic map analysis will',
                ' not be performed'
            ])
            sys.exit(2)
        else:
            mapDat, uMap, uniqDatMap_select = load_gm(gmfile, uniqDat,
                                                      cDNA_res)
            # belatedly output the cDNA results with GM info
            reporting.output_cDNA(args.prefix, cDNA_res, mapDat)
            reporting.report_cDNA(args.prefix, cDNA_res, TOT)
        # check if there's anything to work with
        if len(uniqDatMap_select) == 0:
            print 'ERROR: There are no cDNAs from the genetic map to evaluate.'
            print ''.join([
                'This can happen if the cDNA sequence IDs do not match those',
                ' in the genetic map.'
            ])
            sys.exit(2)
        else:
            gmres = assess_gm(uMap, mapDat)
            reporting.output_gm(args.prefix, gmres)
            gm_cdna_stat = reporting.report_gm_cDNA(gmres, uniqDatMap_select,
                                                    args.prefix)  # per cDNA
            reporting.report_gm(uniqDatMap_select, gmres, gm_cdna_stat,
                                args.prefix)  # per scaffold

            # output updated genetic map
            gnavOut = '-'.join([args.prefix, 'full-cDNA-results-table.tsv'])
            uniqF = '.'.join([args.prefix, 'uniq'])
            duplF = '.'.join([args.prefix, 'mult'])
            tlocF = '.'.join([args.prefix, 'transloc'])
            parseGmap.wrapper(gnavOut, uniqF, duplF, tlocF, gmfile,
                              args.prefix)


    # if no genetic map data, write out the cDNA results
    else:
        # belatedly output the cDNA results without GM info
        reporting.output_cDNA(args.prefix, cDNA_res)
        reporting.report_cDNA(args.prefix, cDNA_res, TOT)
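
The identity/coverage handling near the top of this example accepts either a fraction (0.95) or a percentage (95) and normalizes both to a fraction. A tiny helper capturing that rule, as a sketch rather than part of the original tool:

def to_fraction(x):
    """Accept 0.95 or 95 and return the fraction 0.95."""
    x = float(x)
    return x if x < 1 else x / 100.0

assert to_fraction(0.95) == 0.95
assert to_fraction(95) == 0.95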