Example #1
def user_menu(session, directory):
    """ Show the menu in the first tmux window.
    Retrieve the information about the devices to use for the analysis
    and the folder containing the apps to analyze."""
    devices = {}
    uManagers = []

    logger = util.init_logger(config.configFile, 'Manager')
    logger.info('BareDroid init')
    devices = initDevice(logger)

    windows = divide_et_impera(devices, session)

    #watch log and errors
    logFolder = os.path.join(os.getcwd(),\
                    util.read_config(config.configFile, 'Log', 'folder'))
    windows[1].list_panes()[0].send_keys('tail -F %smanager.log'%logFolder)
    windows[1].list_panes()[1].send_keys('tail -F %smanager_error.log'%logFolder)

    ans=True
    while ans:
        print '-----------------'
        print ("""
    1) List devices
    2) Start Analysis
    3) Stop Analysis
    4) Select Devices
    q) Quit
    m) Menu
        """)
        print ''
        ans=raw_input(">> ")
        if ans=="1":
            listDevices(config.deviceInfoFile, logger)
        elif ans=="2":
            folder = createFolder(logger)
            #initialize
            uManagers = initAnalysis(directory,\
                int(util.read_config(config.configFile,'Analysis', 'apps')),\
                logger, devices)
            #start
            startAnalysis(uManagers, folder, windows, logger)
        elif ans=="3":
            stopAnalysis(uManagers)
        elif ans=="4":
            #TODO select devices
            print 'TODO'
        elif ans=='m' or ans == 'M':
            print ''
        elif ans=='q' or ans == 'Q':
            check(directory, logger)
            logger.info('exit')
            #print print_on_window(windows[0], '\nExit')
            quit()
        elif ans !="":
            print("\n Not valid, try again...")
            ans = True
        else:
            print("\n Not valid, try again...")
            ans = True
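This example reads settings with a three-argument util.read_config(file, section, key). A minimal sketch of such a helper, assuming an INI-style file parsed with configparser (the real BareDroid util module may differ):

import configparser

def read_config(config_file, section, key):
    # Parse an INI-style file and return the value stored under [section] key.
    parser = configparser.ConfigParser()
    parser.read(config_file)
    return parser.get(section, key)
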
Example #2
 def watch_update(self):
     force, new_version, update_fn, fn_md5 = unpackb(self.sock.recv(), encoding='utf-8')
     cur_version = read_config()['version']
     if force == 0 and new_version == cur_version:
         log.logger.info('current version is the latest version')
         return
     retry = 5
     index = 1
     while index <= retry:
         # wget update package and check md5
         ##
         ##
         try:
             shutil.copy(update_fn, os.path.join(prog_dir(), '..'))
             log.logger.info('copy update file done')
         except Exception, e:
             log.logger.error(e)
             index += 1
             continue
         if fn_md5 != get_file_md5(os.path.join(prog_dir(), '..', os.path.basename(update_fn))):
             log.logger.error("file md5 no match")
             index += 1
             continue
         else:
             break
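The md5 check relies on a get_file_md5 helper that the snippet does not show; a plausible stand-in (an assumption, not the project's actual code) hashes the file in chunks with hashlib:

import hashlib

def get_file_md5(path, chunk_size=8192):
    # Hash incrementally so large update packages are never fully in memory.
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()
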
Example #3
def generate_goal_states():
    config = read_config("config.yaml")
    agent_config = config['Agent']
    network_config = agent_config['Network']
    agent = DCAAgent(agent_config, network_config)
    env = DCAEnv()

    new_goal_states=[]
    new_goal_path=[]

    start_time=time.time()
    print('Start generating new goal states (5 steps away)')
    for T in range(9, 10):
        n_games = 10000
        num_game = 1
        while num_game <= n_games:
            temp, _ = agent.collect_data(env, T)
            new_goal_state=temp[0][-1].tolist()
            if new_goal_state not in new_goal_states:
                new_goal_states.append(new_goal_state)
                new_goal_path.append(temp[0])
            num_game = num_game + 1
    print('Finish generating new goal states (5 steps away) @ %s'%(time.time()-start_time))
    print('# of new goal states is %s'%(len(new_goal_states)))
    return new_goal_states,new_goal_path
Example #4
 def main(self):
     self.setup_basic_logging()
     parser = self.create_parser()
     (options, args) = parser.parse_args()
     cfg = read_config(options.config)
     self.setup_logging(cfg)
     self.handle_args(parser, cfg, options, args)
Example #5
def launch_lab(conn, user_vpc, lab):
    """Execute a lab configuration"""

    path = LAB_DIR + lab + '/scripts/'
    prompt, answer = _prompt_config(lab, path)

    # import lab configs
    labmod = imp.load_source('labmod', path + prompt[answer] + '.py')
    labmod.pre_process()
    cfg = util.read_config(LAB_DIR + lab + '/instances.cfg')

    # prompt for any dynamic configuration options
    for instance in cfg['instance']:
        for k, v in instance.iteritems():
            if str(v).startswith('PROMPT:'):
                instance[k] = raw_input('{0}: '.format(v.split(':')[1]))
            if str(v).startswith('PROMPT#:'):
                instance[k] = int(raw_input('{0}: '.format(v.split(':')[1])))
        for device in instance['device']:
            for k, v in device.iteritems():
                if str(v).startswith('PROMPT:'):
                    device[k] = raw_input('{0}: '.format(v.split(':')[1]))
                if str(v).startswith('PROMPT#:'):
                    device[k] = int(raw_input('{0}: '.format(v.split(':')[1])))

    # connection and required info
    security_groups = conn.get_all_security_groups(
        filters={'vpc-id': user_vpc.id})
    subnets = conn.get_all_subnets(filters={'vpc-id': user_vpc.id})

    # launch
    inst.launch_instances(conn, user_vpc, prompt[answer], lab, labmod, cfg,
                          security_groups, subnets)
    labmod.post_process()
Example #6
def main():
    # Read config file
    cfg = util.read_config('config/digit.yaml')

    # Load digit data from dataset
    x_train, x_test, y_train, y_test = load_data(cfg['dataset'])
    x_train, y_train = util.shuffle_data(x_train, y_train)
    x_test, y_test = util.shuffle_data(x_test, y_test)

    # Default model name as loaded from file, overwritten if training
    model_name = cfg['nn']['model_name']
    model_dir = cfg['nn']['model_dir']

    with tf.Session() as sess:
        if cfg['nn']['train']:
            # Train network on our training data
            print('[ANN] Training new network...')
            model, model_name = train_network(sess, x_train, y_train, cfg)
        else:
            print('[ANN] Testing network {0}...'.format(model_name))
            model = util.load_model(
                os.path.join(model_dir, model_name + "_model"))

        # Test network on our testing data
        results = test_network(sess, model, x_test, y_test, cfg)

        # TODO: Tristan to reimplement analyse results to get confusion matrix and roc curve
        conf_mat = {}
        # conf_mat = util.analyse_results(y_test, results)
        util.store_results(conf_mat, os.path.join(model_dir,
                                                  model_name + "_cm"))
Example #7
def main():
    logger.info("Starting google music uploader")
    pidfile = None
    if len(sys.argv) > 1:
        if sys.argv[1] == "--pidfile":
            if len(sys.argv) < 3:
                logger.error("Missing pidfile path")
                return
            pidfile = sys.argv[2]

    if pidfile:
        if not util.make_sure_path_exists(os.path.dirname(pidfile)):
            logger.warning("Error creating pidfile directory %s" % os.path.dirname(pidfile))
            return
        with open(pidfile, "w+") as f:
            logger.debug("Writing pidfile to %s" % pidfile)
            f.write(str(os.getpid()))
    config = util.read_config(DirInfo.AppConfig)

    session_opts = {
        'session.type': 'memory',
        'session.auto': 'true'
    }
    app = SessionMiddleware(bottle.app(), session_opts)

    run(app=app, host='0.0.0.0', port=config['PORT'], debug=True)
Example #8
def launch_lab(conn, user_vpc, lab):
    """Execute a lab configuration"""

    path = LAB_DIR + lab + '/scripts/'
    response = _prompt_config(lab, path)

    # import lab configs
    labmod = imp.load_source('labmod', path + response + '.py')
    labmod.pre_process()
    cfg = util.read_config(LAB_DIR + lab + '/instances.cfg')

    # prompt for any dynamic configuration options
    for instance in cfg['instance']:
        for k, v in instance.iteritems():
            if str(v).startswith('PROMPT:'):
                instance[k] = raw_input('{0}: '.format(v.split(':')[1]))
            if str(v).startswith('PROMPT#:'):
                instance[k] = int(raw_input('{0}: '.format(v.split(':')[1])))
        for device in instance['device']:
            for k, v in device.iteritems():
                if str(v).startswith('PROMPT:'):
                    device[k] = raw_input('{0}: '.format(v.split(':')[1]))
                if str(v).startswith('PROMPT#:'):
                    device[k] = int(raw_input('{0}: '.format(v.split(':')[1])))

    # connection and required info
    security_groups = conn.get_all_security_groups(filters = {'vpc-id': user_vpc.id})
    subnets = conn.get_all_subnets(filters = {'vpc-id': user_vpc.id})

    # launch
    inst.launch_instances(conn, user_vpc, lab, labmod, cfg, security_groups, subnets)
    labmod.post_process()
Example #9
def launch_lab(conn, user_vpc, lab):
    """Execute a lab configuration"""

    path = LAB_DIR + lab + "/scripts/"
    prompt, answer = _prompt_config(lab, path)

    # import lab configs
    labmod = imp.load_source("labmod", path + prompt[answer] + ".py")
    labmod.pre_process()
    cfg = util.read_config(LAB_DIR + lab + "/instances.cfg")

    # prompt for any dynamic configuration options
    for instance in cfg["instance"]:
        for k, v in instance.iteritems():
            if str(v).startswith("PROMPT:"):
                instance[k] = raw_input("{0}: ".format(v.split(":")[1]))
            if str(v).startswith("PROMPT#:"):
                instance[k] = int(raw_input("{0}: ".format(v.split(":")[1])))
        for device in instance["device"]:
            for k, v in device.iteritems():
                if str(v).startswith("PROMPT:"):
                    device[k] = raw_input("{0}: ".format(v.split(":")[1]))
                if str(v).startswith("PROMPT#:"):
                    device[k] = int(raw_input("{0}: ".format(v.split(":")[1])))

    # connection and required info
    security_groups = conn.get_all_security_groups(filters={"vpc-id": user_vpc.id})
    subnets = conn.get_all_subnets(filters={"vpc-id": user_vpc.id})

    # launch
    inst.launch_instances(conn, user_vpc, prompt[answer], lab, labmod, cfg, security_groups, subnets)
    labmod.post_process()
Example #10
def create_amis(conn, user_vpc, lab_tag):
    """Create all images for lab"""

    running_labs = get_running_labs(conn, user_vpc)

    cfg = util.read_config(LAB_DIR + lab_tag.rsplit('-', 1)[0] + '/instances.cfg')
    instances = inst.get_vpc_instances(conn, user_vpc)

    lab_instances = []
    for instance in instances:
        if 'Lab' in instance.tags:
            if instance.tags['Lab'] == lab_tag and instance.tags['AMI-Build'] == 'True':
                lab_instances.append(instance)


    print "Creating AMI's for lab: {0}".format(lab_tag)
    for instance in lab_instances:
        name_tag = TRAINER + '-{0}-'.format(VPC) + \
                             '{0}-'.format(lab_tag.rsplit('-', 1)[0]) + \
                             '{0}-'.format(instance.tags['Script']) + \
                             '{0}'.format(instance.tags['AMI-Key'])

        ami = conn.create_image(instance.id, name_tag, '{0} lab AMI'.format(lab_tag.rsplit('-', 1)[0]))

        # tags
        tags = {'Name': '{0}'.format(name_tag),
                'AMI-Key': '{0}'.format(instance.tags['AMI-Key']),
                'Lab': '{0}'.format(lab_tag.rsplit('-', 1)[0])}

        conn.create_tags(ami, tags)

    print "AMI build trigger completed successfully ..."
    print "Note: It could take 15-20 minutes for the AMI build to complete."
Example #11
def main():
    util.read_config()
    pygame.mixer.pre_init(44100, -16, 1, 1024 * 1)
    pygame.init()
    controller = gamecontroller.GameController()
    pygame.display.update()

    def process_events(events, controller):
        for event in events:
            if event.type == QUIT:
                sys.exit(0)
            if event.type == MOUSEMOTION:
                for b in controller.circles.itervalues():
                    if (b.is_pressed):
                        b.rect.move_ip(event.rel)
            if event.type == MOUSEBUTTONDOWN:
                pos = pygame.mouse.get_pos()
                clicked = [
                    s for s in controller.circles.itervalues()
                    if s.rect.collidepoint(pos)
                ]
                for b in clicked:
                    b.pressed()
            if event.type == MOUSEBUTTONUP:
                for b in controller.circles.itervalues():
                    b.unpressed()
            if event.type == USEREVENT + 1:
                #only redraw needed
                pass
            if event.type == USEREVENT + 2:
                #only redraw needed
                pass
            if event.type == KEYDOWN:
                controller.process_key(event.key)

    while controller.running:
        process_events([pygame.event.wait()] + pygame.event.get(), controller)

        controller.redraw()

        pygame.display.flip()

    if controller.playing:
        controller.stop_fgsegmentation()
    controller.picam.close()
    pygame.quit()
Example #12
 def __init__(self, config_file="etc/client.conf", logfile=""):
     self.config = read_config(app_abs_path(config_file))
     if not logfile:
         logfile = self.config['log_file']
     self.loglevel = logging._levelNames[self.config['log_level']]
     self._setup_logging()
     if self._no_handlers:
         self._setup_handlers(logfilepath=app_abs_path(logfile))
Example #13
def connect_to_database():
    # connect
    logging.info('Connecting to the mindful database')
    conn = psycopg2.connect(host=util.read_config('postgresql', 'host'),
                            database=util.read_config('postgresql',
                                                      'database'),
                            user=util.read_config('postgresql', 'user'),
                            password=util.read_config('postgresql',
                                                      'password'))

    cur = conn.cursor()
    yield cur

    # close the connection
    cur.close()
    conn.commit()
    conn.close()
    logging.info('Database connection closed')
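connect_to_database yields a cursor mid-function, so in the original project it is presumably decorated with contextlib.contextmanager (the decorator is not visible in the snippet); the statements after the yield then run when the caller's with-block exits. A self-contained sketch of the same shape, using sqlite3 instead of psycopg2:

import sqlite3
from contextlib import contextmanager

@contextmanager
def connect(db_path=':memory:'):
    # Setup before the yield, teardown after the caller's with-block ends.
    conn = sqlite3.connect(db_path)
    cur = conn.cursor()
    yield cur
    cur.close()
    conn.commit()
    conn.close()

with connect() as cur:
    cur.execute('SELECT 1')
    print(cur.fetchone())
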
Example #14
def main():
    util.read_config()
    pygame.mixer.pre_init(44100, -16, 1, 1024 * 1)
    pygame.init()
    controller = gamecontroller.GameController()
    pygame.display.update()

    def process_events(events, controller):
        for event in events:
            if event.type == QUIT:
                sys.exit(0)
            if event.type == MOUSEMOTION:
                for b in controller.circles.itervalues():
                    if b.is_pressed:
                        b.rect.move_ip(event.rel)
            if event.type == MOUSEBUTTONDOWN:
                pos = pygame.mouse.get_pos()
                clicked = [s for s in controller.circles.itervalues() if s.rect.collidepoint(pos)]
                for b in clicked:
                    b.pressed()
            if event.type == MOUSEBUTTONUP:
                for b in controller.circles.itervalues():
                    b.unpressed()
            if event.type == USEREVENT + 1:
                # only redraw needed
                pass
            if event.type == USEREVENT + 2:
                # only redraw needed
                pass
            if event.type == KEYDOWN:
                controller.process_key(event.key)

    while controller.running:
        process_events([pygame.event.wait()] + pygame.event.get(), controller)

        controller.redraw()

        pygame.display.flip()

    if controller.playing:
        controller.stop_fgsegmentation()
    controller.picam.close()
    pygame.quit()
Example #15
def launcher():
    """ Launch all commands """
    config = util.read_config()

    for task in config:
        if 'cmd' in config[task]:
            if not 'enable' in config[task]:
                config[task]['enable'] = True
            if config[task]['enable']:
                launch(config[task])
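launcher expects the config to map task names to dicts carrying a 'cmd' and an optional 'enable' flag; a hypothetical config shaped like the following would satisfy it, and dict.get with a default expresses the enable check more compactly:

config = {
    'backup': {'cmd': 'rsync -a /src /dst'},                  # enabled by default
    'cleanup': {'cmd': 'rm -rf /tmp/cache', 'enable': False}, # skipped
}

def launch(task):
    # Stand-in for the real launch(): just show what would run.
    print('launching:', task['cmd'])

for name in config:
    if 'cmd' in config[name] and config[name].get('enable', True):
        launch(config[name])
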
Example #16
def main():
    # Read config file
    cfg = util.read_config('config/mushroom.yaml')

    # Load mushroom data from dataset
    x_train, x_test, y_train, y_test = load_data(cfg['dataset'],
                                                 cfg['test_ratio_offset'])
    x_train, y_train = util.shuffle_data(x_train, y_train)
    x_test, y_test = util.shuffle_data(x_test, y_test)

    # Default model name as loaded from file, overwritten if training
    model_name = cfg['nn']['model_name']
    model_dir = cfg['nn']['model_dir']

    with tf.Session() as sess:
        if cfg['nn']['train']:
            # Train network on our training data
            print('[ANN] Training new network...')
            model, model_name, train_stats = train_network(
                sess, x_train, y_train, cfg)
        else:
            loaded_results = util.load_results(
                os.path.join(model_dir, model_name + "_cm"))
            # Setup our continous plot
            plt.title('Error vs Epoch')
            plt.plot(loaded_results['train_stats']['train_errors'],
                     color='r',
                     label='training')
            plt.plot(loaded_results['train_stats']['valid_errors'],
                     color='b',
                     label='validation')
            plt.xlabel('Epoch')
            plt.ylabel('Error')
            plt.legend()
            plt.grid()
            plt.show()

            print('[ANN] Testing network {0}...'.format(model_name))
            model = util.load_model(
                os.path.join(model_dir, model_name + "_model"))
            train_stats = loaded_results['train_stats']

        # Test network on our testing data
        results = test_network(sess, model, x_test, y_test, cfg)
        conf_mat, sk_fpr, sk_tpr, roc_auc = util.analyse_results(
            y_test, results)
        print('[ANN] ROC Area Under Curve: {0:.2f}'.format(roc_auc))
        plot_roc(sk_fpr, sk_tpr, roc_auc)
        results_to_save = {
            'conf_mat': conf_mat,
            'train_stats': train_stats,
            'roc_auc': float(roc_auc)
        }
        util.store_results(results_to_save,
                           os.path.join(model_dir, model_name + "_cm"))
Example #17
def interpretArgs():
    global debugOutput
    settings = {}
    settingsFile = None
    info = {
        'mid': '-',
        'output': '-'
    }
    lookupMode = False
    args = sys.argv[:]
    args.pop(0)
    while args:
        val = args.pop(0)
        if val == '-h' or val == '--help':
            usage()
        elif val == '-p':
            if not args:
                print('-p requires argument', file=sys.stderr)
                usage()
            info['mid'] = args.pop(0)
        elif val == '-c':
            if not args:
                print('-c requires argument', file=sys.stderr)
                usage()
            settingsFile = args.pop(0)
            util.read_config(settings, settingsFile, debugOutput)
        elif val == '-o':
            if not args:
                print('-o requires argument', file=sys.stderr)
                usage()
            info['output'] = args.pop(0)
        elif val == '--lookup':
            lookupMode = True
            break
        elif val == '--debug':
            debugOutput = True
        else:
            print('illegal argument '+val, file=sys.stderr)
            usage()
    return (settings, settingsFile, info, lookupMode, args)
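Note that this util.read_config variant (also used in Examples #37 and #42-#44) is called for its side effect: it fills the caller's settings dict in place rather than returning a value. A minimal sketch of that in-place style, assuming a JSON config file:

import json
import sys

def read_config(settings, settings_file, debug_output=False):
    # Merge the file into the caller's dict so defaults defined by the
    # caller survive unless the config overrides them.
    with open(settings_file) as f:
        settings.update(json.load(f))
    if debug_output:
        print('config loaded from ' + settings_file, file=sys.stderr)
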
Example #18
def run_tracking(pr_df, commit_df, srcpath, output, repo_name, home_url, recent_authors):
    config = util.read_config('summary')
    email = config['email']
    day = config['day']
    today = datetime.datetime.today()
    is_today = today.strftime('%A') == day
    objectives = config['objectives']
    authors = config['authors']

    monitors_str = '<tr>' \
                   '<td data-label="Report to">{email}</td>' \
                   '<td data-label="Day">{day}</td>' \
                   '<td data-label="Objectives">{objectives}</td>' \
                   '<td data-label="Authors">{authors}</td>' \
                   '</tr>'.format(email=email, day=day, objectives=objectives, authors=authors)

    with open(os.path.join(srcpath, 'templates', 'tracking.html'), 'r') as f:
        tracking_text = f.read()
    target_path = os.path.join(output, 'tracking.html')
    with open(target_path, 'w') as f:
        f.write(tracking_text.format(name=repo_name,
                                     nav=htmls.compute_nav(home_url, view='', timeframe='',
                                                           recent_authors=recent_authors),
                                     home_url=home_url, timeframe='', view='',
                                     author='', monitors=monitors_str))

    if is_today:
        month_start = today - datetime.timedelta(days=14)
        week_start = today - datetime.timedelta(days=7)
        last_month_prs = pr_df[(pr_df.index >= month_start) & (pr_df.index < week_start)]
        last_week_prs = pr_df[pr_df.index >= week_start]
        last_week_commits = commit_df[commit_df.index >= week_start]
        last_mean = last_month_prs.no_reviews.mean()
        this_mean = last_week_prs.no_reviews.mean()
        review_text = 'The mean reviews per pull request was {avg_review_week:.2f}, ' \
                      '{status} previous week\'s which saw a mean rate of {avg_review_month:.2f}'.format(
            avg_review_week=this_mean, avg_review_month=last_mean,
            status='about the same as' if last_mean - 0.1 < this_mean < last_mean + 0.1 else
            'higher than' if this_mean > last_mean else 'lower than'
        )

        with open(os.path.join(srcpath, 'templates', 'summary.html'), 'r') as f:
            page_text = f.read()
        page_text = page_text.format(
            no_prs=last_week_prs.shape[0], no_commits=last_week_commits.shape[0],
            no_lines=last_week_commits.insertions.sum() + last_week_commits.deletions.sum(),
            no_code_lines=last_week_commits.code_changes.sum(),
            review_text=review_text, name=repo_name,
            link=home_url
        )
        subject = 'git-quality weekly summary'
        email_summary(email, page_text, subject)
Example #19
def notification_message(song):
    chosen_str = " chosen by {username}".format(
        username=song.username) if song.username != "Unknown" else ""
    server_url = SERVER_URL if SERVER_URL != "None" else "http://{hostname}:{port}".format(
        hostname=socket.gethostname(),
        port=util.read_config(
            'server', 'port', type=int, default=util.DEFAULT_PORT))
    msg = "The song of the day is: {song_name}{chosen_str}: {url} \\n" \
          "To add your songs please visit {server_url}, " \
          "or to provide bug reports or feature requests please visit {repo_url}" \
          "\\n{release_notes}".format(song_name=song.title, chosen_str=chosen_str, url=song.url,
                                      server_url=server_url, repo_url=REPO_URL, release_notes="")
    return msg
Example #20
def make_submission(inference_config="config/infer.yml",
                    run_base_dir="wandb",
                    save_local=False):
    submission_folder = pathlib.Path(datetime.now().strftime("%Y%m%d-%H%M"))
    submission_dir = "submissions" / submission_folder
    submission_dir.mkdir(exist_ok=True)

    dir_util.copy_tree('src', str(submission_dir / 'src'))
    dir_util.copy_tree('prodia', str(submission_dir / 'src' / 'prodia'))

    with open(submission_dir / 'dataset-metadata.json', 'w') as f:
        json.dump(
            {
                "title": str(submission_folder),
                "id": f"xvrhlt/{submission_folder}",
                "licenses": [{
                    "name": "CC0-1.0"
                }]
            }, f)

    shutil.copy(inference_config, submission_dir)
    config = util.read_config(inference_config)

    run_base_dir = pathlib.Path(run_base_dir)

    dst_run_dir = submission_dir / "runs"
    dst_run_dir.mkdir()

    for run in config['runs']:
        if isinstance(run, str):
            run_id = run
            run_kwargs = {}
        elif isinstance(run, dict):
            run_id = run['run_id']
            run_kwargs = run
        src_path = run_base_dir / run_id
        # use a per-run subdirectory; reassigning dst_run_dir itself would
        # nest each subsequent run inside the previous one
        run_dst_dir = dst_run_dir / run_id
        run_dst_dir.mkdir()
        shutil.copy(str(src_path / "config.yaml"),
                    str(run_dst_dir / "config.yaml"))
        if 'model_pth' in run_kwargs:
            model = src_path / run_kwargs['model_pth']
        else:
            model = max(src_path.rglob('*.ckpt'))
        (run_dst_dir / 'model.ckpt').write_bytes(model.read_bytes())

    kaggle.api.dataset_create_new(submission_dir, dir_mode='zip')
    if save_local:
        shutil.make_archive(submission_folder, 'zip', root_dir='submissions')
    dir_util.remove_tree(submission_dir)
Example #21
def lidar_logger(output_directory):
    """ LIDAR Logger """
    global LIDAR_STATUS, LIDAR_DATA

    port_name = '/dev/lidar'
    lidar = None

    # Configure
    config = util.read_config()

    while not util.DONE:
        try:
            lidar = RPLidar(port_name)
            print(lidar.get_info())
            print(lidar.get_health())
            # Open the output file
            timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M")
            with open(os.path.join(output_directory,timestamp+"_lidar.csv"), "w") as lidar_output:
                lidar_output.write("%s %s %s *\n" % (config['time'], "VERSION", json.dumps({"class": "VERSION", "version": util.DATA_API})))
                lidar_output.write("%s %s %s *\n" % (config['time'], config['class'], json.dumps(config)))
                for _, scan in enumerate(lidar.iter_scans(max_buf_meas=1500)):
                    lidartime = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
                    data = []
                    for (_, angle, distance) in scan:
                        if distance > 0:
                            data.append((int(angle)%360, int(distance)))
                    lidar_data = {
                        'class': 'LIDAR',
                        'device': 'A1M8',
                        'time': lidartime,
                        'scan': data,
                    }
                    lidar_output.write("%s %s %s *\n" % (lidar_data['time'], lidar_data['class'], json.dumps(lidar_data)))
                    LIDAR_DATA = lidar_data
                    LIDAR_STATUS = True
                    if util.DONE:
                        break
        except KeyboardInterrupt:
            util.DONE = True
        except Exception as ex:
            print("LIDAR Logger Exception: %s" % ex)
            time.sleep(util.ERROR_DELAY)

        if lidar is not None:
            lidar.stop()
            lidar.stop_motor()
            lidar.disconnect()
        LIDAR_STATUS = False
        time.sleep(util.ERROR_DELAY)
Example #22
def email_summary(email_address, content, subject):
    """ Emails award winners to the given email address
    :param str email_address: the address to email
    """
    from_address = util.read_config('email')['from']
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = from_address
    msg['To'] = email_address
    msg.attach(MIMEText(html2text.html2text(content), 'plain'))
    msg.attach(MIMEText(content, 'html'))
    s = smtplib.SMTP('localhost')
    for e in email_address.split(','):
        s.sendmail(from_address, [e], msg.as_string())
    s.close()
Example #23
def lpcm_logger(output_directory):
    """ LPCM Capture Wrapper """
    global LPCM_DATA

    # Configure
    config = util.read_config()

    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M")
    with open(os.path.join(output_directory, timestamp + "_lpcm.csv"),
              "w") as lpcm_output:
        lpcm_output.write("%s %s %s *\n" % (config['time'], "VERSION",
                                            json.dumps({
                                                "class": "VERSION",
                                                "version": util.DATA_API
                                            })))
        lpcm_output.write(
            "%s %s %s *\n" %
            (config['time'], config['class'], json.dumps(config)))
        while not util.DONE:
            try:
                timestamp = datetime.datetime.now()
                filename = timestamp.strftime("%Y%m%d%H%M")
                LPCM_DATA = {
                    "class": "LPCM",
                    "time": timestamp.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
                }
                for channel in ["left", "right"]:
                    capture_file = filename + "_" + channel + ".wav"
                    if os.path.isfile(
                            os.path.join(output_directory, capture_file)):
                        LPCM_DATA[channel] = capture_file

                if os.system("./lpcm_collect.sh %s %s \"%s\"" %
                             (output_directory, filename,
                              config['lpcm']['arecord'])) != 0:
                    time.sleep(util.ERROR_DELAY)
                else:
                    lpcm_output.write("%s %s %s *\n" % (
                        LPCM_DATA['time'],
                        LPCM_DATA['class'],
                        json.dumps(LPCM_DATA),
                    ))
            except KeyboardInterrupt:
                util.DONE = True
            except Exception as ex:
                print("LPCM Logger Exception: %s" % ex)
                time.sleep(util.ERROR_DELAY)
Example #24
def create_leaderboard(config_path, output_path):
    """
    Create a leaderboard for the given ``config`` file.

    :param config_path:
      `str` representing path to config file.
    """
    config = read_config(config_path)

    title = config.get('title', '')
    if not title:
        print("Leaderboard title is mandatory. Please provide the same in "
              "your config file")
        sys.exit(1)

    tabs = config.get('tabs', [])
    pills = []
    for i, tab in enumerate(tabs):
        pill = {
            'pill_id': 'pill%d' % i,
            'status': 'active' if i == 0 else '',
            'nav_id': 'tab%d' % i,
            'title': tab['title']
        }
        pills.append(pill)

    tables = []
    for i, tab in enumerate(tabs):
        table = {
            'status': 'show active' if i == 0 else '',
            'nav_id': 'tab%d' % i,
            'pill_id': 'pill%d' % i,
            'type': tab['type'],
            'table': ''
        }
        data = _read_csv(tab['data'])
        print(len(data))
        tmpl = Template(_read_template(tab['type']))
        tbl_html = tmpl.render(data=data)
        table['table'] = tbl_html
        tables.append(table)

    index_tmpl = Template(_read_template('index'))
    index_html = index_tmpl.render(title=title, pills=pills, tables=tables)

    with open(output_path, 'w') as f:
        f.write(index_html)
Example #25
def main():
    from torch.nn import init
    config = util.read_config('../configs/process.yaml')
    # get_temp_vocab(config)
    vocab = data.Vocab(config.vocab_path, max_size=config.max_size)
    # vocab.build_vectors(config.pre_word_embedding_path, 300, unk_init=init.xavier_uniform)
    if config.save:
        torch.save(vocab, config.vocab_path_50)
    val_data = DocDataset(config.val_path, vocab, config)
    test_data = DocDataset(config.test_path, vocab, config)
    if config.save:
        torch.save(val_data, config.val_data_path)
        torch.save(test_data, config.test_data_path)

    train_data = DocDataset(config.train_path, vocab, config)
    if config.save:
        torch.save(train_data, config.train_data_path)
Example #26
def init_db(type):
    result = None
    if type == 'MYSQL':
        host = util.read_config('MYSQL', 'host')
        port = int(util.read_config('MYSQL', 'port'))
        user = util.read_config('MYSQL', 'username')
        password = util.read_config('MYSQL', 'password')
        database = util.read_config('MYSQL', 'database')
        result = MySQL(host=host,
                       port=port,
                       user=user,
                       password=password,
                       database=database)
    elif type == 'SQLSERVER':
        host = util.read_config('SQLSERVER', 'host')
        user = util.read_config('SQLSERVER', 'username')
        password = util.read_config('SQLSERVER', 'password')
        database = util.read_config('SQLSERVER', 'database')
        result = MSSQL(host=host,
                       user=user,
                       password=password,
                       database=database)
    return result
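init_db, like Examples #13 and #19, uses a two-argument util.read_config(section, key) against an implicit default file. A configparser-based sketch of that variant (the file name and the type/default keywords, which Example #19 passes, are assumptions):

import configparser

_parser = configparser.ConfigParser()
_parser.read('config.ini')  # assumed default location

def read_config(section, key, type=str, default=None):
    # 'type' mirrors the keyword used in Example #19 even though it
    # shadows the builtin; coerce the raw string, fall back to default.
    try:
        return type(_parser.get(section, key))
    except (configparser.NoSectionError, configparser.NoOptionError):
        return default
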
Example #27
def univ_collection(target_alias=None):
    '''
    Collect the faculty lists of the universities listed in config.json,
    using the API defined in util.py, and convert each Chinese name to its
    pinyin form followed by the full name of the university.
    params:
        target_alias: str, list of str, or None. The institutions to find
        connections for; if None, search all institutions supported by
        config.json.
    return value:
        defaultdict mapping each university name to a list of the pinyin
        names of all faculty members with that alias.
    '''
    configs = util.read_config(filename='config/institutions.json')
    print('find connection for {}'.format(
        target_alias if target_alias else 'all'))
    univ_faculty_collection = util.crawl_faculty_list(configs, target_alias)
    univ_faculty_collection = util.extract_name(univ_faculty_collection)
    univ_faculty_collection = util.name_to_pinyin(univ_faculty_collection)
    return univ_faculty_collection
Example #28
def upload_file():
    if not weibo.check_login_status():
        if not util.check_config_file():
            util.generate_config()

        config = util.read_config()

        if not config:
            util.alert('Please configure your Weibo account first')
            util.open_with_editor()
            sys.exit(0)

        username = config['username']
        password = config['password']

        try:
            weibo.login_with_username_and_password(username, password)
        except:
            util.alert('Login failed, please retry!')
            sys.exit(0)

        if weibo.check_login_status():
            # util.delete_config()
            util.alert('Login succeeded!')
        else:
            util.alert('Login failed, please retry!')
            sys.exit(0)

    img_file = get_paste_img_file()

    if img_file:
        try:
            url = weibo.request_image_url(img_file.name)
            return url
        except:
            util.delete_cookie()
            util.alert('Cookie expired, please log in again!')
            sys.exit(0)
    else:
        util.alert('No image found in your clipboard!')
        sys.exit(0)
Example #29
def upload_file():
    if not weibo.check_login_status():
        if not util.check_config_file():
            util.generate_config()

        config = util.read_config()

        if not config:
            util.alert('Please configure your Weibo account first')
            util.open_with_editor()
            sys.exit(0)

        username = config['username']
        password = config['password']

        try:
            weibo.login_with_username_and_password(username, password)
        except:
            util.alert('Login failed, please retry!')
            sys.exit(0)


        if weibo.check_login_status():
            util.delete_config()
        else:
            util.alert('Login failed, please retry!')
            sys.exit(0)

    img_file = get_paste_img_file()

    if img_file:
        try:
            url = weibo.request_image_url(img_file.name)
            return url
        except:
            util.delete_cookie()
            util.alert('Cookie expired, please log in again!')
            sys.exit(0)
    else:
        util.alert('No image found in your clipboard!')
        sys.exit(0)
Example #30
def create_amis(conn, user_vpc, lab_tag):
    """Create all images for lab"""

    running_labs = get_running_labs(conn, user_vpc)

    cfg = util.read_config(LAB_DIR + lab_tag.rsplit('-', 1)[0] +
                           '/instances.cfg')
    instances = inst.get_vpc_instances(conn, user_vpc)

    lab_instances = []
    for instance in instances:
        if 'Lab' in instance.tags:
            if instance.tags['Lab'] == lab_tag and instance.tags[
                    'AMI-Build'] == 'True':
                lab_instances.append(instance)

    print "Creating AMI's for lab: {0}".format(lab_tag)
    for instance in lab_instances:
        name_tag = TRAINER + '-{0}-'.format(VPC) + \
                             '{0}-'.format(lab_tag.rsplit('-', 1)[0]) + \
                             '{0}-'.format(instance.tags['Script']) + \
                             '{0}'.format(instance.tags['AMI-Key'])

        ami = conn.create_image(
            instance.id, name_tag,
            '{0} lab AMI'.format(lab_tag.rsplit('-', 1)[0]))

        # tags
        tags = {
            'Name': '{0}'.format(name_tag),
            'AMI-Key': '{0}'.format(instance.tags['AMI-Key']),
            'Lab': '{0}'.format(lab_tag.rsplit('-', 1)[0])
        }

        conn.create_tags(ami, tags)

    print "AMI build trigger completed successfully ..."
    print "Note: It could take 15-20 minutes for the AMI build to complete."
Example #31
def email_awards(email_address, awards_df, repo_name, srcpath=os.getcwd()):
    """ Emails award winners to the given email address
    :param str email_address: the address to email
    :param pd.DataFrame awards_df: the awards dataframe
    """
    report_filename = os.path.join(srcpath, 'templates', 'email_report.html')
    with open(report_filename, 'r') as f:
        content = f.read()
    month = awards_df.index[0].split()[1]
    table = awards_df.to_html(col_space=5)
    content = content.format(repo=repo_name, month=month, table=table)

    msg = MIMEMultipart('alternative')
    msg['Subject'] = '{repo} quality stats awards for {month}'.format(repo=repo_name, month=month)
    from_address = util.read_config('email')['from']
    msg['From'] = from_address
    msg['To'] = email_address
    msg.attach(MIMEText(content, 'html'))
    msg.attach(MIMEText(html2text.html2text(content), 'plain'))
    s = smtplib.SMTP('localhost')
    for e in email_address.split(','):
        s.sendmail(from_address, [e], msg.as_string())
    s.close()
Example #32
def main():
	cfg = util.read_config('config/digit.yaml')

	x_train, x_test, y_train, y_test = load_data(cfg['dataset'])
	x_train, y_train = util.shuffle_data(x_train, y_train)
	x_test, y_test   = util.shuffle_data(x_test, y_test)

	y_test3 = np.array([row[0] for row in y_test])  # keep only the first label column

	model_name = cfg['svm']['model_name']
	model_dir = cfg['svm']['model_dir']

#	svm = train_svm_multi(x_train, y_train, cfg)
#	svm = train_svm(x_train, y_train, cfg)
#	y_test2 = test_svm(svm, x_test)
#	plot_confusion_matrix(y_test3, y_test2)

	train_test_svm_multi_roc_ver(x_train, y_train, x_test, y_test3, cfg)
Example #33
def create_amis(conn, user_vpc, lab_tag):
    """Create all images for lab"""

    running_labs = get_running_labs(conn, user_vpc)

    cfg = util.read_config(LAB_DIR + lab_tag.rsplit("-", 1)[0] + "/instances.cfg")
    instances = inst.get_vpc_instances(conn, user_vpc)

    lab_instances = []
    for instance in instances:
        if "Lab" in instance.tags:
            if instance.tags["Lab"] == lab_tag and instance.tags["AMI-Build"] == "True":
                lab_instances.append(instance)

    print "Creating AMI's for lab: {0}".format(lab_tag)
    for instance in lab_instances:
        name_tag = (
            TRAINER
            + "-{0}-".format(VPC)
            + "{0}-".format(lab_tag.rsplit("-", 1)[0])
            + "{0}-".format(instance.tags["Script"])
            + "{0}".format(instance.tags["AMI-Key"])
        )

        ami = conn.create_image(instance.id, name_tag, "{0} lab AMI".format(lab_tag.rsplit("-", 1)[0]))

        # tags
        tags = {
            "Name": "{0}".format(name_tag),
            "AMI-Key": "{0}".format(instance.tags["AMI-Key"]),
            "Lab": "{0}".format(lab_tag.rsplit("-", 1)[0]),
        }

        conn.create_tags(ami, tags)

    print "Completed ..."
Example #34
def friction(agi, extension, now, context):
    config = util.read_config(config_filename)
    if config:
        config_map = util.relevant_config(config, extension, now, context)
        if config_map:
            action(action_map, config_map)(agi)
Example #35
            return
        log.logger.info('update zops_agent done')

    def backup_agent(self):
        app_dir = os.path.join(prog_dir(), '..')  # prog_dir() as in watch_update (Example #2)
        src = "%s/agent" % app_dir
        dst = "%s/agent.%s" % (app_dir, int(time.time()))
        backup_list = glob.glob('%s/agent.[0-9]*' % app_dir)
        max_backup = int(self.opts['max_backup'])
        if len(backup_list) >= max_backup:
            for bak in sorted(backup_list, key=lambda x: -int(os.path.basename(x).split('.')[1]))[max_backup - 1:]:
                shutil.rmtree(bak)
        shutil.copytree(src, dst)

if __name__ == '__main__':
    opts = read_config(config_file='conf/client.conf')
    wd = WatchDog(opts)
    #apscheduler.events
    def err_listener(ev):
        if ev.exception:
            aps_log.info('%s error.', str(ev.job))
        else:
            aps_log.info('%s miss', str(ev.job))
    from apscheduler.schedulers.blocking import BlockingScheduler
    from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_MISSED
    from apscheduler.triggers.interval import IntervalTrigger
    sched = BlockingScheduler()
    sched.add_listener(err_listener, EVENT_JOB_ERROR | EVENT_JOB_MISSED)
    sched.add_job(wd.watch_process, IntervalTrigger(seconds=10))
    sched.add_job(wd.watch_update, IntervalTrigger(seconds=10))
    sched.start()
Example #36
from os import path
from datetime import datetime

from RQ3_H1 import format_as_csv
from util import read_config
from utils import utils

project_config = read_config(['project_details.properties'])


def compute():
    random_number = "{:%Y_%m_%d_%H_%M_%S}".format(datetime.now())
    file_path = 'RQ3_H1/results/does_coverage_increase__' + random_number
    result = for_list_of_projects()
    formatted_result = format_as_csv.format_contents(result)

    utils.write_list_as_csv(formatted_result, file_path + '.csv')
    utils.write_json_file(result, file_path + '.json')


def for_list_of_projects():
    project_list = project_config.get('projects', 'project_list').split(",")
    result = []
    for project in project_list:
        result.append({
            'project_name': project,
            'result': for_each_project(project)
        })

    return result
Example #37
def interpretArgs():
    settings = {
        'delim': ',',
        'quote': '"',
        'hdr_split': '|',
        'row_split': '|',
        'database': 'db/members.db',
        'header_elig': 'code/headers/elig.hdr',
        'header_encs': 'code/headers/encs.hdr',
        'header_lab_rsl': 'code/headers/lab_rsl.hdr',
        'header_med_clms': 'code/headers/med_clms.hdr',
        'header_rx_clms': 'code/headers/rx_clms.hdr',
        'join_id': 'MEMBER_ID',
        'shelve_id_files': [
            'code/db/set_myeloma.txt',
            'code/db/set_diabetes.txt'
        ],
        'anonymize': {
            'do': False,
            'date_columns': [
                'ELIG_EFFECTIVE_DATE',
                'ELIG_TERMINATION_DATE',
                'ENCS_SERVICE_DATE',
                'ENCS_PAID_DATE',
                'ENCS_ADMIT_DATE',
                'ENCS_DISCHARGE_DATE',
                'LAB_RSL_SERVICE_DATE',
                'MED_CLMS_SERVICE_DATE',
                'MED_CLMS_PAID_DATE',
                'MED_CLMS_ADMIT_DATE',
                'MED_CLMS_DISCHARGE_DATE',
                'RX_CLMS_SERVICE_DATE',
                'RX_CLMS_PAID_DATE',
                'RX_CLMS_PRESCRIPTION_DATE'
            ],
            'age_columns': [
                'ELIG_AGE',
                'LAB_RSL_AGE',
                'RX_CLMS_AGE'
            ],
            'redact_columns': [
                'ELIG_PATIENT_KEY',
                'ELIG_OLD_MEMBER_ID',
                'ELIG_SUBSCRIBER_ID',
                'ELIG_ZIP',
                'ELIG_COUNTRY_CODE',
                'ELIG_PCP_ID',
                'ELIG_GROUP_ID',
                'ELIG_SUB_GROUP_ID',
                'ELIG_PLAN_ID',
                'LAB_RSL_SUBSCRIBER_ID'
            ]
        }
    }
    info = {
        'pid': '',
        'output': '-'
    }
    args = sys.argv[:]
    args.pop(0)
    do_list = False
    while args:
        val = args.pop(0)
        if val == '-h' or val == '--help':
            usage()
        elif val == '-l' or val == '--list':
            do_list = True
        elif val == '-p':
            if not args:
                print('-p requires argument', file=sys.stderr)
                usage()
            info['pid'] = args.pop(0)
        elif val == '--all':
            info['pid'] = '--all'
        elif val == '-c':
            if not args:
                print('-c requires argument', file=sys.stderr)
                usage()
            util.read_config(settings, args.pop(0))
        elif val == '-o':
            if not args:
                print('-o requires argument', file=sys.stderr)
                usage()
            info['output'] = args.pop(0)
        elif val == '--seed':
            if not len(args):
                print('--seed requires integer seed', file=sys.stderr)
                usage()
            try:
                seed = int(args.pop(0))
                random.seed(seed)
            except:
                print('--seed requires integer seed', file=sys.stderr)
                usage()
        else:
            print('illegal argument '+val, file=sys.stderr)
            usage()
    if do_list:
        printList(settings)
        sys.exit(0)
    if info['pid'] == '':
        print('patient id required', file=sys.stderr)
        usage()
    return (settings, info)
Example #38
schema = ''

host = ''
port = None
user = ''
password = ''

import util
import os

_config = os.environ.get('SOCA_CONFIG') or util.find_file('.socarc')

if _config:
    locals().update(util.read_config(_config))
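locals().update(...) works here because at module level locals() is the module's namespace, so every config key becomes a module attribute. For a simple key=value rc file like .socarc, read_config might look like this (an illustrative sketch, not the actual implementation):

def read_config(path):
    # Parse 'key = value' lines into a dict, skipping blanks and comments.
    values = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            key, sep, value = line.partition('=')
            if not sep:
                continue
            values[key.strip()] = value.strip()
    return values
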
Example #39
 def __init__(self):
     self.config = read_config(self.source_name)
     self.log = config_logging(self.config).getLogger(self.source_name)
     self.db = DB.from_config(self.config)
Example #40
import time
from subprocess import run

import util

if __name__ == '__main__':
    config = util.read_config('config.yaml')

    runners = {}
    for k, v in config['runners'].items():
        runners[k] = v['cmd']

    for r in config['runs']:
        if r in runners:
            cmd = runners[r]
            print('\n\n[RUN COMMAND]: {}'.format(cmd))
            cmds = cmd.split(' ')
            run(cmds)
            time.sleep(0.25)
        else:
            err_str = "Invalid runner found in runs section of config.yaml: {}".format(
                r)
            raise RuntimeError(err_str)
Example #41
            try:
                body = self.serial.loads(self.job.recv())
            except:
                log.logger.error(traceback.format_exc())
                continue
            self.job.send(self.serial.dumps({'ret':'ok'}))
            job_handler = JobHandler(self.opts, body)
            job_handler.start()
            def sigterm_clean(signum, frame):
                clean_proc(job_handler)
                try:
                    os.kill(os.getpid(),signal.SIGKILL)
                except OSError:
                    pass
            signal.signal(signal.SIGTERM, sigterm_clean)

    def destroy(self):
        if self.job.closed is False:
            self.job.setsockopt(zmq.LINGER, 1)
            self.job.close()
        if self.context.closed is False:
            self.context.term()

    def __del__(self):
        self.destroy()

if __name__ == '__main__':
    opts = read_config()
    jobsvr = JobServer(opts)
    jobsvr.run()
Example #42
        elif arg == '-f':
            if not args or args[0] == '--':
                print('-f requires format file', file=sys.stderr)
                usage()
            util.read_format(args.pop(0), cms_get_patient.input_format, usage)
        elif arg == '-o':
            if not args or args[0] == '--':
                print('-o requires output file', file=sys.stderr)
                usage()
            output = args.pop(0)
        elif arg == '-c':
            if not args or args[0] == '--':
                print('-c requires argument', file=sys.stderr)
                usage()
            settingsFile = args.pop(0)
            util.read_config(settings, settingsFile, build_dictionary.debugOutput)
        elif arg == '--debug':
            build_dictionary.debugOutput = True
        else:
            print('unrecognized argument: ' + arg, file=sys.stderr)
            usage()

    if not len(query):
        print('query is required', file=sys.stderr)
        usage()

    build_dictionary.init(settings, settingsFile)

    allPaths = []
    while args:
        path = args.pop(0)
Example #43
            usage()
        if arg == '--path':
            if not args:
                print('--path requires path', file=sys.stderr)
                usage()
            path = args.pop(0)
        elif arg == '-f':
            if not args:
                print('-f requires format file', file=sys.stderr)
                usage()
            util.read_format(args.pop(0), input_format, usage)
        elif arg == '-c':
            if not args:
                print('-c requires argument', file=sys.stderr)
                usage()
            util.read_config(settings, args.pop(0))
        else:
            print('unrecognized argument: ' + arg, file=sys.stderr)
            usage()
    if path is None:
        print('need to specify path with --path', file=sys.stderr)
        usage()
    if not len(input_format.keys()):
        print('need to specify non-empty format file with -f', file=sys.stderr)
        usage()
    out = {
        'delim': settings['delim'],
        'quote': settings['quote']
    }
    util.process_burst_directory(path, lambda root, file: processFile(root, path, file, out))
Example #44
def start_server(max_num, settings_file, format_file, class_file, line_file, cms_path, addr, port, debug):
    settings = {}
    util.read_config(settings, settings_file, True)
    use_cache = settings.get('use_cache', True)

    all_paths = []
    input_format = {}
    use_db = False
    omop = None
    if settings.get('omop_use_db', False):
        use_db = True
        omop = OMOP(settings, True)
    else:
        util.convert_paths([ cms_path ], all_paths)

        util.read_format(format_file, input_format, usage)
        cms_analyze.input_format = input_format
        cms_get_patient.input_format = input_format

        build_dictionary.debugOutput = True
        build_dictionary.init(settings, settings_file)

    dictionary_file = os.path.join(json_dir, dictionary_bind)

    patients = set()
    def save_patients():
        if not use_cache:
            return
        with open(patients_list, 'w') as pf:
            pf.write('\n'.join(sorted(list(patients))))
            pf.flush()

    if not os.path.isfile(patients_list) or not use_cache:
        if use_db:
            omop.list_patients(patients, prefix=json_dir, limit=max_num, show_old_ids=True)
        else:
            tf = StringIO()
            cms_analyze.compute(all_paths, {}, False, tf, filter_zero=True)
            tf.flush()
            tf.seek(0)
            lines = tf.readlines()[-max_num:] if max_num is not None else tf.readlines()
            for line in lines:
                patients.add(json_dir + line.strip() + '.json')
        save_patients()

    dict = {}
    if use_cache:
        if os.path.isfile(dictionary_file):
            with open(dictionary_file, 'r') as input:
                dict = json.loads(input.read())
        else:
            os.makedirs(json_dir)
            # write the initial empty dictionary
            # also ensures that the folder is writeable
            with open(dictionary_file, 'w') as output:
                output.write("{}")

    server = create_server((addr, port))
    server.bind_path('/', '..')

    prefix = '/' + os.path.basename(os.path.normpath(server.base_path))

    server.add_default_white_list()
    server.add_file_patterns([
            prefix + '/' + json_dir + '*',
            prefix + '/' + patients_list
        ], True)
    server.favicon_fallback = 'favicon.ico'
    server.report_slow_requests = True
    if debug:
        server.suppress_noise = True

    @server.text_get(prefix + '/' + patients_list, 0)
    def get_list(req, args):
        return '\n'.join(sorted(list(patients)))

    @server.json_get(prefix + '/' + json_dir, 1)
    def get_patient(req, args):
        pid = args['paths'][0]
        if pid.endswith('.json') and use_db:
            pid = pid[:-len('.json')]
            pid = omop.get_person_id(pid)
        cache_file = os.path.join(json_dir, pid)
        p_name = json_dir + pid.strip()
        if p_name not in patients:
            patients.add(p_name)
            save_patients()
        if pid.endswith('.json'):
            pid = pid[:-len('.json')]
        if not os.path.isfile(cache_file) or not use_cache:
            if use_db:
                patient = omop.get_patient(pid, dict, line_file, class_file)
            else:
                patient = cms_get_patient.process(all_paths, line_file, class_file, pid)
                build_dictionary.extractEntries(dict, patient)
            if use_cache:
                with open(cache_file, 'w') as pf:
                    pf.write(json_dumps(patient))
                    pf.flush()
            if use_cache:
                with open(dictionary_file, 'w') as output:
                    output.write(json_dumps(dict))
                    output.flush()
            return patient
        with open(cache_file, 'r') as pf:
            return json.loads(pf.read())

    @server.json_get(prefix + '/' + dictionary_file)
    def get_dictionary(req, args):
        return dict

    msg("starting server at {0}:{1}", addr if addr else 'localhost', port)
    server.serve_forever()
    msg("shutting down..")
    server.server_close()
Example #45
                    "key_category": category['key_category']
                }


def run_scrapers(limit_sources=None):
    """
    Collect and run scrapers

    if limit_names is None, all scrapers will run
    """
    scrapers = set()
    for root, dirs, files in os.walk(script_dir()):
        for file_name in glob.fnmatch.filter(files, 'scraper_*.py'):
            file_path = os.path.join(root, file_name)
            mod_name = os.path.splitext(file_name)[0]
            mod = imp.load_source(mod_name, file_path)
            for (cls_name, cls) in inspect.getmembers(mod, inspect.isclass):
                if glob.fnmatch.fnmatch(cls_name, 'Scraper*') and\
                    (not limit_sources or \
                    (getattr(cls, 'source_name') in limit_sources)):
                        scrapers.add(cls)
    for scraper in scrapers:
        scraper().run()

if __name__ == '__main__':
    config = read_config('scraper')
    limit_sources = None
    if config.limit_sources:
        limit_sources = map(string.strip, config.limit_sources.split(','))
    run_scrapers(limit_sources)
Example #46
File: candy.py Project: rtfb/candy
    print "Can't run Candy! Need wxWidgets v2.8+, but got v" + wxVersion
    sys.exit(0)

import wx.stc as stc
import wx.lib.pubsub as pubsub

import keyboard
import util
from status_line import StatusLine
import data
from constants import *


project_dir = os.path.join(os.path.dirname(__file__), '..')
general_conf_path = os.path.join(project_dir, u'general.conf')
general_config = util.read_config(general_conf_path)
color_conf = os.path.join(project_dir, u'colorscheme-default.conf')
color_scheme = util.read_config(color_conf)


class VisualItem(object):
    """
    An item to hold visual representation. E.g. if the external item is a
    filename, this one will hold things like justified name, startChar in
    the ViewWindow coords and the like. Number of these objects is the number
    RawItems that actually fit on screen (at least partially).
    """
    def __init__(self):
        # Character on the row-representing string, which is the str[0]-th
        # char of this item's visual representation. Will not be negative,
        # since objects of this class represent only visible things, even
Example #47
 def __init__(self):
     self.config = read_config(self.source_name)
     self.log = config_logging(self.config).getLogger(self.source_name)
     self.db = DB.from_config(self.config)
Example #48
    venues = os.path.join(api_v1, 'venues'),
    venue = os.path.join(api_v1, 'venues/<id:int>'),
    venue_nearby = os.path.join(api_v1, 'venues/<id:int>/nearby'),
    categories = os.path.join(api_v1, 'categories'),
    category = os.path.join(api_v1, 'categories/<id:int>'),
    category_venues = os.path.join(api_v1, 'categories/<id:int>/venues'),
    zips = os.path.join(api_v1, 'zips'),
    zip = os.path.join(api_v1, 'zips/<zip>'),
    zip_venues = os.path.join(api_v1, 'zips/<zip>/venues'),
)


### setup


config = util.read_config("api")
log = util.config_logging(config).getLogger("server")
# connection pool
pool = psycopg2.pool.ThreadedConnectionPool(
    minconn=1,
    maxconn=int(config.max_connections or 10),
    dsn=config.db,
    cursor_factory=psycopg2.extras.RealDictCursor
)
# install plugin
install(bottle_pgpool.PgSQLPoolPlugin(pool))


###  helpers

Example #49
# -*- coding: utf-8 -*-

import os
from qiniu import Auth, put_file
from util import read_config

config = read_config()

def upload_qiniu(path, upload_name):
    ''' upload file to qiniu'''
    q = Auth(config['ak'], config['sk'])
    key = upload_name  # upload to qiniu's markdown dir

    token = q.upload_token(config['bucket'], key)
    ret, info = put_file(token, key, path, check_crc=True)
    return ret is not None and ret['key'] == key
Example #50
                }


def run_scrapers(limit_sources=None):
    """
    Collect and run scrapers

    if limit_names is None, all scrapers will run
    """
    scrapers = set()
    for root, dirs, files in os.walk(script_dir()):
        for file_name in glob.fnmatch.filter(files, 'scraper_*.py'):
            file_path = os.path.join(root, file_name)
            mod_name = os.path.splitext(file_name)[0]
            mod = imp.load_source(mod_name, file_path)
            for (cls_name, cls) in inspect.getmembers(mod, inspect.isclass):
                if glob.fnmatch.fnmatch(cls_name, 'Scraper*') and\
                    (not limit_sources or \
                    (getattr(cls, 'source_name') in limit_sources)):
                    scrapers.add(cls)
    for scraper in scrapers:
        scraper().run()


if __name__ == '__main__':
    config = read_config('scraper')
    limit_sources = None
    if config.limit_sources:
        limit_sources = map(string.strip, config.limit_sources.split(','))
    run_scrapers(limit_sources)
Example #51
File: api.py Project: kuulemart/tp
    venues=os.path.join(api_v1, "venues"),
    venue=os.path.join(api_v1, "venues/<id:int>"),
    venue_nearby=os.path.join(api_v1, "venues/<id:int>/nearby"),
    categories=os.path.join(api_v1, "categories"),
    category=os.path.join(api_v1, "categories/<id:int>"),
    category_venues=os.path.join(api_v1, "categories/<id:int>/venues"),
    zips=os.path.join(api_v1, "zips"),
    zip=os.path.join(api_v1, "zips/<zip>"),
    zip_venues=os.path.join(api_v1, "zips/<zip>/venues"),
)


### setup


config = util.read_config("api")
log = util.config_logging(config).getLogger("server")
# connection pool
pool = psycopg2.pool.ThreadedConnectionPool(
    minconn=1, maxconn=int(config.max_connections or 10), dsn=config.db, cursor_factory=psycopg2.extras.RealDictCursor
)
# install plugin
install(bottle_pgpool.PgSQLPoolPlugin(pool))


###  helpers


def fq_url(*path, **subst):
    """
    Builds fully qualified url. Uses bottle request object for scheme and loc
Example #52
import util
import models
import os
from data import *

import time

import warnings
warnings.filterwarnings("ignore")

import torch
import torch.nn as nn

# config
config = util.read_config('./configs/predict.yaml')
torch.manual_seed(config.seed)
# log
log_path = './data/results/'
# log_path = config.log  + '2018-10-20-12:17:22/'
if not os.path.exists(log_path):
    os.mkdir(log_path)
logging = util.logging(log_path+'res_log.txt')  # log file for this run

# checkpoint
if config.checkpoint_restore:
    print('loading checkpoint from {}...'.format(config.checkpoint_restore))
    checkpoints = torch.load(config.checkpoint_restore)

# cuda
use_cuda = torch.cuda.is_available() and len(config.gpus) > 0
Example #53
 def _read_config(self):
     return util.read_config(os.path.join(self.app_data_dir, self.email, CFG_FILE_NAME))
Example #54
 def parse_config(self):
     self.cfg_dict_1 = util.read_config(self.config_file_1)
     self.cfg_dict_2 = util.read_config(self.config_file_2)
Example #55
#!/usr/bin/env python

import gzip
from pipeline import *
import util

config = util.read_config('import')
log = util.config_logging(config)

table = 'area.area'
columns=('area', 'zip', 'po_name', 'geom')
filename = 'bayareadata.gz'
area = 'sfbayarea'
db = util.DB.from_config(config)

log.info('importing file %r to table %r' % (filename, table))

# compose import pipeline
cat(gzip.open(filename)) | skip(head=2, tail=2) | split(sep='|') |\
    transform([lambda r: area, 0, 1, 2]) |\
    join('\t') | load_data(db, table, columns=columns, clean=True)
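The final line chains stages with |, which implies each stage object overloads __or__ so data flows left to right. A toy sketch of one way to implement that idiom (hypothetical names, not the project's pipeline module, which pushes the data from cat() through eagerly):

class Stage:
    def __init__(self, fn):
        self.fn = fn

    def __or__(self, other):
        # a | b composes left to right: apply a's function, then b's.
        return Stage(lambda data: other.fn(self.fn(data)))

    def __call__(self, data):
        return self.fn(data)

def split(sep):
    return Stage(lambda lines: (line.split(sep) for line in lines))

def join(sep):
    return Stage(lambda rows: (sep.join(row) for row in rows))

pipeline = split(sep='|') | join('\t')
print(list(pipeline(['a|b', 'c|d'])))  # ['a\tb', 'c\td']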