Code Example #1
 def testReadFilesWithPrefix(self):
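     # Create ten files that share a common prefix in a throwaway temporary
     # directory, then check that read_files_with_prefix finds exactly those.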
     testPrefix = "test_read_files_with_prefix_"
     with tempfile.TemporaryDirectory() as tmpdirname:
         for i in range(10):
             with open(tmpdirname + "/" + testPrefix + str(i) + ".timeline",
                       "w") as fp:
                 fp.write("Added file %d" % i)
         matching_files = common.read_files_with_prefix(tmpdirname + "/" +
                                                        testPrefix)
         self.assertEqual(len(matching_files), 10)
         self.assertIn(tmpdirname + "/" + testPrefix + "0.timeline",
                       matching_files)
         self.assertIn(tmpdirname + "/" + testPrefix + "9.timeline",
                       matching_files)
Code Example #2
        "-p",
        "--prefix",
        default="user",
        help="prefix for the automatically generated usernames. "
             "usernames will be <prefix>-001, <prefix>-002...")

    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    fn = args.file_prefix
    logging.info("Loading file or prefix %s" % fn)
    sel_file_list = common.read_files_with_prefix(fn)

    all_user_list = []
    all_rerun_list = []

    for i, filename in enumerate(sel_file_list):
        if "pipelinestate" in filename:
            continue
        logging.info("=" * 50)
        logging.info("Loading data from file %s" % filename)

        with gzip.open(filename, "rt") as gfp:
            entries = json.load(gfp, object_hook=bju.object_hook)

        # Obtain uuid and rerun information from entries
        curr_uuid_list, needs_rerun = common.analyse_timeline(entries)
        if len(curr_uuid_list) > 1:
            # hypothetical body for the truncated branch: a dump normally
            # holds a single user, so warn loudly about multi-user files
            logging.warning("Found %d distinct users in %s" %
                            (len(curr_uuid_list), filename))
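The entries in each .timeline file are MongoDB extended-JSON documents inside a gzipped array. A self-contained round-trip sketch, assuming bju is bson.json_util (the conventional alias in these scripts); the path and entry contents are illustrative only:

import gzip
import json

import bson.json_util as bju
from bson.objectid import ObjectId

# Write a tiny dump in the format the loader above expects.
entries = [{"_id": ObjectId(), "user_id": "illustrative-user",
            "metadata": {"key": "background/location"}}]
with gzip.open("/tmp/example.timeline", "wt") as fp:
    # bju.default encodes ObjectId (and datetime) values as extended JSON
    json.dump(entries, fp, default=bju.default)

# Read it back exactly the way the script does; bju.object_hook restores
# the ObjectId values from their extended-JSON encoding.
with gzip.open("/tmp/example.timeline", "rt") as fp:
    round_tripped = json.load(fp, object_hook=bju.object_hook)
assert round_tripped[0]["_id"] == entries[0]["_id"]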
Code Example #3
    parser = argparse.ArgumentParser()
    parser.add_argument("timeline_filename",
        help="the name of the file that contains the json representation of the timeline")
    parser.add_argument("-v", "--verbose", type=int,
        help="after how many lines we should print a status message.")

    parser.add_argument("-i", "--info-only", default=False, action='store_true',
        help="only print entry analysis")

    parser.add_argument("-p", "--pipeline-purge", default=False, action='store_true',
        help="purge the pipeline state as well")

    args = parser.parse_args()
    fn = args.timeline_filename
    logging.info("Loading file or prefix %s" % fn)
    sel_file_list = common.read_files_with_prefix(fn)

    ts_db = edb.get_timeseries_db()
    ats_db = edb.get_analysis_timeseries_db()
    udb = edb.get_uuid_db()
    psdb = edb.get_pipeline_state_db()
    db_array = [ts_db, ats_db, udb, psdb]

    for i, filename in enumerate(sel_file_list):
        if "pipelinestate" in filename:
            continue

        logging.info("=" * 50)
        logging.info("Deleting data from file %s" % filename)

        with gzip.open(filename, "rt") as gfp:
            entries = json.load(gfp, object_hook=bju.object_hook)
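The snippet ends once the entries are loaded. A hypothetical continuation of the deletion loop, assuming the edb getters return plain pymongo collections (delete_many and deleted_count are standard pymongo) and that entries are keyed on a user_id field; neither is confirmed by the excerpt:

        # Hypothetical continuation: find the users in this dump, then
        # delete their entries from every collection in db_array.
        curr_uuid_list = set(e["user_id"] for e in entries)
        for curr_uuid in curr_uuid_list:
            for db in db_array:
                result = db.delete_many({"user_id": curr_uuid})
                logging.info("Deleted %d entries for %s from %s" %
                             (result.deleted_count, curr_uuid, db.name))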
Code Example #4
        help='''
            the directory from which to read the input json files, one per user.
            the user email/label is between the prefix and the '.timeline' suffix,
            so for /tmp/filled_pop_Tour_0.timeline with prefix = '/tmp/filled_pop_',
            the email is 'Tour_0'
        ''')

    args = parser.parse_args()

    client_config = {
        "emission_server_base_url": args.remote_server,
        "register_user_endpoint": "/profile/create",
        "user_cache_endpoint": "/usercache/put"
    }

    client = escg.EmissionFakeDataGenerator(client_config)

    if args.input_file:
        regex = re.compile(r"(\S*).timeline")
        load_user(client, args.input_file, regex)
    else:
        assert args.input_prefix is not None
        regex = re.compile(r"{prefix}(\S*).timeline".format(prefix=args.input_prefix))
        matching_files = common.read_files_with_prefix(args.input_prefix)
        print("Found %d matching files for prefix %s" %
            (len(matching_files), args.input_prefix))
        for fn in matching_files:
            load_user(client, fn, regex)
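A quick check of what the compiled regex extracts, using the example values from the help text above:

import re

regex = re.compile(r"/tmp/filled_pop_(\S*)\.timeline")
match = regex.search("/tmp/filled_pop_Tour_0.timeline")
print(match.group(1))  # prints 'Tour_0'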