Example #1
def plot_table(path_dict, save_path):
    pd.set_option('display.precision', 4)
    pd.set_option('display.width', 40)
    pd.set_option('display.float_format', '{:,.3f}'.format)

    for i, act in enumerate(path_dict.keys()):
        row_labels, values_train, values_test = [], [], []
        for path in path_dict[act]:
            with open(path + '/results.json', 'r') as f:
                results = json.load(f)
                if (plot_it is not None) and (results['combinator']
                                              not in plot_it):
                    continue
                if i == 0:
                    col_labels = utils.fill_col_labels(results)
                temp_train, temp_test = utils.fill_row_values(
                    results, path, act)
                values_train.append(temp_train)
                values_test.append(temp_test)
                if 'test_acc_hr_0.0' in results:
                    temp_train, temp_test = utils.fill_row_values(results,
                                                                  path,
                                                                  act,
                                                                  hr=0.0)
                    values_train.append(temp_train)
                    values_test.append(temp_test)

        # create table
        table_train = utils.create_table(values_train, col_labels, act,
                                         'train')
        table_test = utils.create_table(values_test, col_labels, act, 'test')
        # save table
        utils.save_table(table_train, table_test, save_path, act)
Example #2
def create_database(args):
  file_struct.DEBUG = getattr(args,file_struct.debug_long)
  #Create tables in the database
  for i in range(0,len(file_struct.tables)):
    utils.create_table(file_struct.tables[i],
                      file_struct.PKs[i],file_struct.foreign_key_relations[i])

  #Add fields to each table in the database
  for j in range(0,len(file_struct.tables)):
    for i in range(0,(len(file_struct.table_fields[j]))):
      utils.add_field(file_struct.tables[j],
                      file_struct.table_fields[j][i][0],file_struct.table_fields[j][i][1])
Example #3
def render_cases(choice):
    if choice == 1:
        try:
            utils.create_database()
            utils.create_table()

        except Exception as e:
            print(e)
            print("UNABLE TO CREATE TABLE !!")

        finally:
            utils.input_and_save_user()
            print("Your membership has been created !! ")
            print("Have a nice day !")
            utils.display_main_menu()
            render_cases(int(input()))

    elif choice == 2:
        member_uuid = utils.login_member()
        if member_uuid:
            (book_name, fine) = utils.check_for_pending_books(member_uuid)
            if fine is not None:
                print("You have a pending book ")
                print("Book Name-> ", book_name)
                print("Due -> ", fine)
            utils.display_book_lending_options()
            handle_book_lend_cases(int(input()), member_uuid)

        else:
            print("User not found ! Please try again ")

    elif choice == 3:
        system('cls')
        print("Enter Username: "******"Enter Password: "******"admin"
                and admin_password == "admin") or utils.is_user_admin(
                    admin_username, admin_password):
            system('cls')
            utils.display_admin_options()
            handle_admin_cases(int(input()))
        else:
            print("Invalid Credentials. Try again.")
            utils.display_main_menu()
            render_cases(int(input()))

    elif choice == 4:
        exit(0)

    else:
        print("Invalid Input !!")
Example #4
    def init_storage(self):
        #change: create the "storage" dir first, then do the rest
        utils.create_doc_dir(self.crawl_method,
                             self.doc_paths)  #create file storage dir
        self.url_file = open('storage/urls_{}.txt'.format(self.crawl_method),
                             'a')

        #create/connect to db file
        os.chdir('storage')
        self.conn = utils.create_conn(self.db_path)
        os.chdir('..')
        utils.create_table(self.conn, self.table_name)  #create url table
Example #5
    def pretty_print(df):
        """
        Display the sample in an appealing manner, given a DataFrame. \
        It is only displayed prettily here; the `view` module only \
        shows tables.
        """

        return create_table(df)
Example #6
def main(old_meo_obj, target_meo_obj):

    meo_features = [
        'temperature', 'dew_point', 'humidity', 'pressure', 'wind_speed',
        'wind_bearing', 'cloud_cover', 'visibility'
    ]

    max_time = session.query(func.max(
        old_meo_obj.timestamp)).scalar().strftime('%Y-%m-%d %H:%M:%S')
    min_time = session.query(func.min(
        old_meo_obj.timestamp)).scalar().strftime('%Y-%m-%d %H:%M:%S')
    tz = pytz.timezone('America/Los_Angeles')
    time_df = pd.date_range(start=min_time, end=max_time, freq='1H')
    time_list = sorted(list(set([tz.localize(x) for x in time_df])))
    print(len(time_list))
    """ !!! Be careful, create table would overwrite the original table """
    create_table(target_meo_obj)
    interpolate_time(old_meo_obj, target_meo_obj, time_list, meo_features)
Example #7
    def __create_table(self, num_players, dealer_index):
        chip_counts = []
        for i in range(num_players):
            chip_counts.append(Currency(1000))

        tuple = create_table(chip_counts, dealer_index)
        self.limit = tuple[0]
        self.table = tuple[1]
        self.players = tuple[2]
Example #8
def classify_write(bucket_name, prefix, selected_pdf_folder, prediction_client,
                   storage_client, bq_client, bq_dataset, bq_table,
                   score_threshold, service_account, input_path,
                   model_full_id):
    bucket = storage_client.bucket(bucket_name)
    params = {}
    lines = []

    schema = [
        bigquery.SchemaField('file', 'STRING', mode='REQUIRED'),
        bigquery.SchemaField('class', 'STRING', mode='REQUIRED'),
        bigquery.SchemaField('class_confidence', 'STRING', mode='REQUIRED'),
    ]
    table = utils.create_table(bq_client, bq_dataset, bq_table, schema)
    if score_threshold:
        params = {"score_threshold": str(score_threshold)}

    for blob in bucket.list_blobs(prefix=str(prefix + "/")):
        if blob.name.endswith(".png"):
            logger.info(os.path.basename(blob.name))
            content = utils.sample_handler(storage_client, bucket_name,
                                           blob.name)
            payload = {"image": {"image_bytes": content}}
            response = prediction_client.predict(model_full_id, payload,
                                                 params)
            for result in response.payload:
                logger.info("File location: {}".format(
                    os.path.join('gs://', bucket_name, blob.name)))
                logger.info("Predicted class name: {}".format(
                    result.display_name))
                logger.info("Predicted class score: {}\n".format(
                    result.classification.score))

                if result.display_name == "datasheets":
                    pass
                else:
                    # Copy from the pdf folder to the selected_pdf_folder
                    filename = os.path.basename(blob.name).replace(
                        '.png', '.pdf')
                    input_pdf_path = os.path.join(input_path, filename)
                    selected_pdf_path = os.path.join(selected_pdf_folder,
                                                     filename)
                    bucket_input, blob_input = utils.get_bucket_blob(
                        input_pdf_path)
                    bucket_output, blob_output = utils.get_bucket_blob(
                        selected_pdf_path)

                    utils.copy_blob(bucket_input, blob_input, bucket_output,
                                    blob_output, service_account)

                rows_to_insert = [
                    (str(blob.name).replace(".png", ".pdf").replace(
                        prefix, "").replace("/", ""), result.display_name,
                     result.classification.score),
                ]
                load_job = bq_client.insert_rows(table, rows_to_insert)
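For orientation, here is a plausible sketch of what a utils.create_table(bq_client, dataset, table, schema) helper like the one called above could do with the google-cloud-bigquery client. This is an assumption for illustration, not the project's actual implementation:

from google.cloud import bigquery
from google.cloud.exceptions import NotFound


def create_table(bq_client, bq_dataset, bq_table, schema):
    # Hypothetical helper: return the table, creating it first if it is missing.
    table_ref = bq_client.dataset(bq_dataset).table(bq_table)
    try:
        return bq_client.get_table(table_ref)  # table already exists
    except NotFound:
        table = bigquery.Table(table_ref, schema=schema)
        return bq_client.create_table(table)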
Example #9
    def test_simple_serialize(self):
        result = create_table([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000,
            1000, 1000], 0)
        table = result[1]
        state = TableState(table)
        data = dumps(state)

        new_state = loads(data)
        self.assertEqual(state.name, new_state.name)
        self.assertEqual(10, len(new_state.seats))
Example #10
def plot_table_attention(path_dict, save_path):
    for i, act in enumerate(path_dict.keys()):
        row_labels, values_train, values_test = [], [], []
        if act not in COMBINED_ACT:
            continue
        for path in path_dict[act]:
            # print(act)
            with open(f'{path}/results.json', 'r') as f:
                results = json.load(f)
            if results['combinator'] not in ATT_LIST or results[
                    'combinator'] not in plot_it:
                continue
            if i == 0:
                col_labels = utils.fill_col_labels(results, att=1)
            temp_train, temp_test = utils.fill_row_values(results,
                                                          path,
                                                          act,
                                                          att=1)
            values_train.append(temp_train)
            values_test.append(temp_test)
            if 'test_acc_hr_0.0' in results:
                temp_train, temp_test = utils.fill_row_values(results,
                                                              path,
                                                              act,
                                                              att=1,
                                                              hr=0.0)
                values_train.append(temp_train)
                values_test.append(temp_test)

        # create table
        table_train = utils.create_table(values_train,
                                         col_labels,
                                         act,
                                         'train',
                                         att=1)
        table_test = utils.create_table(values_test,
                                        col_labels,
                                        act,
                                        'test',
                                        att=1)
        # save table
        utils.save_table(table_train, table_test, save_path + 'ATT_', act)
Example #11
def plot_table_max(path_dict, save_path, limit):
    res_json = ['results.json', 'results_hr.json']
    row_labels, values_train, values_test = [], [], []
    for i, act in enumerate(path_dict.keys()):
        for path in path_dict[act]:
            for res in res_json:
                try:
                    with open(f'{path}/{res}', 'r') as f:
                        results = json.load(f)
                        # att = 2 if res == 'results_hr.json' else 0
                except Exception as e:
                    continue
                if i == 0:
                    col_labels = utils.fill_col_labels(results,
                                                       max_=True,
                                                       att=2)
                temp_train, temp_test = utils.fill_row_values(results,
                                                              path,
                                                              act,
                                                              max_=True,
                                                              att=2)
                if True not in np.where(temp_test[8] >= limit, True, False):
                    continue
                values_train.append(temp_train)
                values_test.append(temp_test)

    # create table
    table_train = utils.create_table(values_train,
                                     col_labels,
                                     '',
                                     'train',
                                     max_=True)
    table_test = utils.create_table(values_test,
                                    col_labels,
                                    '',
                                    'test',
                                    max_=True)

    # save table
    utils.save_table(table_train, table_test, save_path, 'best')
Example #12
def plot_table(path_dict, save_path):
    pd.set_option('display.precision', 4)
    pd.set_option('display.width', 40)

    for i, act in enumerate(path_dict.keys()):
        row_labels, values_train, values_test = [], [], []
        for path in path_dict[act]:
            with open(path + '/results.json', 'r') as f:
                results = json.load(f)
                if i == 0:
                    col_labels = utils.fill_col_labels(results)
                temp_train, temp_test = utils.fill_row_values(
                    results, path, act)
                values_train.append(temp_train)
                values_test.append(temp_test)

        # create table
        table_train = utils.create_table(values_train, col_labels, act,
                                         'train')
        table_test = utils.create_table(values_test, col_labels, act, 'test')
        # save table
        utils.save_table(table_train, table_test, save_path, act)
Example #13
def create_database(args):
    if args.lite:
        if not os.path.exists(fs.SQLite_DB_path):
            os.mkdir(fs.SQLite_DB_path)
        if os.path.exists(fs.SQLite_DB_path + "/" + fs.DB_name):
            print("{0} already exists in {1} , exiting".format(
                fs.DB_name, fs.SQLite_DB_path))
            exit()

    print("Creating {0} now".format(fs.DB_name))

    fs.DEBUG = getattr(args, fs.debug_long)
    #Create tables in the database
    for i in range(0, len(fs.tables)):
        utils.create_table(fs.tables[i], fs.PKs[i],
                           fs.foreign_key_relations[i], args)

    #Add fields to each table in the database
    for j in range(0, len(fs.tables)):
        for i in range(0, (len(fs.table_fields[j]))):
            utils.add_field(fs.tables[j], fs.table_fields[j][i][0],
                            fs.table_fields[j][i][1], args)
Example #14
    def worker(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except OperationalError as e:
            if "no such table" in str(e):
                logger.warning(
                    "Tables are not initialized. Running SQL queries...")

                query = """
                        CREATE TABLE IF NOT EXISTS articles (
                            id INTEGER NOT NULL PRIMARY KEY,
                            name TEXT NOT NULL,
                            content TEXT NOT NULL,
                            image TEXT NOT NULL,
                            source TEXT NOT NULL,
                            date TEXT NOT NULL
                        );
                        """

                create_table(query)
                logger.warning('Restarting script...')
                func(*args, **kwargs)
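The worker shown above closes over func, which suggests it is the inner function of a retry decorator. A minimal sketch of how such a decorator is usually assembled; the ensure_tables name and the list_articles function are assumptions, not part of the original project:

import functools


def ensure_tables(func):
    # Hypothetical outer decorator wrapping the worker above: catch the
    # "no such table" OperationalError, run the CREATE TABLE query, retry.
    @functools.wraps(func)
    def worker(*args, **kwargs):
        return func(*args, **kwargs)  # the try/except retry body from Example #14 goes here
    return worker


@ensure_tables
def list_articles():
    # Hypothetical query function that may fail before the schema exists.
    pass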
Example #15
def plot_table_attention(path_dict, save_path):
    res_json = ['results.json', 'results_hr.json']
    for i, act in enumerate(path_dict.keys()):
        row_labels, values_train, values_test = [], [], []
        if act not in COMBINED_ACT:
            continue
        for path in path_dict[act]:
            # print(act)
            for res in res_json:
                try:
                    with open(f'{path}/{res}', 'r') as f:
                        results = json.load(f)
                        # att = 1 if res == 'results_hr.json' else 0
                except Exception as e:
                    continue
                if results['combinator'] not in ATT_LIST:
                    continue
                if i == 0:
                    col_labels = utils.fill_col_labels(results, att=1)
                temp_train, temp_test = utils.fill_row_values(results,
                                                              path,
                                                              act,
                                                              att=1)
                values_train.append(temp_train)
                values_test.append(temp_test)

        # create table
        table_train = utils.create_table(values_train,
                                         col_labels,
                                         act,
                                         'train',
                                         att=1)
        table_test = utils.create_table(values_test,
                                        col_labels,
                                        act,
                                        'test',
                                        att=1)
        # save table
        utils.save_table(table_train, table_test, save_path + 'ATT_', act)
Example #16
def plot_table_max(path_dict, save_path, limit):
    row_labels, values_train, values_test = [], [], []
    for i, act in enumerate(path_dict.keys()):
        for path in path_dict[act]:
            with open(f'{path}/results.json', 'r') as f:
                results = json.load(f)
            if (plot_it is not None) and (results['combinator']
                                          not in plot_it):
                continue
            if i == 0:
                col_labels = utils.fill_col_labels(results, max_=True, att=2)
            temp_train, temp_test = utils.fill_row_values(results,
                                                          path,
                                                          act,
                                                          max_=True,
                                                          att=2)
            # print(temp_test[9])
            if True not in np.where(temp_test[12] >= limit, True, False):
                continue
            values_train.append(temp_train)
            values_test.append(temp_test)

    # create table
    table_train = utils.create_table(values_train,
                                     col_labels,
                                     '',
                                     'train',
                                     max_=True)
    table_test = utils.create_table(values_test,
                                    col_labels,
                                    '',
                                    'test',
                                    max_=True)

    # save table
    utils.save_table(table_train, table_test, save_path, 'best')
Example #17
def render_table(dataset_key):
    """
    Create a display for the chosen dataset.

    Args:
        dataset_key (str): Value from the dropdown. It is the Redis \
                           key for the dataset.

    Returns:
        list: A list of dash components.
    """

    if dataset_key is None:
        return [html.H4("Nothing selected.")]

    df = dill.loads(redis_conn.get(dataset_key))

    return [
        html.Br(),
        create_table(df),
    ]
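render_table above only forwards the DataFrame to create_table. A plausible sketch of such a helper that renders the first rows as a plain Dash HTML table; this is an illustration under that assumption, not the project's actual utils code:

import dash_html_components as html


def create_table(df, max_rows=10):
    # Hypothetical helper: show a DataFrame as a simple Dash HTML table.
    return html.Table(
        # header row
        [html.Tr([html.Th(col) for col in df.columns])] +
        # one row per record, capped at max_rows
        [html.Tr([html.Td(df.iloc[i][col]) for col in df.columns])
         for i in range(min(len(df), max_rows))]
    )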
Example #18
def crawl_save_upload():
    '''Call the crawler functions to scrape, save, and upload the data files'''
    print('-----数据抓取开始-----')
    wb = Workbook()
    engine, Base, session = get_mysql_connection()
    Order, Website = create_table(engine, Base)
    add_default_data(session, Website)
    codemart_crawler.main(wb, session, Order, Website)
    oschina_crawler.main(wb, session, Order, Website)
    rrkf_crawler.main(wb, session, Order, Website)
    shixian_crawler.main(wb, session, Order, Website)
    wywaibao_crawler.main(wb, session, Order, Website)
    yuanjisong_crawler.main(wb, session, Order, Website)
    print('-----数据抓取结束-----')

    print('-----文件保存开始-----')
    delete_data()
    now = datetime.now()
    file = r'data/%s.xlsx' % now.strftime("%Y-%m-%d %H-%M-%S")
    wb.save(file)
    time.sleep(3)
    print('-----文件保存结束-----')

    print('-----文件上传开始-----')
    media_id = get_media_id(file)
    if isinstance(media_id, str):
        upload_result = send_file(media_id)
        if upload_result == True:
            print('文件上传成功:%s' % file)
        else:
            message = '文件上传失败:%s' % upload_result[1]
            print(message)
            send_message(message)
    else:
        message = '获取media_id失败:%s' % media_id[1]
        print(message)
        send_message(message)

    print('-----文件上传结束-----')
Example #19
r = sim_pdf.fitTo(dataset,
              RF.Save(True),
              RF.Minimizer("Minuit2", "Migrad"),
              RF.NumCPU(1),
              #RF.PrintLevel(3),
              #RF.Offset(True)
              )

for i, j in product(range(nBins_Y), range(nBins_PT)):
    Sig_Yields[i][j] = w.var("Nsig"+sample[i][j]).getVal()
    Sig_Yields_err[i][j] = w.var("Nsig"+sample[i][j]).getError()
    Bgr_Yields[i][j] = w.var("Ncombi"+sample[i][j]).getVal()
    Bgr_Yields_err[i][j] = w.var("Ncombi"+sample[i][j]).getError()

utils.create_table(Sig_Yields, Sig_Yields_err, "Signal_Yields.txt",nBins_Y, nBins_PT)

f_res = open("Fit_Result.txt", "w")
for k in range(len(r.floatParsFinal())-1):
    f_res.write(str(r.floatParsFinal()[k])+"\n")

f_res.write("\n")
f_res.write("Fit status: "+str(r.covQual())+"\n")
f_res.close()

components = {"Signal": (RF.kSolid, R.kRed),
              "Background": (RF.kDashed, R.kOrange),
              }

for i, j in product(range(nBins_Y), range(nBins_PT)):
    components = {"Signal"+sample[i][j]: (RF.kSolid, R.kRed),
Example #20
def main(argv):
    print("starting db init")
    conn = utils.create_connection(db_file)
    utils.create_table(conn)
    get_messages(conn)
Example #21
def test_world():

    # Test World
    # ----------

    env = UnityEnvironment(file_name=ENV)

    brain_name = env.brain_names[0]
    brain = env.brains[brain_name]

    env_info = env.reset(train_mode=True)[brain_name]
    num_agents = len(env_info.agents)
    print('\nNumber of Agents: ', num_agents)

    def take_actions(env_info, brain):
        num_agents = len(env_info.agents)
        action_size = brain.vector_action_space_size
        actions = np.random.randn(num_agents, action_size)
        actions = np.clip(actions, -1, 1)
        env_info = env.step(actions)[brain_name]
        next_states = env_info.vector_observations
        return env_info, actions, next_states

    action_size = brain.vector_action_space_size
    states = env_info.vector_observations
    state_vector_names = [
        'racket x pos', 'racket y pos', 'racket x velocity',
        'racket y velocity', 'ball x pos', 'ball y pos', 'ball x velocity',
        'ball y velocity'
    ]

    print('\nA state vector for one of the agents looks like:')
    state = states[0].reshape(3, 8)
    table = create_table(state, state_vector_names)
    print(table)

    # But only the last row provides new information to each state, so we could simply get those values
    print('\nKeeping only the last row would be: ')
    state0 = states[0].reshape(3, 8)[-1]
    table0 = create_table(state0, state_vector_names)
    print(table0)

    # If we take a step in the environment
    env_info, _, next_states = take_actions(env_info, brain)

    print('\n\nTaking 1 action, the state vector would look like:')
    state1 = next_states[0].reshape(3, 8)
    table1 = create_table(state1, state_vector_names)

    print('t = 0')
    print(table)
    print('t = 1')
    print(table1)
    print(
        'Only the last row is providing new information - it has just shifted \n\n'
    )
    print(
        'Although, we can think that keeping the three rows provides temporal relationships \n\n'
    )

    # If we take another step in the environment
    env_info, _, next_states = take_actions(env_info, brain)

    print('\n\nTaking another action to be sure')
    state2 = next_states[0].reshape(3, 8)
    table2 = create_table(state2, state_vector_names)

    print('t = 0')
    print(table)
    print('t = 1')
    print(table1)
    print('t = 2')
    print(table2)
    print('Yes, only the last row is providing new information !')

    env.close()

    # Test Agent
    # ----------

    state_size, action_size = brain.vector_observation_space_size, brain.vector_action_space_size
    agent = Agent(num_agents=num_agents,
                  state_size=state_size,
                  action_size=action_size)

    print('\n\nCapacity of the Actor (# of parameters): ',
          count_parameters(agent.actor_local))
    print('Capacity of the Critic (# of parameters): ',
          count_parameters(agent.critic_local))
    return
Example #22
def main():
    """Runs queries to create training and prediction tables from clean data."""

    # Load config shared by all steps of feature creation.
    config_path = utils.parse_arguments(sys.argv).config_path
    config = utils.read_config(config_path)
    # Project-wide config.
    global_config = config['global']
    # Path to SQL files.
    queries_path = config['file_paths']['queries']
    # SQL files for different pipeline steps.
    query_files = config['query_files']
    # Parameters unique to individual pipeline steps.
    query_params = config['query_params']

    # Create the dataset to hold data for the pipeline run.
    utils.create_dataset(
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'])

    # Query to remove nulls from the target column (company_response_to_consumer)
    # and from complaint_narrative column.
    remove_nulls_params = utils.merge_dicts(global_config,
                                            query_params['remove_nulls'])

    utils.create_table(
        query_path=os.path.join(queries_path, query_files['remove_nulls']),
        query_params=remove_nulls_params,
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'],
        destination_table=global_config['nulls_removed_table'],
        partition_field=None)

    # Query to cleanup the categories of issue, subissue, product, subproduct.
    utils.create_table(
        query_path=os.path.join(queries_path, query_files['clean_categories']),
        query_params=global_config,
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'],
        destination_table=global_config['cleaned_features_table'],
        partition_field=None)

    # Query to merge the cleaned features and the table with nulls removed.
    utils.create_table(
        query_path=os.path.join(queries_path, query_files['combine_tables']),
        query_params=global_config,
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'],
        destination_table=global_config['clean_table'],
        partition_field=None)

    # Query to split the clean dataset into training and prediction datasets.
    # The training dataset will be fed to the AutoML Tables for training and
    # the prediction dataset will be used for batch prediction.
    features_split_params = utils.merge_dicts(
        global_config, query_params['train_predict_split'])

    utils.create_table(
        query_path=os.path.join(queries_path,
                                query_files['train_predict_split']),
        query_params=features_split_params,
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'],
        destination_table=global_config['train_predict_split'],
        partition_field=None)

    # Query to create the prediction table.
    features_split_params = utils.merge_dicts(
        global_config, query_params['train_predict_split'])

    utils.create_table(
        query_path=os.path.join(queries_path,
                                query_files['prediction_features']),
        query_params=features_split_params,
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'],
        destination_table=global_config['features_predict_table'],
        partition_field=None)

    # Query to create the training table along with the manual split into train,
    # validation and test rows for the AutoML tables.
    features_split_params = utils.merge_dicts(
        global_config, query_params['train_predict_split'])

    utils.create_table(
        query_path=os.path.join(queries_path,
                                query_files['training_features']),
        query_params=features_split_params,
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'],
        destination_table=global_config['features_train_table'],
        partition_field=None)
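main() above reads everything from a config file. A hedged sketch of the shape that config is expected to have, reconstructed only from the keys the code looks up; every value is a placeholder:

# Hypothetical config structure implied by the lookups in the pipeline above:
config = {
    "global": {
        "destination_project_id": "my-project",
        "destination_dataset": "complaints",
        "nulls_removed_table": "nulls_removed",
        "cleaned_features_table": "cleaned_features",
        "clean_table": "clean",
        "train_predict_split": "train_predict_split",
        "features_predict_table": "features_predict",
        "features_train_table": "features_train",
    },
    "file_paths": {"queries": "queries/"},
    "query_files": {
        "remove_nulls": "remove_nulls.sql",
        "clean_categories": "clean_categories.sql",
        "combine_tables": "combine_tables.sql",
        "train_predict_split": "train_predict_split.sql",
        "prediction_features": "prediction_features.sql",
        "training_features": "training_features.sql",
    },
    "query_params": {
        "remove_nulls": {},
        "train_predict_split": {},
    },
}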
Example #23
                            category='',
                            pub_time=pub_time,
                            is_valid=is_valid,
                            is_delete=False if is_valid else True)
                        order.website = website
                        session.add(order)
                        if is_valid == True:
                            sheet.append(
                                [count, desc, link, pub_time, contact, ''])
                            count += 1
                else:
                    message = '人人开发详情爬取第%d行出错:%s' % (details[0], details[1])
                    print(message)
                    send_message(message)
            session.commit()
        elif isinstance(info_list, tuple):
            message = '人人开发爬取第%d行出错:%s' % (info_list[0], info_list[1])
            print(message)
            send_message(message)
    print('结束爬取人人开发订单')


if __name__ == '__main__':
    wb = Workbook()
    engine, Base, session = get_mysql_connection()
    Order, Website = create_table(engine, Base)
    add_default_data(session, Website)
    main(wb, session, Order, Website)
    now = datetime.now()
    wb.save(r'data/%s.xlsx' % now.strftime("%Y-%m-%d %H-%M-%S"))
Example #24
w_import = getattr(w, "import")

w.factory("x_M[%f,%f]" % (x_M_LL, x_M_UL))
x_M = w.var("x_M")
w.factory("x_Y[%f,%f]" % (x_Y_LL, x_Y_UL))
x_Y = w.var("x_Y")
w.factory("x_PT[%f,%f]" % (x_PT_LL, x_PT_UL))
x_PT = w.var("x_PT")

f_input = R.TFile(local_dir + "/JPsi_mc_sub.root")
sel_tree = f_input.subTree
mc_tree = f_input.MCTree

sel_cut = ""

bin_cut = utils.define_cuts(nBins_Y, nBins_PT, x_Y, x_PT)
sample = utils.define_samples(nBins_Y, nBins_PT)
efficiency = [[0 for x in xrange(nBins_Y)] for x in xrange(nBins_PT)]
efficiency_err = [[0 for x in xrange(nBins_Y)] for x in xrange(nBins_PT)]

for i, j in product(range(nBins_Y), range(nBins_PT)):
    cut = sel_cut + bin_cut[i][j]
    N_sel = sel_tree.GetEntries(cut)
    N_init = mc_tree.GetEntries(bin_cut[i][j])
    efficiency[i][j] = float(N_sel) / float(N_init)
    efficiency_err[i][j] = efficiency[i][j] * R.TMath.sqrt(1. / N_sel +
                                                           1. / N_init)

utils.create_table(efficiency, efficiency_err, "Efficiency.txt", nBins_Y,
                   nBins_PT)
Example #25
yields = utils.parse_table("../fitting/Signal_Yields.txt",nBins_Y, nBins_PT)[0]
yields_err = utils.parse_table("../fitting/Signal_Yields.txt",nBins_Y, nBins_PT)[1]

yield_distr = R.TH2F("yield_distr", "Yield distribution", nBins_Y, x_Y_LL , x_Y_UL , nBins_PT, x_PT_LL , x_PT_UL )
yield_distr.GetXaxis().SetTitle("#eta")
yield_distr.GetYaxis().SetTitle("P^{T} [GeV/c]")

yield_distr_table = [[0 for x in xrange(nBins_Y)] for x in xrange(nBins_PT)] 
yield_distr_table_err = [[0 for x in xrange(nBins_Y)] for x in xrange(nBins_PT)] 

for i, j in product(range(nBins_Y), range(nBins_PT)):
    yield_distr_table[i][j]=float(yields[i][j])/float(efficiencies[i][j])
    yield_distr_table_err[i][j] = float(yields[i][j])/float(efficiencies[i][j])*R.TMath.sqrt((float(efficiencies_err[i][j])/float(efficiencies[i][j]))**2+(float(yields_err[i][j])/float(yields[i][j]))**2)
    yield_distr.SetBinContent(i+1, j+1, yield_distr_table[i][j])
    yield_distr.SetBinError(i+1, j+1, yield_distr_table_err[i][j])


txt = [[0 for x in xrange(nBins_Y)] for x in xrange(nBins_PT)] 

c = R.TCanvas()
R.gStyle.SetOptStat(0)
yield_distr.Draw("COLZ")
for i, j in product(range(nBins_Y), range(nBins_PT)):
    txt[i][j] = R.TText(yield_distr.GetXaxis().GetBinLowEdge(i+1), yield_distr.GetYaxis().GetBinCenter(j+1), "%d +/- %d"%(yield_distr.GetBinContent(i+1,j+1), yield_distr.GetBinError(i+1,j+1)))
    txt[i][j].Draw("SAME")
    c.Update()
c.SaveAs("Yield_distr.pdf")


utils.create_table(yield_distr_table, yield_distr_table_err, "Final_Yield_Distr.txt",nBins_Y, nBins_PT)
Example #26
    def init(self):
        utils.create_table(self.sql, self.table_name)
Example #27
def render_table(api_choice, user_id):
    """
    Create a display for the chosen dataset.

    Args:
        api_choice (str): Value from the dropdown.
        user_id (str): Session/user id.

    Returns:
        list: A list of dash components.
    """

    if api_choice is None:
        return [html.H4("Nothing selected.")]

    if api_choice == "twitter_api":
        api = pickle.loads(r.get(f"{user_id}_{api_choice}_handle"))

        return pretty_print_tweets(api, 5)

    elif api_choice == "reddit_api":
        # No need to get the api here
        # TODO: But maybe change this?

        return [
            html.H4("Write the name of a subreddit:"),
            dcc.Input(
                id="subreddit_choice",
                type="text",
                value="",
            ),
            html.Button("Gimme dem reddits", id="reddit_submit"),
            html.Br(),
            html.Br(),
            html.Div(id="subreddit_posts"),
        ]

    elif api_choice == "spotify_api":
        spotify = pickle.loads(r.get(f"{user_id}_{api_choice}_handle"))
        top_playlists = spotify.category_playlists(
            "toplists")["playlists"]["items"]

        posts = [
            html.Div([
                dbc.Card([
                    dbc.CardHeader([
                        html.H4(playlist["name"]),
                        html.A("listen on Spotify",
                               href=playlist["external_urls"]["spotify"]),
                    ]),
                    dbc.CardBody([
                        dbc.CardTitle(
                            f"Owner: {playlist['owner']['display_name']}"),
                        dbc.CardText(
                            f"{playlist['tracks']['total']} total tracks"),
                    ]),
                ]),
                html.Br(),
            ]) for playlist in top_playlists
        ]
        return posts

    elif api_choice == "quandl_api":
        df = get_data(api_choice, user_id)

    else:
        df = get_data(api_choice, user_id)

    if df is None:
        return [html.H4("Nothing to display")]

    df = df[df.columns[:10]]
    return [
        html.Br(),
        create_table(df),
    ]
Example #28
    # bulk stress profile
    stress_bulk_parser = stress_sub_parser.add_parser(
        'bulk', help='Bulk profile of stress')

    # incremental stress profile
    stress_incr_parser = stress_sub_parser.add_parser(
        'increment', help='Incremental profile of stress')

    args = parser.parse_args()
    print args

    if args.cmd_name == 'create':
        logging.debug('Create command')
        if args.obj_type == 'table':
            utils.create_table(table_path_prefix=args.prefix,
                               start_idx=args.startidx,
                               num_tables=args.numtables)
        elif args.obj_type == 'volume':
            utils.create_volume(volume_path_prefix=args.prefix,
                                start_idx=args.startidx,
                                num_volumes=args.numvolumes)
        else:
            logging.error('Unrecognized object. Cannot create.')
            sys.exit(-1)
    elif args.cmd_name == 'delete':
        logging.debug('Delete command')
        if args.obj_type == 'table':
            utils.delete_table(table_path_prefix=args.prefix,
                               start_idx=args.startidx,
                               num_tables=args.numtables)
        elif args.obj_type == 'volume':
Example #29
w_import = getattr(w, "import")


w.factory("x_M[%f,%f]"%(x_M_LL, x_M_UL))
x_M = w.var("x_M")
w.factory("x_Y[%f,%f]"%(x_Y_LL, x_Y_UL))
x_Y = w.var("x_Y")
w.factory("x_PT[%f,%f]"%(x_PT_LL, x_PT_UL))
x_PT = w.var("x_PT")


f_input = R.TFile(local_dir + "/JPsi_mc_sub.root")
sel_tree = f_input.subTree
mc_tree = f_input.MCTree

sel_cut = ""

bin_cut = utils.define_cuts(nBins_Y, nBins_PT, x_Y, x_PT)
sample = utils.define_samples(nBins_Y, nBins_PT)
efficiency = [[0 for x in xrange(nBins_Y)] for x in xrange(nBins_PT)] 
efficiency_err = [[0 for x in xrange(nBins_Y)] for x in xrange(nBins_PT)] 

for i, j in product(range(nBins_Y), range(nBins_PT)):
    cut = sel_cut + bin_cut[i][j]
    N_sel = sel_tree.GetEntries(cut)
    N_init = mc_tree.GetEntries(bin_cut[i][j])
    efficiency[i][j] = float(N_sel)/float(N_init)
    efficiency_err[i][j] = efficiency[i][j]*R.TMath.sqrt(1./N_sel+1./N_init)

utils.create_table(efficiency, efficiency_err, "Efficiency.txt",nBins_Y, nBins_PT)
Example #30
import sys
import getpass

from utils import get_report, create_table, clean, create_cli_table, display_cli_menu


def help_me():
    print('usage: \n python reportdroid.py -c [for CLI] \n python reportdroid.py -v [for html report]')
    sys.exit(0)


if len(sys.argv) < 2:
    help_me()

if sys.argv[1] == '-v':
    # visual version
    clean()
    password = getpass.getpass("Type Jenkins Password: ")
    create_table(get_report(password))  # reconstructed: the listing masked this call

elif sys.argv[1] == '-c':
    # CLI version; the original also warns:
    # "please run: \"pip install PyInquirer\" first if haven't done already"
    # when PyInquirer is not installed.
    password = getpass.getpass("Type Jenkins Password: ")
    report_map = create_cli_table(get_report(password))
    display_cli_menu(report_map)

else:
    help_me()
Example #31
import utils as ut

### read data

ii = ut.read_graph("ii")
ui = ut.read_graph("ui")

### Hypergeom Test

ii_lou = map(lambda x: ut.hypergeom_test(ii, x),
             filter(ut.check_length_mod, ut.louvain(ii)))
ii_mcl = map(lambda x: ut.hypergeom_test(ii, x),
             filter(ut.check_length_mod, ut.mcl(ii)))

ui_lou = map(lambda x: ut.hypergeom_test(ui, x),
             filter(ut.check_length_mod, ut.louvain(ui)))
ui_mcl = map(lambda x: ut.hypergeom_test(ui, x),
             filter(ut.check_length_mod, ut.mcl(ui)))

# Create tables

ii_mod = ut.create_table("ii_mod", list(ii_lou), list(ii_mcl))
ui_mod = ut.create_table("ui_mod", list(ui_lou), list(ui_mcl))

# Visualize clusters

ut.louvain(ii, 'ii', viz=True)
ut.louvain(ui, 'ui', viz=True)
ut.mcl(ii, viz=True)
ut.mcl(ui, viz=True)
Example #32
def detect_object(gcs_image_folder, gcs_cropped_image_folder, main_project_id,
                  model_id, bq_dataset_output, bq_table_output,
                  prediction_client, storage_client, bq_client):

    match = re.match(r'gs://([^/]+)/(.+)', gcs_image_folder)
    bucket_name = match.group(1)
    prefix = match.group(2)
    dataset_ref = bq_client.dataset(bq_dataset_output)
    table_ref = dataset_ref.table(bq_table_output)
    bucket = storage_client.bucket(bucket_name)
    params = {"timeout": "60.0s"}
    lines = []

    schema = [
        bigquery.SchemaField('file', 'STRING', mode='REQUIRED'),
        bigquery.SchemaField('object', 'STRING', mode='REQUIRED'),
        bigquery.SchemaField('confidence', 'STRING', mode='REQUIRED'),
        bigquery.SchemaField('x_min', 'STRING', mode='REQUIRED'),
        bigquery.SchemaField('x_max', 'STRING', mode='REQUIRED'),
        bigquery.SchemaField('y_min', 'STRING', mode='REQUIRED'),
        bigquery.SchemaField('y_max', 'STRING', mode='REQUIRED'),
    ]
    table = utils.create_table(bq_client, bq_dataset_output, bq_table_output,
                               schema)

    for blob in bucket.list_blobs(prefix=str(prefix + "/")):
        if blob.name.endswith(".png"):
            logger.info("File location: {}".format(
                os.path.join('gs://', bucket_name, blob.name)))
            content = utils.sample_handler(storage_client, bucket_name,
                                           blob.name)
            name = 'projects/{}/locations/us-central1/models/{}'.format(
                main_project_id, model_id)
            payload = {'image': {'image_bytes': content}}
            params = {}
            request = prediction_client.predict(name, payload, params)

            for result in request.payload:
                logger.info("Figure detected in file.")
                rows_to_insert = [
                    (str(blob.name).replace(".png", ".pdf").replace(prefix,"").replace("/",""), \
                     result.display_name, \
                     result.image_object_detection.score, \
                     result.image_object_detection.bounding_box.normalized_vertices[0].x, result.image_object_detection.bounding_box.normalized_vertices[1].x, \
                     result.image_object_detection.bounding_box.normalized_vertices[0].y, result.image_object_detection.bounding_box.normalized_vertices[1].y),
                ]
                load_job = bq_client.insert_rows(table, rows_to_insert)

                # As below,  crop the object and save the cropped part as a separated image file
                file_name = blob.name
                _, temp_local_filename = tempfile.mkstemp()
                blob.download_to_filename(temp_local_filename)
                im = Image.open(temp_local_filename)
                width, height = im.size
                r_xmin = width * result.image_object_detection.bounding_box.normalized_vertices[
                    0].x
                r_ymin = height * result.image_object_detection.bounding_box.normalized_vertices[
                    0].y
                r_xmax = width * result.image_object_detection.bounding_box.normalized_vertices[
                    1].x
                r_ymax = height * result.image_object_detection.bounding_box.normalized_vertices[
                    1].y
                box = (r_xmin, r_ymin, r_xmax, r_ymax)
                im = Image.open(temp_local_filename)
                im2 = im.crop(box)
                im2.save(temp_local_filename.replace('.png', '-crop.png'),
                         'png')

                # Upload cropped image to gcs bucket
                new_file_name = os.path.join(
                    gcs_cropped_image_folder,
                    os.path.basename(blob.name).replace('.png', '-crop.png'))
                new_file_bucket, new_file_name = utils.get_bucket_blob(
                    new_file_name)
                new_blob = blob.bucket.blob(new_file_name)
                new_blob.upload_from_filename(temp_local_filename)
                os.remove(temp_local_filename)
        else:
            pass
Example #33
    # robot
    elif args.run:
        print('<<START>>')
        loop = asyncio.get_event_loop()
        loop.run_until_complete(processor())
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            print('<<END>>')
            loop.close()


if __name__ == '__main__':
    create_table()

    parser = argparse.ArgumentParser(description='Robot Navigation')

    parser.add_argument(
        '--loaddata',
        help=
        'Loading data is easy: just call \'manage.py loaddata <filename>\' where'
        ' <filename> is the name of the data file you\'ve created')
    parser.add_argument(
        '--landmark',
        nargs='+',
        help='Creating landmark is easy: just call \'manage.py landmark <coords>'
        '<landmarkname>\' where <coords> is the coordinates of landmark and'
        '<landmarkname> is the name of the landmark you\'ve crated')
    parser.add_argument('--run', help='Start to work with your routes')
Example #34
#-*- coding: utf-8 -*-

import logging
import utils
import config

from sqlhelper import SqlHelper

if __name__ == '__main__':
    logging.basicConfig(filename='log/exporttosql.log',
                        format='%(levelname)s %(asctime)s: %(message)s',
                        level=logging.DEBUG)

    sql = SqlHelper()
    utils.create_table(sql, config.assetstore_table_name)
    utils.export_to_sql(sql, 'Plugins/all')