async def register():
    username = input("Username:"******"Password:"******"\nCould not verify if the password is compromised.\n")
    else:
        print("\nPassword is compromised. Must use another password.\n")
        raise Exception("Compromised password at registration phase.")
    rpassword = getpass.getpass("Repeat password:"******"\nPasswords are not the same.\n")
        raise Exception("Passwords differ at registration phase.")
    password = utils.hash(password)
    creds = utils.create_json(usr=username, pwd=password)
    if utils.validate_user(creds):
        async with websockets.connect(uri) as websocket:
            await websocket.send(utils.create_json(request="register"))
            resp = await websocket.recv()
            if resp == "ok":
                await websocket.send(creds)
                resp = await websocket.recv()
                if resp == "success":
                    print("registration successful")
                else:
                    raise Exception('\nServer could not register the user.')
            else:
                raise Exception('\nServer could not register the user.')
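A note on the helper: in this websocket client, utils.create_json is called with keyword arguments and its result is sent directly over the socket, so it is presumably a thin wrapper that serializes its keyword arguments to a JSON string. A minimal sketch of such a helper, assuming exactly that behavior (the real utils module is not shown in this excerpt):

import json

def create_json(**kwargs):
    # Serialize the keyword arguments to a JSON string, e.g.
    # create_json(request="register") -> '{"request": "register"}'.
    return json.dumps(kwargs)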
Example #2
    def run(self, json_name):
        cities = requests.get(self.CITIES_URL).json()

        cities_info = self._get_cities(cities)

        for city_id, city_name in cities_info.items():
            city_url = self.CITY_URL_PATTERN.format(id=city_id)
            offices = requests.get(city_url).json()['offices']
            for office in offices:
                json_model = self._fill_json_model(office, city_name)
                self.offices.append(json_model.convert_to_dict())

        create_json(json_name, self.offices)
Example #3
    def run(self, json_name):
        response = requests.get(self.URL)
        content = response.content.decode(response.encoding)

        soup = BeautifulSoup(markup=content, features=self.PARSER)
        city_items = soup.find_all('div', class_='city-item')

        for address in city_items:
            html_model = self._fill_html_model(address)
            json_model = self._fill_json_model(html_model)
            self.shops.append(json_model.convert_to_dict())

        create_json(json_name, self.shops)
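Both scrapers above end by handing their accumulated list of dicts to create_json(json_name, ...), i.e. a helper that writes the records to a named JSON file. A minimal sketch of such a helper, assuming that is all it does (the actual implementation is not part of this excerpt):

import json

def create_json(json_name, data):
    # Dump the collected records to <json_name> as UTF-8 JSON.
    with open(json_name, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)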
Example #4
def generate_course_usage(result_loc_, date_):
    """
    Generate course consumption report.
    :param result_loc_: pathlib.Path object to store resultant CSV at.
    :param date_: datetime object to use in path
    :return: None
    """
    tenant_info = pd.read_csv(result_loc_.joinpath(date_.strftime('%Y-%m-%d'),
                                                   'tenant_info.csv'),
                              dtype=str)[['id', 'slug']].set_index('id')
    course_batch = pd.read_csv(result_loc_.joinpath(date_.strftime('%Y-%m-%d'),
                                                    'course_batch.csv'),
                               dtype=str)
    for channel in course_batch.channel.unique():
        try:
            slug = tenant_info.loc[channel][0]
            print(slug)
            df = course_batch[course_batch['channel'] == channel]
            result_loc_.parent.joinpath('portal_dashboards',
                                        slug).mkdir(exist_ok=True)
            try:
                get_data_from_blob(
                    result_loc_.parent.joinpath('portal_dashboards', slug,
                                                'course_usage.csv'))
                blob_data = pd.read_csv(
                    result_loc_.parent.joinpath('portal_dashboards', slug,
                                                'course_usage.csv'))
            except:
                blob_data = pd.DataFrame()
            blob_data = blob_data.append(df, sort=False).fillna('')
            blob_data.drop_duplicates(
                subset=['Date', 'Course Name', 'Batch Name', 'Batch Status'],
                inplace=True,
                keep='last')
            blob_data.sort_values(
                ['Date', 'Course Name', 'Batch Name', 'Batch Status'],
                ascending=False,
                inplace=True)
            blob_data.drop('channel',
                           axis=1).to_csv(result_loc_.parent.joinpath(
                               'portal_dashboards', slug, 'course_usage.csv'),
                                          index=False)
            create_json(
                result_loc_.parent.joinpath('portal_dashboards', slug,
                                            'course_usage.csv'))
            post_data_to_blob(
                result_loc_.parent.joinpath('portal_dashboards', slug,
                                            'course_usage.csv'))
        except KeyError:
            print(channel, 'channel not in tenant list')
Example #5
 def put_online(self, user, proxy):
     try:
         return proxy.scheduler.put_into_online_mode(
             self.name, "Put online by %s" % user)
     except xmlrpclib.Fault as e:
         return utils.create_json(
             "error", "XMLRPC err%d: %s" % (e.faultCode, e.faultString))
Example #6
async def login():
    username = input("Username:"******"Password:"******"login"))
            resp = await websocket.recv()
            if resp == "ok":
                await websocket.send(creds)
                resp = await websocket.recv()
                if resp == "success":
                    print("login successful")
                else:
                    raise Exception('\nUser could not log in.')
            else:
                raise Exception('\nUser could not log in.')
Example #7
    for year, call_log in data.items():
        print(f'Getting call data for {year}')
        # UNCOMMENT THIS FOR CREATING A YEAR KEY FOR EACH YEAR (1 OF 3)
        # final_data[year] = {}
        for month, calls in call_log.items():
            # UNCOMMENT THIS FOR CREATING A MONTH KEY FOR EACH MONTH (2 OF 3)
            # month = parse_month(month=month, abbreviated=False)
            # final_data[year][month] = {}
            for call in calls:
                call_data = call.split("-")
                call_date = utils.parse_date(call_data[0].split("_")[0])
                call_time = utils.parse_time(call_data[0].split("_")[1])
                call_number = utils.parse_call_number(call_data[1])
                call_direction = call_data[2].split(".")[0]
                # UNCOMMENT THIS FOR CREATING A MONTH KEY FOR EACH MONTH (3 OF 3)
                # final_data[year][month].setdefault(call_date, {})
                # final_data[year][month][call_date].setdefault(call_direction, [])
                # final_data[year][month][call_date][call_direction].append(f'{call_time} - {call_number}')
                final_data.setdefault(call_date, [])
                final_data[call_date].append(
                    f'{call_direction} - {call_time} - {call_number}')

    return final_data


utils.create_json(generate_data(), json_file)
raw_data = utils.load_json(json_file)
parsed_data = parse_data(raw_data)
# print(json.dumps(parsed_data, indent=2))
utils.create_excel(save_dir, 'Call Logs', parsed_data)
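For reference, the parsing loop above splits each raw call entry first on "-" and then on "_", so it expects names shaped like <date>_<time>-<number>-<direction>.<extension>. A hypothetical entry and the record it would produce (assuming parse_date, parse_time and parse_call_number merely reformat their fields):

# Hypothetical raw entry: "20200114_153045-5551234567-incoming.wav"
#   call_data[0].split("_")[0] -> "20200114"      (passed to utils.parse_date)
#   call_data[0].split("_")[1] -> "153045"        (passed to utils.parse_time)
#   call_data[1]               -> "5551234567"    (passed to utils.parse_call_number)
#   call_data[2].split(".")[0] -> "incoming"
# final_data then maps the parsed date to entries like
#   "incoming - <parsed time> - <parsed number>"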
Example #8
            "The file {filename} was minimized in {time} minutes".format(
                filename=filename, time=est_time))
    except Exception as e:
        raise Exception("Something happens {}".format(e.message))
    finally:
        # remove tmp directories
        rmtree(lithium_tmp, ignore_errors=True)
        rmtree(project_dir, ignore_errors=True)

    return output_lithium


for _class in classes:
    try:
        result = minimize_file(_class)
        data["slicer"].append(result)
    except Exception as e:
        logger.error("{}".format(e))
        raise Exception("Minimization was failed. \n{}".format(e))

# generate slicer-testcase.json
testcase_fmt = test_case.split("::")[-1]
slicer_name = "slicer-{}.json".format(testcase_fmt)
json_path = os.path.join(log_testcase_dir, slicer_name)

# export data to a JSON file
create_json(json_path, data)

total_time = int((time.time() - init_time) / 60.0)
logger.info("The testcase {test_case} was finished in {time} minutes".format(
    test_case=testcase_name, time=total_time))
Example #9
    descriptions = get_descriptions('../data/raw/DeCS2020.obo')
    _, dev, _ = mesinesp2.data.load_dataset('../data/raw', 1)
    x_dev, y_dev = transform(dev,
                             descriptions,
                             split_sentences=False,
                             transform_labels=False)
    model = 'sbw'

    # idf = create_decs_embeddings()  # Run this only if the decs_mix, decs_sbw and idf JSON files do not exist yet
    with open('../embeddings/idf.json') as f:
        idf = json.load(f)

    dev_sbw_similarity = similarity(x_dev, model, idf)

    result = np.apply_along_axis(top_k_values, 1, dev_sbw_similarity, 100)
    create_json(dev['id'], result, descriptions, model)

    with open(f'../embeddings/{model}_predictions.json') as json_file:
        data = json.load(json_file)

    pred = []
    for doc in data['documents']:
        pred.append(doc['labels'])

    real = dev["decsCodes"]
    assert (len(real) == len(pred))
    tp, fn, fp, p, r, f1 = f1_score(real, pred)
    print(F'TP: {tp}')
    print(F'FN: {fn}')
    print(F'FP: {fp}')
    print(f'Precision: {p}')
Example #10
                    type=str,
                    default=date.today().strftime("%d/%m/%Y"),
                    help="DD/MM/YYYY, optional argument for backfill jobs")
args = parser.parse_args()
data_store_location = Path(
    args.data_store_location).joinpath('portal_dashboards')
data_store_location.mkdir(exist_ok=True)
org_search = args.org_search
analysis_date = datetime.strptime(args.execution_date, "%d/%m/%Y")
data_store_location.joinpath('public').mkdir(exist_ok=True)
get_data_from_blob(data_store_location.joinpath('overall',
                                                'daily_metrics.csv'))
data_wrangling(result_loc_=data_store_location.joinpath(
    'overall', 'daily_metrics.csv'),
               date_=analysis_date)
create_json(data_store_location.joinpath('public', 'cmo_dashboard.csv'),
            last_update=True)
post_data_to_blob(data_store_location.joinpath('public', 'cmo_dashboard.csv'))
get_tenant_info(
    result_loc_=data_store_location.parent.joinpath('textbook_reports'),
    org_search_=org_search,
    date_=analysis_date)
board_slug = pd.read_csv(
    data_store_location.parent.joinpath('textbook_reports',
                                        analysis_date.strftime('%Y-%m-%d'),
                                        'tenant_info.csv'))
slug_list = board_slug['slug'].unique().tolist()
for slug in slug_list:
    try:
        get_data_from_blob(result_loc_=data_store_location.joinpath(
            slug, 'daily_metrics.csv'))
        data_wrangling(result_loc_=data_store_location.joinpath(
Example #11
    print("Inference on {}".format(device))


    if not os.path.exists("./infer_results"):
        os.makedir("./infer_results")




    net = ModifiedUNet(num_classes=args["num_class"]).load_from_checkpoint(args["checkpoint_path"])

    net.eval()


    for batch_idx,batch in enumerate(inferloader):
        prediction = net(batch)
        masks = preds_to_rgb_mask(prediction[0],batch_idx,2)
        file_name = inferloader.images[batch_idx]
        f_name = os.path.split(file_name)[1]
        name = os.path.splitext(f_name)[0]
        save_name = name + ".json"
        save_path = os.path.join("./infer_results",save_name)
        create_json(masks, file_name, save_path)
Example #12
def process_ct_instances(study_instance_id, ct_instances, work_dir, output_dir):
    valid_ct_slice_objects = list()
    ct_slice_objects = list()
    ggo_count = 0
    con_count = 0
    sub_count = 0
    fib_count = 0
    ple_count = 0
    pne_count = 0
    abnormal_slice_count = 0

    total_number_of_slices = len(ct_instances)
    write_progress(study_instance_id, "20")
    write_last_sent_progress(study_instance_id, "20")
    update_progress_percent(study_instance_id, "20")

    points_file = study_instance_id + '_points.pkl'

    if os.path.exists(points_file):
        with open(points_file, 'rb') as f:
            rs = pickle.load(f)
    else:
        rs = process(study_instance_id, ct_instances)
        with open(points_file, 'wb') as fp:
            pickle.dump(rs, fp)

    if len(rs) > 0:
        write_progress(output_dir, "50")
        update_progress_percent(study_instance_id, "50")

        for r in rs:
            ct_slice, ggo, con, sub, fib, ple, pne, nor, affected_points, meta_data_dicom = r
            if not ct_slice.all_zeros:
                ct_slice_objects.append(ct_slice)
            if ggo or con or sub or fib or ple or pne > 10:
                abnormal_slice_count += 1
            ggo_count += ggo
            con_count += con
            sub_count += sub
            fib_count += fib
            ple_count += ple
            pne_count += pne
            if ct_slice.is_valid():
                valid_ct_slice_objects.append(ct_slice)

        scores = get_25_score(ct_slice_objects, study_instance_id)

        print('Total slice count: ' + str(total_number_of_slices))
        print('Abnormal slices  : ' + str(abnormal_slice_count))
        print('co_rads_score : ' + str(scores[5]))
        print('right_superior_lobe_percentage_affected : ' + str(scores[0]))
        print('right_middle_lobe_percentage_affected : ' + str(scores[1]))
        print('right_inferior_lobe_percentage_affected : ' + str(scores[2]))
        print('left_superior_lobe_percentage_affected : ' + str(scores[3]))
        print('left_inferior_lobe_percentage_affected : ' + str(scores[4]))

        final_json = create_json(study_instance_id,
                                 scores,
                                 ggo_count,
                                 con_count,
                                 sub_count,
                                 fib_count,
                                 ple_count,
                                 abnormal_slice_count,
                                 total_number_of_slices)

        write_progress(output_dir, "55")
        update_progress_percent(study_instance_id, "55")

        with open(output_dir + '/out.json', 'w') as f:
            final_json_str = json.dumps(final_json, indent=4)
            f.write(final_json_str)

        mp_slice_plot_2(rs, output_dir)
        write_progress(output_dir, "60")
        write_last_sent_progress(output_dir, "60")
        update_progress_percent(study_instance_id, "60")
        type_map = dict()

        for r in rs:
            for af in r[8]:
                x = int(math.floor(af[0][0]))
                y = int(math.floor(af[0][1]))
                z = int(math.floor(af[0][2]))
                v = af[1]
                if type_map.get(z) is None:
                    type_map[z] = [(v, (x, y, z))]
                else:
                    type_map.get(z).append((v, (x, y, z)))

        vtk_dir = tempfile.mkdtemp()
        ct_ggo_dir = tempfile.mkdtemp()
        ct_con_dir = tempfile.mkdtemp()
        ct_fib_dir = tempfile.mkdtemp()

        # print("1")

        mp_plot(study_instance_id,
                rs,
                type_map,
                ct_ggo_dir,
                ct_con_dir,
                ct_fib_dir,
                vtk_dir)

        write_progress(output_dir, "80")
        update_progress_percent(study_instance_id, "80")

        # vtk_plot(vtk_dir, output_dir)
        three_d_plot(study_instance_id, work_dir, output_dir, ct_ggo_dir, ct_con_dir, ct_fib_dir)

        shutil.rmtree(ct_ggo_dir)
        shutil.rmtree(ct_con_dir)
        shutil.rmtree(ct_fib_dir)
        shutil.rmtree(vtk_dir)

        write_progress(output_dir, "90")
        update_progress_percent(study_instance_id, "90")
    else:
        write_progress(output_dir, "90")
        update_progress_percent(study_instance_id, "90")
Example #13
 def get_status(self, proxy):
     try:
         return proxy.scheduler.get_device_status(self.name)
     except xmlrpclib.Fault as e:
         return utils.create_json("error", "XMLRPC err%d: %s" % (e.faultCode, e.faultString))
Example #14
 def put_online(self, user, proxy):
     try:
         return proxy.scheduler.put_into_online_mode(self.name, "Put online by %s" % user)
     except xmlrpclib.Fault as e:
         return utils.create_json("error", "XMLRPC err%d: %s" % (e.faultCode, e.faultString))
Example #15
def daily_metrics(read_loc_, date_):
    """
    Merge the three metrics (QR scans, downloads, and content plays) into daily metrics.
    :param read_loc_: pathlib.Path object to read CSV from.
    :param date_: datetime object to use in path
    :return: None
    """
    try:
        board_slug = \
            pd.read_csv(
                data_store_location.joinpath('textbook_reports', date_.strftime('%Y-%m-%d'), 'tenant_info.csv'))[
                ['id', 'slug']]
        board_slug.set_index('id', inplace=True)
    except Exception:
        raise Exception('Board Slug Error!')
    try:
        scans_df = pd.read_csv(
            read_loc_.joinpath('dialcode_scans', date_.strftime('%Y-%m-%d'),
                               'dial_scans.csv')).fillna('')
        scans_df = scans_df.pivot(index='dialcode_channel',
                                  columns='failed_flag',
                                  values='count').reset_index().fillna(0)
        scans_df = scans_df.join(
            board_slug, on='dialcode_channel',
            how='left')[['slug', 'Failed QR Scans', 'Successful QR Scans']]
        scans_df['Total QR scans'] = scans_df[
            'Successful QR Scans'] + scans_df['Failed QR Scans']
        scans_df['Percentage (%) of Failed QR Scans'] = scans_df[
            'Failed QR Scans'] * 100 / scans_df['Total QR scans']
        unmapped = scans_df[scans_df.slug.isna()]['Total QR scans'][0]
        scans_df.dropna(subset=['slug'], inplace=True)
    except Exception:
        raise Exception('Scans Error!')
    try:
        downloads_df = pd.read_csv(read_loc_.joinpath(
            'downloads', date_.strftime('%Y-%m-%d'), 'downloads.csv'),
                                   header=[0, 1],
                                   index_col=0)
        # Setting the value as Nan if there is no data for desktop
        downloads_df[(
            'count', config['context']['pdata']['id']['desktop']
        )] = downloads_df.reindex(
            columns=[('count',
                      config['context']['pdata']['id']['desktop'])]).squeeze()
        downloads_df = downloads_df.join(board_slug, on='channel', how='left')
        downloads_df = downloads_df[[
            ('count', config['context']['pdata']['id']['app']),
            ('count', config['context']['pdata']['id']['desktop']), 'slug'
        ]]
        downloads_df.columns = [
            'Total Content Downloads in Mobile App',
            'Total Content Downloads in Desktop App', 'slug'
        ]

        downloads_df = downloads_df.dropna(subset=['slug'])

    except Exception:
        raise Exception('Downloads Error!')
    try:
        app_df = pd.read_csv(read_loc_.joinpath('play',
                                                date_.strftime('%Y-%m-%d'),
                                                'app_sessions.csv'),
                             header=[0, 1])
        # Setting the value as Nan if there is no data for desktop
        app_df[('Total App Sessions',
                config['context']['pdata']['id']['desktop'])] = app_df.reindex(
                    columns=[('Total App Sessions',
                              config['context']['pdata']['id']['desktop']
                              )]).squeeze()
        app_df[('Total Devices on App',
                config['context']['pdata']['id']['desktop'])] = app_df.reindex(
                    columns=[('Total Devices on App',
                              config['context']['pdata']['id']['desktop']
                              )]).squeeze()
        app_df[('Total Time on App (in hours)',
                config['context']['pdata']['id']['desktop'])] = app_df.reindex(
                    columns=[('Total Time on App (in hours)',
                              config['context']['pdata']['id']['desktop']
                              )]).squeeze()

        app_df = app_df[[
            ('Total App Sessions', config['context']['pdata']['id']['app']),
            ('Total Devices on App', config['context']['pdata']['id']['app']),
            ('Total Time on App (in hours)',
             config['context']['pdata']['id']['app']),
            ('Total App Sessions',
             config['context']['pdata']['id']['desktop']),
            ('Total Devices on App',
             config['context']['pdata']['id']['desktop']),
            ('Total Time on App (in hours)',
             config['context']['pdata']['id']['desktop'])
        ]]
        app_df.columns = [
            'Total Mobile App Sessions', 'Total Devices on Mobile App',
            'Total Time on Mobile App (in hours)',
            'Total Desktop App Sessions', 'Total Devices on Desktop App',
            'Total Time on Desktop App (in hours)'
        ]
        plays_df = pd.read_csv(read_loc_.joinpath('play',
                                                  date_.strftime('%Y-%m-%d'),
                                                  'plays.csv'),
                               header=[0, 1],
                               index_col=0)
        # Setting the value as Nan if there is no data for desktop
        plays_df[(
            'Total Content Plays',
            config['context']['pdata']['id']['desktop'])] = plays_df.reindex(
                columns=[(
                    'Total Content Plays',
                    config['context']['pdata']['id']['desktop'])]).squeeze()
        plays_df[(
            'Total Devices that played content',
            config['context']['pdata']['id']['desktop'])] = plays_df.reindex(
                columns=[(
                    'Total Devices that played content',
                    config['context']['pdata']['id']['desktop'])]).squeeze()
        plays_df[(
            'Content Play Time (in hours)',
            config['context']['pdata']['id']['desktop'])] = plays_df.reindex(
                columns=[(
                    'Content Play Time (in hours)',
                    config['context']['pdata']['id']['desktop'])]).squeeze()

        plays_df = plays_df.reset_index().join(
            board_slug, on='channel',
            how='left')[[('Total Content Plays',
                          config['context']['pdata']['id']['app']),
                         ('Total Content Plays',
                          config['context']['pdata']['id']['portal']),
                         ('Total Content Plays',
                          config['context']['pdata']['id']['desktop']),
                         ('Total Devices that played content',
                          config['context']['pdata']['id']['app']),
                         ('Total Devices that played content',
                          config['context']['pdata']['id']['portal']),
                         ('Total Devices that played content',
                          config['context']['pdata']['id']['desktop']),
                         ('Content Play Time (in hours)',
                          config['context']['pdata']['id']['app']),
                         ('Content Play Time (in hours)',
                          config['context']['pdata']['id']['portal']),
                         ('Content Play Time (in hours)',
                          config['context']['pdata']['id']['desktop']),
                         'slug']].dropna(subset=['slug'])
        plays_df.columns = [
            'Total Content Plays on Mobile App',
            'Total Content Plays on Portal',
            'Total Content Plays on Desktop App',
            'Total Devices that played content on Mobile App',
            'Total Devices that played content on Portal',
            'Total Devices that played content on Desktop App',
            'Content Play Time on Mobile App (in hours)',
            'Content Play Time on Portal (in hours)',
            'Content Play Time on Desktop App (in hours)', 'slug'
        ]
    except Exception:
        raise Exception('App and Plays Error!')
    try:
        daily_metrics_df = scans_df.join(
            downloads_df.set_index('slug'), on='slug',
            how='outer').reset_index(drop=True).join(
                plays_df.set_index('slug'),
                on='slug',
                how='outer',
                rsuffix='_plays').fillna(0)
        daily_metrics_df['Date'] = '-'.join(
            date_.strftime('%Y-%m-%d').split('-')[::-1])
    except Exception:
        raise Exception('Daily Metrics Error!')
    try:
        overall = daily_metrics_df[[
            'Successful QR Scans', 'Failed QR Scans',
            'Total Content Downloads in Mobile App',
            'Total Content Downloads in Desktop App',
            'Total Content Plays on Mobile App',
            'Total Content Plays on Portal',
            'Total Content Plays on Desktop App',
            'Total Devices that played content on Mobile App',
            'Total Devices that played content on Portal',
            'Total Devices that played content on Desktop App',
            'Content Play Time on Mobile App (in hours)',
            'Content Play Time on Portal (in hours)',
            'Content Play Time on Desktop App (in hours)'
        ]].sum().astype(int)
        overall['Total Mobile App Sessions'] = app_df[
            'Total Mobile App Sessions'].loc[0]
        overall['Total Desktop App Sessions'] = app_df[
            'Total Desktop App Sessions'].loc[0]
        overall['Total Devices on Mobile App'] = app_df[
            'Total Devices on Mobile App'].loc[0]
        overall['Total Devices on Desktop App'] = app_df[
            'Total Devices on Desktop App'].loc[0]
        overall['Total Time on Mobile App (in hours)'] = app_df[
            'Total Time on Mobile App (in hours)'].loc[0]
        overall['Total Time on Desktop App (in hours)'] = app_df[
            'Total Time on Desktop App (in hours)'].loc[0]
        overall['Date'] = '-'.join(date_.strftime('%Y-%m-%d').split('-')[::-1])
        overall['Unmapped QR Scans'] = unmapped
        overall['Total QR scans'] = overall['Successful QR Scans'] + overall[
            'Failed QR Scans'] + overall['Unmapped QR Scans']
        overall['Percentage (%) of Failed QR Scans'] = '%.2f' % (
            overall['Failed QR Scans'] * 100 / overall['Total QR scans'])
        overall['Percentage (%) of Unmapped QR Scans'] = '%.2f' % (
            overall['Unmapped QR Scans'] * 100 / overall['Total QR scans'])
        overall['Total Content Plays'] = overall[
            'Total Content Plays on Mobile App'] + overall[
                'Total Content Plays on Portal'] + overall[
                    'Total Content Plays on Desktop App']
        overall['Total Devices that played content'] = overall['Total Devices that played content on Mobile App'] + \
                                                       overall['Total Devices that played content on Portal'] + \
                                                       overall['Total Devices that played content on Desktop App']
        overall['Total Content Play Time (in hours)'] = overall['Content Play Time on Mobile App (in hours)'] + \
                                                        overall['Content Play Time on Portal (in hours)'] + \
                                                        overall['Content Play Time on Desktop App (in hours)']
        overall = overall[[
            'Date', 'Total QR scans', 'Successful QR Scans', 'Failed QR Scans',
            'Unmapped QR Scans', 'Percentage (%) of Failed QR Scans',
            'Percentage (%) of Unmapped QR Scans',
            'Total Content Downloads in Mobile App',
            'Total Content Downloads in Desktop App',
            'Total Mobile App Sessions', 'Total Devices on Mobile App',
            'Total Time on Mobile App (in hours)',
            'Total Desktop App Sessions', 'Total Devices on Desktop App',
            'Total Time on Desktop App (in hours)',
            'Total Content Plays on Mobile App',
            'Total Devices that played content on Mobile App',
            'Content Play Time on Mobile App (in hours)',
            'Total Content Plays on Portal',
            'Total Devices that played content on Portal',
            'Content Play Time on Portal (in hours)',
            'Total Content Plays on Desktop App',
            'Total Devices that played content on Desktop App',
            'Content Play Time on Desktop App (in hours)',
            'Total Content Plays', 'Total Devices that played content',
            'Total Content Play Time (in hours)'
        ]]
        read_loc_.joinpath('portal_dashboards', 'overall').mkdir(exist_ok=True)
        read_loc_.joinpath('portal_dashboards', 'mhrd').mkdir(exist_ok=True)
        try:
            get_data_from_blob(
                read_loc_.joinpath('portal_dashboards', 'overall',
                                   'daily_metrics.csv'))
            blob_data = pd.read_csv(
                read_loc_.joinpath('portal_dashboards', 'overall',
                                   'daily_metrics.csv'))
        except:
            blob_data = pd.DataFrame()

        # Rename the old generic App fields to Mobile App fields and add the Desktop App fields (first-time use only)
        if 'Total Content Downloads in Mobile App' not in blob_data.columns:
            blob_data.rename(columns={
                'Total Content Downloads':
                'Total Content Downloads in Mobile App',
                'Total App Sessions':
                'Total Mobile App Sessions',
                'Total Devices on App':
                'Total Devices on Mobile App',
                'Total Time on App (in hours)':
                'Total Time on Mobile App (in hours)',
                'Total Content Plays on App':
                'Total Content Plays on Mobile App',
                'Total Devices that played content on App':
                'Total Devices that played content on Mobile App',
                'Content Play Time on App (in hours)':
                'Content Play Time on Mobile App (in hours)'
            },
                             inplace=True)
            blob_data['Total Content Downloads in Desktop App'] = 0
            blob_data['Total Desktop App Sessions'] = 0
            blob_data['Total Devices on Desktop App'] = 0
            blob_data['Total Time on Desktop App (in hours)'] = 0
            blob_data['Total Content Plays on Desktop App'] = 0
            blob_data['Total Devices that played content on Desktop App'] = 0
            blob_data['Content Play Time on Desktop App (in hours)'] = 0
        blob_data = blob_data.append(pd.DataFrame(overall).transpose(),
                                     sort=False).fillna('')
        blob_data.index = pd.to_datetime(blob_data.Date, format='%d-%m-%Y')
        blob_data.sort_index(inplace=True)
        blob_data.drop_duplicates('Date', inplace=True, keep='last')

        # Exclude the desktop columns if they have no values
        sum_of_desktop_values = blob_data[[
            'Total Content Downloads in Desktop App',
            'Total Desktop App Sessions', 'Total Devices on Desktop App',
            'Total Time on Desktop App (in hours)',
            'Total Content Plays on Desktop App',
            'Total Devices that played content on Desktop App',
            'Content Play Time on Desktop App (in hours)'
        ]].values.sum()
        if sum_of_desktop_values == 0:
            exportable_cols = [
                'Date', 'Total QR scans', 'Successful QR Scans',
                'Failed QR Scans', 'Unmapped QR Scans',
                'Percentage (%) of Failed QR Scans',
                'Percentage (%) of Unmapped QR Scans',
                'Total Content Downloads in Mobile App',
                'Total Mobile App Sessions', 'Total Devices on Mobile App',
                'Total Time on Mobile App (in hours)',
                'Total Content Plays on Mobile App',
                'Total Devices that played content on Mobile App',
                'Content Play Time on Mobile App (in hours)',
                'Total Content Plays on Portal',
                'Total Devices that played content on Portal',
                'Content Play Time on Portal (in hours)',
                'Total Content Plays', 'Total Devices that played content',
                'Total Content Play Time (in hours)'
            ]
        else:
            exportable_cols = [
                'Date', 'Total QR scans', 'Successful QR Scans',
                'Failed QR Scans', 'Unmapped QR Scans',
                'Percentage (%) of Failed QR Scans',
                'Percentage (%) of Unmapped QR Scans',
                'Total Content Downloads in Mobile App',
                'Total Content Downloads in Desktop App',
                'Total Mobile App Sessions', 'Total Devices on Mobile App',
                'Total Time on Mobile App (in hours)',
                'Total Desktop App Sessions', 'Total Devices on Desktop App',
                'Total Time on Desktop App (in hours)',
                'Total Content Plays on Mobile App',
                'Total Devices that played content on Mobile App',
                'Content Play Time on Mobile App (in hours)',
                'Total Content Plays on Portal',
                'Total Devices that played content on Portal',
                'Content Play Time on Portal (in hours)',
                'Total Content Plays on Desktop App',
                'Total Devices that played content on Desktop App',
                'Content Play Time on Desktop App (in hours)',
                'Total Content Plays', 'Total Devices that played content',
                'Total Content Play Time (in hours)'
            ]
        # can remove after first run
        blob_data = blob_data[exportable_cols]
        blob_data.to_csv(read_loc_.joinpath('portal_dashboards', 'overall',
                                            'daily_metrics.csv'),
                         index=False)
        create_json(
            read_loc_.joinpath('portal_dashboards', 'overall',
                               'daily_metrics.csv'))
        post_data_to_blob(
            read_loc_.joinpath('portal_dashboards', 'overall',
                               'daily_metrics.csv'))
    except Exception:
        raise Exception('Overall Metrics Error!')
    try:
        daily_metrics_df['Total Content Plays'] = daily_metrics_df['Total Content Plays on Mobile App'] + \
                                                  daily_metrics_df['Total Content Plays on Portal'] + \
                                                  daily_metrics_df['Total Content Plays on Desktop App']
        daily_metrics_df['Total Devices that played content'] = daily_metrics_df[
                                                                    'Total Devices that played content on Mobile App'] \
                                                                + daily_metrics_df[
                                                                    'Total Devices that played content on Portal'] \
                                                                + daily_metrics_df[
                                                                    'Total Devices that played content on Desktop App']
        daily_metrics_df['Total Content Play Time (in hours)'] = daily_metrics_df[
                                                                     'Content Play Time on Mobile App (in hours)'] + \
                                                                 daily_metrics_df[
                                                                     'Content Play Time on Portal (in hours)'] + \
                                                                 daily_metrics_df[
                                                                     'Content Play Time on Desktop App (in hours)']
        daily_metrics_df.set_index(['slug'], inplace=True)
        daily_metrics_df = daily_metrics_df[[
            'Date', 'Total QR scans', 'Successful QR Scans', 'Failed QR Scans',
            'Percentage (%) of Failed QR Scans',
            'Total Content Downloads in Mobile App',
            'Total Content Downloads in Desktop App',
            'Total Content Plays on Mobile App',
            'Total Devices that played content on Mobile App',
            'Content Play Time on Mobile App (in hours)',
            'Total Content Plays on Portal',
            'Total Devices that played content on Portal',
            'Content Play Time on Portal (in hours)',
            'Total Content Plays on Desktop App',
            'Total Devices that played content on Desktop App',
            'Content Play Time on Desktop App (in hours)',
            'Total Content Plays', 'Total Devices that played content',
            'Total Content Play Time (in hours)'
        ]]
        for slug, value in daily_metrics_df.iterrows():
            if slug != '':
                read_loc_.joinpath('portal_dashboards',
                                   slug).mkdir(exist_ok=True)
                for key, val in value.items():
                    if key not in [
                            'Date', 'Percentage (%) of Failed QR Scans'
                    ]:
                        value[key] = int(val)
                    elif key == 'Percentage (%) of Failed QR Scans':
                        value[key] = '%.2f' % val
                try:
                    get_data_from_blob(
                        read_loc_.joinpath('portal_dashboards', slug,
                                           'daily_metrics.csv'))
                    blob_data = pd.read_csv(
                        read_loc_.joinpath('portal_dashboards', slug,
                                           'daily_metrics.csv'))
                except:
                    blob_data = pd.DataFrame()

                # Rename the old generic App fields to Mobile App fields and add the Desktop App fields (first-time use only)
                if 'Total Content Downloads in Mobile App' not in blob_data.columns:
                    blob_data.rename(columns={
                        'Total Content Downloads':
                        'Total Content Downloads in Mobile App',
                        'Total Content Plays on App':
                        'Total Content Plays on Mobile App',
                        'Total Devices that played content on App':
                        'Total Devices that played content on Mobile App',
                        'Content Play Time on App (in hours)':
                        'Content Play Time on Mobile App (in hours)'
                    },
                                     inplace=True)
                    blob_data['Total Content Downloads in Desktop App'] = 0
                    blob_data['Total Content Plays on Desktop App'] = 0
                    blob_data[
                        'Total Devices that played content on Desktop App'] = 0
                    blob_data[
                        'Content Play Time on Desktop App (in hours)'] = 0
                blob_data = blob_data.append(pd.DataFrame(value).transpose(),
                                             sort=False).fillna('')
                blob_data.index = pd.to_datetime(blob_data.Date,
                                                 format='%d-%m-%Y')
                blob_data.sort_index(inplace=True)
                blob_data.drop_duplicates('Date', inplace=True, keep='last')

                # Exclude the desktop columns if they have no values
                sum_of_desktop_values = blob_data[[
                    'Total Content Downloads in Desktop App',
                    'Total Content Plays on Desktop App',
                    'Total Devices that played content on Desktop App',
                    'Content Play Time on Desktop App (in hours)'
                ]].values.sum()
                if sum_of_desktop_values == 0:
                    exportable_cols = [
                        'Date', 'Total QR scans', 'Successful QR Scans',
                        'Failed QR Scans', 'Percentage (%) of Failed QR Scans',
                        'Total Content Downloads in Mobile App',
                        'Total Content Plays on Mobile App',
                        'Total Devices that played content on Mobile App',
                        'Content Play Time on Mobile App (in hours)',
                        'Total Content Plays on Portal',
                        'Total Devices that played content on Portal',
                        'Content Play Time on Portal (in hours)',
                        'Total Content Plays',
                        'Total Devices that played content',
                        'Total Content Play Time (in hours)'
                    ]
                else:
                    exportable_cols = [
                        'Date', 'Total QR scans', 'Successful QR Scans',
                        'Failed QR Scans', 'Percentage (%) of Failed QR Scans',
                        'Total Content Downloads in Mobile App',
                        'Total Content Downloads in Desktop App',
                        'Total Content Plays on Mobile App',
                        'Total Devices that played content on Mobile App',
                        'Content Play Time on Mobile App (in hours)',
                        'Total Content Plays on Portal',
                        'Total Devices that played content on Portal',
                        'Content Play Time on Portal (in hours)',
                        'Total Content Plays on Desktop App',
                        'Total Devices that played content on Desktop App',
                        'Content Play Time on Desktop App (in hours)',
                        'Total Content Plays',
                        'Total Devices that played content',
                        'Total Content Play Time (in hours)'
                    ]
                blob_data = blob_data[exportable_cols]
                blob_data.to_csv(read_loc_.joinpath('portal_dashboards', slug,
                                                    'daily_metrics.csv'),
                                 index=False)
                create_json(
                    read_loc_.joinpath('portal_dashboards', slug,
                                       'daily_metrics.csv'))
                post_data_to_blob(
                    read_loc_.joinpath('portal_dashboards', slug,
                                       'daily_metrics.csv'))
    except Exception:
        raise Exception('State Metrics Error!')
Example #16
        # normalize and smooth
        item['norm_ema_rsi'] = (item['ema_rsi'] - 0.5) * 200
        item['norm_close'] = ((item['close'] - item['close'].min()) /
            (item['close'].max() - item['close'].min())) * 100
        item['smoothed_close'] = item['norm_close'].rolling(
            window=5,
            win_type='gaussian',
            center=True).mean(std=3).shift(2)

        # remove all NA -- cleanup
        # not really necessary...
        # item = item.dropna()
        # just take last 200 rows
        # item = item.tail(200)
        # item.reset_index(drop=True, inplace=True)

        # add to df dataframe
        df.loc[symbol,'strength_index'] = item['norm_ema_rsi'].tail(1).values
        df.loc[symbol,'price'] = item['close'].tail(1).values
        df.loc[symbol,'norm_close'] = item['norm_close'].tail(1).values

    df.reset_index(inplace=True)
    df = df.dropna()
    return df

# actual code
equities = initialize(path='equities.csv')
equities = populate(equities)
json = utils.create_json(equities)
print(client.put_json(json))
Example #17
 def get_status(self, proxy):
     try:
         return proxy.scheduler.get_device_status(self.name)
     except xmlrpclib.Fault as e:
         return utils.create_json(
             "error", "XMLRPC err%d: %s" % (e.faultCode, e.faultString))
Example #18
"""
Store the parameters being passed to YABI
"""
import logging
import os
import sys
import utils

LOG = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format="%(asctime)-15s:" + logging.BASIC_FORMAT)

# Print where we are for debug
LOG.info("Working dir = %s", os.path.abspath(os.curdir))

try:
    LOG.info("Creating parameters.json")

    # Write it to a file
    file = open("parameters.json", "wb")
    file.write(utils.create_json(sys.argv[1:]))
    file.close()

    LOG.info("Done")

except Exception as e:
    LOG.exception(e)
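As a usage sketch, the script above simply serializes whatever command-line arguments it receives; assuming utils.create_json is a json.dumps-style serializer, a hypothetical invocation would behave like this:

# Hypothetical invocation (script name and flags are illustrative only):
#   python store_parameters.py --input reads.fastq --threads 4
# parameters.json would then contain the raw argument list:
#   ["--input", "reads.fastq", "--threads", "4"]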