Example #1
def _plot_target_network_rel_residuals(df,
                                       target,
                                       ref,
                                       batch_options,
                                       filter_options,
                                       tt_scale=50,
                                       snr_scale=(0, 60),
                                       annotator=None):

    file_label = batch_options.batch_label
    save_file = batch_options.save_file

    def _plot_dataset(ds, net_code, ref_code):
        # Sort rows by SNR so that the weakest SNR points are drawn first and the
        # highest SNR points last, keeping high SNR points in the top rendering layer.
        ds = ds.sort_values('snr')
        times = pandas_timestamp_to_plottable_datetime(ds['originTimestamp'])
        vals = ds[yaxis].values
        qual = ds['snr'].values
        min_mag = 4.0
        mag = ds['mag'].values - min_mag
        ylabel = 'Relative TT residual (sec)'
        title = r"Station {} TT residuals relative to network {} (filtering: ref SNR$\geq${}, CWT$\geq${}, "\
                r"slope$\geq${}, $n\sigma\geq{}$)".format(ref_code, net_code, str(filter_options.min_event_snr),
                                                          str(filter_options.cwt_cutoff),
                                                          str(filter_options.slope_cutoff),
                                                          str(filter_options.nsigma_cutoff))
        if vals.any():
            plt.figure(figsize=(32, 9))
            sc = plt.scatter(times,
                             vals,
                             c=qual,
                             alpha=0.5,
                             cmap='gnuplot_r',
                             s=np.maximum(50 * mag, 10),
                             edgecolors=None,
                             linewidths=0)
            time_formatter = matplotlib.dates.DateFormatter("%Y-%m-%d")
            plt.gca().xaxis.set_major_formatter(time_formatter)
            cb = plt.colorbar(sc, drawedges=False)
            cb.set_label('Signal to noise ratio', fontsize=12)
            plt.grid(color='#808080', linestyle=':', alpha=0.7)
            plt.xlabel(xlabel, fontsize=14)
            plt.ylabel(ylabel, fontsize=14)
            plt.xticks(fontsize=14)
            plt.yticks(fontsize=14)
            plt.xlim(time_range)
            plt.ylim((-tt_scale, tt_scale))
            plt.clim(snr_scale)
            plt.title(title, fontsize=18)
            plt.legend(['Point size = Mag - {}, Color = SNR'.format(min_mag)],
                       fontsize=12,
                       loc=1)
            plt.text(0.01,
                     0.96,
                     "Channel selection: {}".format(
                         filter_options.channel_preference),
                     transform=plt.gca().transAxes,
                     fontsize=12)
            plt.text(0.01,
                     0.92,
                     "Start date: {}".format(str(time_range[0])),
                     transform=plt.gca().transAxes,
                     fontsize=12)
            plt.text(0.01,
                     0.88,
                     "  End date: {}".format(str(time_range[1])),
                     transform=plt.gca().transAxes,
                     fontsize=12)
            plt.tight_layout(pad=1.05)
            if annotator is not None:
                annotator()
            if save_file:
                subfolder = os.path.join(
                    ref_code.split('.')[0] + file_label, net_code)
                # subfolder = net_code
                pathlib.Path(subfolder).mkdir(exist_ok=True, parents=True)
                plt_file = os.path.join(
                    subfolder, '_'.join([ref_code, net_code]) + '_' +
                    ylabel.replace(" ", "").replace(".*", "") + ".png")
                plt.savefig(plt_file, dpi=150)
                plt.close()
            else:  # pragma: no cover
                plt.show()

    # end plot_dataset

    df_times = pandas_timestamp_to_plottable_datetime(df['originTimestamp'])
    if batch_options.x_range is not None:
        time_range = batch_options.x_range
    else:
        time_range = (df_times.min(), df_times.max())
    ref_code = ".".join([ref['net'][0], ref['sta'][0]])
    log = logging.getLogger(__name__)
    log.info("Plotting time range " +
             " to ".join([t.strftime("%Y-%m-%d %H:%M:%S")
                          for t in time_range]) + " for " + ref_code)
    yaxis = 'relTtResidual'
    xlabel = 'Event Origin Timestamp'

    # Remove reference station from target set before producing composite image.
    # The reference station may not be there, but remove it if it is.
    mask_ref = compute_matching_network_mask(df, ref)
    mask_targ = compute_matching_network_mask(df, target)
    df_agg = df[(mask_targ) & (~mask_ref)]
    _plot_dataset(df_agg, ','.join(np.unique(target['net'])), ref_code)

    # Export median error for each origin event if export path is provided
    if batch_options.export_path is not None:
        pathlib.Path(batch_options.export_path).mkdir(parents=True,
                                                      exist_ok=True)
        df_agg = df_agg.sort_values('originTimestamp')
        df_export = df_agg[['originTimestamp', 'relTtResidual']]
        median_errors = {'originTimestamp': [], 'rawClockError': []}
        for origin_ts, df_event in df_export.groupby('originTimestamp'):
            median_errors['originTimestamp'].append(origin_ts)
            median_errors['rawClockError'].append(
                df_event['relTtResidual'].median())
        df_export = pd.DataFrame(median_errors)
        fname = ref_code + ".raw_clock_error.csv"
        fname = os.path.join(batch_options.export_path, fname)
        df_export.to_csv(fname, index=False)
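
For orientation, here is a minimal sketch of how this routine might be driven. The real project defines its own batch/filter option classes; types.SimpleNamespace below is only a stand-in carrying the attributes the function actually reads, and df, target and ref are assumed to come from the caller.

from types import SimpleNamespace

# Stand-in option objects; the real project supplies its own classes with these attributes.
batch_options = SimpleNamespace(
    batch_label='_demo',    # suffix appended to the per-station output folder
    save_file=True,         # save PNG files instead of calling plt.show()
    x_range=None,           # None -> derive the time range from df['originTimestamp']
    export_path='exports',  # folder for the per-event median clock error CSV
)
filter_options = SimpleNamespace(
    min_event_snr=10, cwt_cutoff=5, slope_cutoff=3, nsigma_cutoff=4,
    channel_preference='BHZ',
)

# df must provide the columns read above ('originTimestamp', 'relTtResidual',
# 'snr', 'mag'); target and ref are the network/station tables expected by
# compute_matching_network_mask().
_plot_target_network_rel_residuals(df, target, ref, batch_options, filter_options)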
Example #2
import os
import pathlib2

Config = {
    'videomode': 0,
    'stereomode': 0,
    'nomipmap': 0,
    'fixedtime': 0,
    'sizewindow': 2,
    'sizefullscreen': 0,
    'bitdepth': 2,
    'refreshrate': 2,
    'pathplayer': 'engine/blenderplayer.exe',
    'pathfile': 'data/main.blend'
}

if True:

    cur_dir = os.path.basename(pathlib2.Path('').resolve().as_posix())

    if cur_dir == 'bglauncher':

        if os.name == 'nt':
            Config['pathplayer'] = 'dist_win/engine/blenderplayer.exe'
            Config['pathfile'] = 'dist_win/data/testfile.blend'

        if os.name == 'posix':
            Config['pathplayer'] = 'dist_linux/engine/blenderplayer'
            Config['pathfile'] = 'dist_linux/data/testfile.blend'

    if cur_dir == 'dist_win':
        Config['pathplayer'] = 'engine/blenderplayer.exe'
        Config['pathfile'] = 'data/testfile.blend'
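
A hypothetical continuation of this launcher config: start the configured player on the configured file. The original project may launch it differently; subprocess here is purely illustrative.

import subprocess

# Launch the Blender player on the data file resolved above (illustrative only).
subprocess.call([Config['pathplayer'], Config['pathfile']])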
Example #3
def read_downsample_equalize_mask(impath, *args, **kwargs):
    uri_impath = [pathlib.Path(fn).as_uri() for fn in impath]
    return read_downsample_equalize_mask_uri(uri_impath, *args, **kwargs)
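
A quick illustration of the path-to-URI conversion this wrapper performs before delegating to read_downsample_equalize_mask_uri (defined elsewhere in the source project); the file names are placeholders.

import pathlib

paths = ['/tmp/img_000.tif', '/tmp/img_001.tif']   # placeholder inputs
uris = [pathlib.Path(p).as_uri() for p in paths]   # as_uri() requires absolute paths
print(uris)  # ['file:///tmp/img_000.tif', 'file:///tmp/img_001.tif']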
Example #4
    def main(self):
        parser = argparse.ArgumentParser(
            formatter_class=argparse.RawTextHelpFormatter)
        parser.add_argument('--device-model-id',
                            '--device_model_id',
                            type=str,
                            metavar='DEVICE_MODEL_ID',
                            required=False,
                            help='the device model ID registered with Google')
        parser.add_argument('--project-id',
                            '--project_id',
                            type=str,
                            metavar='PROJECT_ID',
                            required=False,
                            help='the project ID used to register this device')
        parser.add_argument('--device-config',
                            type=str,
                            metavar='DEVICE_CONFIG_FILE',
                            default=os.path.join(
                                os.path.expanduser('~/.config'),
                                'googlesamples-assistant',
                                'device_config_library.json'),
                            help='path to store and read device configuration')
        parser.add_argument('--credentials',
                            type=existing_file,
                            metavar='OAUTH2_CREDENTIALS_FILE',
                            default=os.path.join(
                                os.path.expanduser('~/.config'),
                                'google-oauthlib-tool', 'credentials.json'),
                            help='path to store and read OAuth2 credentials')
        parser.add_argument('-v',
                            '--version',
                            action='version',
                            version='%(prog)s ' + Assistant.__version_str__())

        args = parser.parse_args()
        with open(args.credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))

        device_model_id = None
        last_device_id = None
        try:
            with open(args.device_config) as f:
                device_config = json.load(f)
                device_model_id = device_config['model_id']
                last_device_id = device_config.get('last_device_id', None)
        except FileNotFoundError:
            pass

        if not args.device_model_id and not device_model_id:
            raise Exception('Missing --device-model-id option')

        # Re-register if "device_model_id" is given by the user and it differs
        # from what we previously registered with.
        should_register = (args.device_model_id
                           and args.device_model_id != device_model_id)

        device_model_id = args.device_model_id or device_model_id
        with Assistant(credentials, device_model_id) as assistant:
            self.assistant = assistant
            subprocess.Popen([
                "aplay", "{}/sample-audio-files/Startup.wav".format(ROOT_PATH)
            ],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
            events = assistant.start()
            device_id = assistant.device_id
            print('device_model_id:', device_model_id)
            print('device_id:', device_id + '\n')

            # Re-register if "device_id" is different from the last "device_id":
            if should_register or (device_id != last_device_id):
                if args.project_id:
                    register_device(args.project_id, credentials,
                                    device_model_id, device_id)
                    pathlib.Path(os.path.dirname(
                        args.device_config)).mkdir(exist_ok=True)
                    with open(args.device_config, 'w') as f:
                        json.dump(
                            {
                                'last_device_id': device_id,
                                'model_id': device_model_id,
                            }, f)
                else:
                    print(WARNING_NOT_REGISTERED)

            for event in events:
                self.process_event(event)
                usrcmd = event.args
                if configuration['DIYHUE']['DIYHUE_Control'] == 'Enabled':
                    if os.path.isfile('/opt/hue-emulator/config.json'):
                        with open('/opt/hue-emulator/config.json',
                                  'r') as config:
                            hueconfig = json.load(config)
                        for i in range(1, len(hueconfig['lights']) + 1):
                            try:
                                if str(
                                        hueconfig['lights'][str(i)]
                                    ['name']).lower() in str(usrcmd).lower():
                                    assistant.stop_conversation()
                                    hue_control(
                                        str(usrcmd).lower(), str(i),
                                        str(hueconfig['lights_address'][str(i)]
                                            ['ip']))
                                    break
                            except KeyError:
                                say('Unable to help, please check your config file'
                                    )
                if configuration['Tasmota_devicelist'][
                        'Tasmota_Control'] == 'Enabled':
                    for num, name in enumerate(tasmota_devicelist):
                        if name.lower() in str(usrcmd).lower():
                            assistant.stop_conversation()
                            tasmota_control(
                                str(usrcmd).lower(), name.lower(),
                                tasmota_deviceip[num],
                                tasmota_deviceportid[num])
                            break
                if configuration['Conversation'][
                        'Conversation_Control'] == 'Enabled':
                    for i in range(1, numques + 1):
                        try:
                            if str(configuration['Conversation']['question'][i]
                                   [0]).lower() in str(usrcmd).lower():
                                assistant.stop_conversation()
                                selectedans = random.sample(
                                    configuration['Conversation']['answer'][i],
                                    1)
                                say(selectedans[0])
                                break
                        except KeyError:
                            say('Please check if the number of questions matches the number of answers'
                                )

                if Domoticz_Device_Control == True and len(
                        domoticz_devices['result']) > 0:
                    if len(configuration['Domoticz']['Devices']
                           ['Name']) == len(
                               configuration['Domoticz']['Devices']['Id']):
                        for i in range(
                                0,
                                len(configuration['Domoticz']['Devices']
                                    ['Name'])):
                            if str(configuration['Domoticz']['Devices']['Name']
                                   [i]).lower() in str(usrcmd).lower():
                                assistant.stop_conversation()
                                domoticz_control(
                                    str(usrcmd).lower(),
                                    configuration['Domoticz']['Devices']['Id']
                                    [i], configuration['Domoticz']['Devices']
                                    ['Name'][i])
                                break
                    else:
                        say("Number of devices and the number of ids given in config file do not match"
                            )

                if (custom_action_keyword['Keywords']['Magic_mirror'][0]
                    ).lower() in str(usrcmd).lower():
                    assistant.stop_conversation()
                    try:
                        mmmcommand = str(usrcmd).lower()
                        if 'weather'.lower() in mmmcommand:
                            if 'show'.lower() in mmmcommand:
                                mmreq_one = requests.get(
                                    "http://" + mmmip +
                                    ":8080/remote?action=SHOW&module=module_2_currentweather"
                                )
                                mmreq_two = requests.get(
                                    "http://" + mmmip +
                                    ":8080/remote?action=SHOW&module=module_3_currentweather"
                                )
                            if 'hide'.lower() in mmmcommand:
                                mmreq_one = requests.get(
                                    "http://" + mmmip +
                                    ":8080/remote?action=HIDE&module=module_2_currentweather"
                                )
                                mmreq_two = requests.get(
                                    "http://" + mmmip +
                                    ":8080/remote?action=HIDE&module=module_3_currentweather"
                                )
                        if 'power off'.lower() in mmmcommand:
                            mmreq = requests.get(
                                "http://" + mmmip +
                                ":8080/remote?action=SHUTDOWN")
                        if 'reboot'.lower() in mmmcommand:
                            mmreq = requests.get("http://" + mmmip +
                                                 ":8080/remote?action=REBOOT")
                        if 'restart'.lower() in mmmcommand:
                            mmreq = requests.get("http://" + mmmip +
                                                 ":8080/remote?action=RESTART")
                        if 'display on'.lower() in mmmcommand:
                            mmreq = requests.get(
                                "http://" + mmmip +
                                ":8080/remote?action=MONITORON")
                        if 'display off'.lower() in mmmcommand:
                            mmreq = requests.get(
                                "http://" + mmmip +
                                ":8080/remote?action=MONITOROFF")
                    except requests.exceptions.ConnectionError:
                        say("Magic mirror not online")
                if (custom_action_keyword['Keywords']['Recipe_pushbullet'][0]
                    ).lower() in str(usrcmd).lower():
                    assistant.stop_conversation()
                    ingrequest = str(usrcmd).lower()
                    ingredientsidx = ingrequest.find('for')
                    ingrequest = ingrequest[ingredientsidx:]
                    ingrequest = ingrequest.replace('for', "", 1)
                    ingrequest = ingrequest.replace("'}", "", 1)
                    ingrequest = ingrequest.strip()
                    ingrequest = ingrequest.replace(" ", "%20", 1)
                    getrecipe(ingrequest)
                if (custom_action_keyword['Keywords']['Kickstarter_tracking']
                    [0]).lower() in str(usrcmd).lower():
                    assistant.stop_conversation()
                    kickstarter_tracker(str(usrcmd).lower())
                if configuration['Raspberrypi_GPIO_Control'][
                        'GPIO_Control'] == 'Enabled':
                    if (custom_action_keyword['Keywords']['Pi_GPIO_control'][0]
                        ).lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        Action(str(usrcmd).lower())
                if configuration['YouTube']['YouTube_Control'] == 'Enabled':
                    if (custom_action_keyword['Keywords']
                        ['YouTube_music_stream'][0]
                        ).lower() in str(usrcmd).lower() and 'kodi' not in str(
                            usrcmd).lower() and 'chromecast' not in str(
                                usrcmd).lower():
                        assistant.stop_conversation()
                        vlcplayer.stop_vlc()
                        if 'autoplay'.lower() in str(usrcmd).lower():
                            YouTube_Autoplay(str(usrcmd).lower())
                        else:
                            YouTube_No_Autoplay(str(usrcmd).lower())
                if (custom_action_keyword['Keywords']['Stop_music'][0]
                    ).lower() in str(usrcmd).lower():
                    stop()
                if configuration['Radio_stations'][
                        'Radio_Control'] == 'Enabled':
                    if 'radio'.lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        radio(str(usrcmd).lower())
                if configuration['ESP']['ESP_Control'] == 'Enabled':
                    if (custom_action_keyword['Keywords']['ESP_control'][0]
                        ).lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        ESP(str(usrcmd).lower())
                if (custom_action_keyword['Keywords']['Parcel_tracking'][0]
                    ).lower() in str(usrcmd).lower():
                    assistant.stop_conversation()
                    track()
                if (custom_action_keyword['Keywords']['RSS'][0]
                    ).lower() in str(usrcmd).lower() or (
                        custom_action_keyword['Keywords']['RSS'][1]
                    ).lower() in str(usrcmd).lower():
                    assistant.stop_conversation()
                    feed(str(usrcmd).lower())
                if kodicontrol:
                    if (custom_action_keyword['Keywords']['Kodi_actions'][0]
                        ).lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        kodiactions(str(usrcmd).lower())
                # Google Assistant now comes built in with chromecast control, so custom function has been commented
                # if 'chromecast'.lower() in str(usrcmd).lower():
                #     assistant.stop_conversation()
                #     if 'play'.lower() in str(usrcmd).lower():
                #         chromecast_play_video(str(usrcmd).lower())
                #     else:
                #         chromecast_control(usrcmd)
                if (custom_action_keyword['Keywords']['Pause_resume'][0]
                    ).lower() in str(usrcmd).lower() or (
                        custom_action_keyword['Keywords']['Pause_resume'][1]
                    ).lower() in str(usrcmd).lower():
                    assistant.stop_conversation()
                    if vlcplayer.is_vlc_playing():
                        if (custom_action_keyword['Keywords']['Pause_resume']
                            [0]).lower() in str(usrcmd).lower():
                            vlcplayer.pause_vlc()
                    if checkvlcpaused():
                        if (custom_action_keyword['Keywords']['Pause_resume']
                            [1]).lower() in str(usrcmd).lower():
                            vlcplayer.play_vlc()
                    elif vlcplayer.is_vlc_playing(
                    ) == False and checkvlcpaused() == False:
                        say("Sorry nothing is playing right now")
                if (custom_action_keyword['Keywords']['Track_change']['Next']
                    [0]).lower() in str(usrcmd).lower() or (
                        custom_action_keyword['Keywords']['Track_change']
                        ['Next'][1]).lower() in str(usrcmd).lower() or (
                            custom_action_keyword['Keywords']['Track_change']
                            ['Next'][2]).lower() in str(usrcmd).lower():
                    assistant.stop_conversation()
                    if vlcplayer.is_vlc_playing() or checkvlcpaused() == True:
                        vlcplayer.stop_vlc()
                        vlcplayer.change_media_next()
                    elif vlcplayer.is_vlc_playing(
                    ) == False and checkvlcpaused() == False:
                        say("Sorry nothing is playing right now")
                if (custom_action_keyword['Keywords']['Track_change']
                    ['Previous'][0]).lower() in str(usrcmd).lower() or (
                        custom_action_keyword['Keywords']['Track_change']
                        ['Previous'][1]).lower() in str(usrcmd).lower() or (
                            custom_action_keyword['Keywords']['Track_change']
                            ['Previous'][2]).lower() in str(usrcmd).lower():
                    assistant.stop_conversation()
                    if vlcplayer.is_vlc_playing() or checkvlcpaused() == True:
                        vlcplayer.stop_vlc()
                        vlcplayer.change_media_previous()
                    elif vlcplayer.is_vlc_playing(
                    ) == False and checkvlcpaused() == False:
                        say("Sorry nothing is playing right now")
                if (custom_action_keyword['Keywords']['VLC_music_volume'][0]
                    ).lower() in str(usrcmd).lower():
                    assistant.stop_conversation()
                    if vlcplayer.is_vlc_playing() == True or checkvlcpaused(
                    ) == True:
                        if (custom_action_keyword['Dict']['Set']
                            ).lower() in str(usrcmd).lower() or (
                                custom_action_keyword['Dict']['Change']
                            ).lower() in str(usrcmd).lower():
                            if 'hundred'.lower() in str(usrcmd).lower(
                            ) or custom_action_keyword['Dict'][
                                    'Maximum'] in str(usrcmd).lower():
                                settingvollevel = 100
                                with open(
                                        '{}/.mediavolume.json'.format(
                                            USER_PATH), 'w') as vol:
                                    json.dump(settingvollevel, vol)
                            elif 'zero'.lower() in str(usrcmd).lower(
                            ) or custom_action_keyword['Dict'][
                                    'Minimum'] in str(usrcmd).lower():
                                settingvollevel = 0
                                with open(
                                        '{}/.mediavolume.json'.format(
                                            USER_PATH), 'w') as vol:
                                    json.dump(settingvollevel, vol)
                            else:
                                for settingvollevel in re.findall(
                                        r"[-+]?\d*\.\d+|\d+", str(usrcmd)):
                                    with open(
                                            '{}/.mediavolume.json'.format(
                                                USER_PATH), 'w') as vol:
                                        json.dump(settingvollevel, vol)
                            print('Setting volume to: ' + str(settingvollevel))
                            vlcplayer.set_vlc_volume(int(settingvollevel))
                        elif (custom_action_keyword['Dict']['Increase']
                              ).lower() in str(usrcmd).lower() or (
                                  custom_action_keyword['Dict']['Decrease']
                              ).lower() in str(usrcmd).lower(
                              ) or 'reduce'.lower() in str(usrcmd).lower():
                            if os.path.isfile(
                                    "{}/.mediavolume.json".format(USER_PATH)):
                                with open(
                                        '{}/.mediavolume.json'.format(
                                            USER_PATH), 'r') as vol:
                                    oldvollevel = json.load(vol)
                                    for oldvollevel in re.findall(
                                            r'\b\d+\b', str(oldvollevel)):
                                        oldvollevel = int(oldvollevel)
                            else:
                                oldvollevel = vlcplayer.get_vlc_volume()
                                for oldvollevel in re.findall(
                                        r"[-+]?\d*\.\d+|\d+", str(oldvollevel)):
                                    oldvollevel = int(oldvollevel)
                            if (custom_action_keyword['Dict']['Increase']
                                ).lower() in str(usrcmd).lower():
                                if any(char.isdigit() for char in str(usrcmd)):
                                    for changevollevel in re.findall(
                                            r'\b\d+\b', str(usrcmd)):
                                        changevollevel = int(changevollevel)
                                else:
                                    changevollevel = 10
                                newvollevel = oldvollevel + changevollevel
                                print(newvollevel)
                                if int(newvollevel) > 100:
                                    settingvollevel = 100
                                elif int(newvollevel) < 0:
                                    settingvollevel = 0
                                else:
                                    settingvollevel = newvollevel
                                with open(
                                        '{}/.mediavolume.json'.format(
                                            USER_PATH), 'w') as vol:
                                    json.dump(settingvollevel, vol)
                                print('Setting volume to: ' +
                                      str(settingvollevel))
                                vlcplayer.set_vlc_volume(int(settingvollevel))
                            if (custom_action_keyword['Dict']['Decrease']
                                ).lower() in str(usrcmd).lower(
                                ) or 'reduce'.lower() in str(usrcmd).lower():
                                if any(char.isdigit() for char in str(usrcmd)):
                                    for changevollevel in re.findall(
                                            r'\b\d+\b', str(usrcmd)):
                                        changevollevel = int(changevollevel)
                                else:
                                    changevollevel = 10
                                newvollevel = oldvollevel - changevollevel
                                print(newvollevel)
                                if int(newvollevel) > 100:
                                    settingvollevel = 100
                                elif int(newvollevel) < 0:
                                    settingvollevel = 0
                                else:
                                    settingvollevel = newvollevel
                                with open(
                                        '{}/.mediavolume.json'.format(
                                            USER_PATH), 'w') as vol:
                                    json.dump(settingvollevel, vol)
                                print('Setting volume to: ' +
                                      str(settingvollevel))
                                vlcplayer.set_vlc_volume(int(settingvollevel))
                        else:
                            say("Sorry I could not help you")
                    else:
                        say("Sorry nothing is playing right now")
                if (custom_action_keyword['Keywords']['Music_index_refresh'][0]
                    ).lower() in str(usrcmd).lower() and (
                        custom_action_keyword['Keywords']
                        ['Music_index_refresh'][1]
                    ).lower() in str(usrcmd).lower():
                    assistant.stop_conversation()
                    refreshlists()
                if configuration['Gmusicapi']['Gmusic_Control'] == 'Enabled':
                    if (custom_action_keyword['Keywords']
                        ['Google_music_streaming'][0]
                        ).lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        vlcplayer.stop_vlc()
                        gmusicselect(str(usrcmd).lower())
                if configuration['Spotify']['Spotify_Control'] == 'Enabled':
                    if (custom_action_keyword['Keywords']
                        ['Spotify_music_streaming'][0]
                        ).lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        vlcplayer.stop_vlc()
                        spotify_playlist_select(str(usrcmd).lower())
                if configuration['Gaana']['Gaana_Control'] == 'Enabled':
                    if (custom_action_keyword['Keywords']
                        ['Gaana_music_streaming'][0]
                        ).lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        vlcplayer.stop_vlc()
                        gaana_playlist_select(str(usrcmd).lower())
                if configuration['Deezer']['Deezer_Control'] == 'Enabled':
                    if (custom_action_keyword['Keywords']
                        ['Deezer_music_streaming'][0]
                        ).lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        vlcplayer.stop_vlc()
                        deezer_playlist_select(str(usrcmd).lower())

        if custom_wakeword:
            self.detector.terminate()
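
The VLC volume handling above re-opens ~/.mediavolume.json in several places; below is a sketch of the same persistence pattern factored into two helpers. USER_PATH and the default level are assumptions; the original stores a bare integer exactly as json.dump(settingvollevel, vol) does.

import json
import os

def save_volume(level, user_path):
    # Persist the volume level as a bare JSON integer, mirroring the pattern above.
    with open(os.path.join(user_path, '.mediavolume.json'), 'w') as vol:
        json.dump(level, vol)

def load_volume(user_path, default=50):
    # Return the stored level, or a default when the file does not exist yet.
    path = os.path.join(user_path, '.mediavolume.json')
    if os.path.isfile(path):
        with open(path) as vol:
            return int(json.load(vol))
    return default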
Example #5
import os
import pathlib


def path_as_uri(path):
    return pathlib.Path(os.path.abspath(path)).as_uri()
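
Example output of the helper above; the relative path is made absolute first because Path.as_uri() raises ValueError for relative paths. The resulting URI is of course machine dependent.

print(path_as_uri('data/main.blend'))
# e.g. file:///home/user/project/data/main.blend (depends on the current working directory)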
Example #6
def test_py7zr_is_7zfile_path():
    assert py7zr.is_7zfile(pathlib.Path(testdata_path).joinpath('test_1.7z'))
Example #7
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--device-model-id', '--device_model_id', type=str,
                        metavar='DEVICE_MODEL_ID', required=False,
                        help='the device model ID registered with Google')
    parser.add_argument('--project-id', '--project_id', type=str,
                        metavar='PROJECT_ID', required=False,
                        help='the project ID used to register this device')
    parser.add_argument('--nickname', type=str,
                        metavar='NICKNAME', required=False,
                        help='the nickname used to register this device')
    parser.add_argument('--device-config', type=str,
                        metavar='DEVICE_CONFIG_FILE',
                        default=os.path.join(
                            os.path.expanduser('~/.config'),
                            'googlesamples-assistant',
                            'device_config_library.json'
                        ),
                        help='path to store and read device configuration')
    parser.add_argument('--credentials', type=existing_file,
                        metavar='OAUTH2_CREDENTIALS_FILE',
                        default=os.path.join(
                            os.path.expanduser('~/.config'),
                            'google-oauthlib-tool',
                            'credentials.json'
                        ),
                        help='path to store and read OAuth2 credentials')
    parser.add_argument('--query', type=str,
                        metavar='QUERY',
                        help='query to send as soon as the Assistant starts')
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s ' + Assistant.__version_str__())

    args = parser.parse_args()
    with open(args.credentials, 'r') as f:
        credentials = google.oauth2.credentials.Credentials(token=None,
                                                            **json.load(f))

    device_model_id = None
    last_device_id = None
    try:
        with open(args.device_config) as f:
            device_config = json.load(f)
            device_model_id = device_config['model_id']
            last_device_id = device_config.get('last_device_id', None)
    except FileNotFoundError:
        pass

    if not args.device_model_id and not device_model_id:
        raise Exception('Missing --device-model-id option')

    should_register = (
        args.device_model_id and args.device_model_id != device_model_id)

    device_model_id = args.device_model_id or device_model_id

    note = Notify.Notification.new(
        Notify.get_app_name(),
        'Micno with Assistant は動作中です',  # "Micno with Assistant is running"
        'mic-volume-high')
    note.set_urgency(Notify.Urgency.NORMAL)
    note.show()

    with Assistant(credentials, device_model_id) as assistant:
        global device_id_global, device_model_id_global
        events = assistant.start()

        device_id = assistant.device_id
        print('device_model_id:', device_model_id)
        print('device_id:', device_id + '\n')
        device_id_global = device_id
        device_model_id_global = device_model_id
        if should_register or (device_id != last_device_id):
            if args.project_id:
                register_device(args.project_id, credentials,
                                device_model_id, device_id, args.nickname)
                pathlib.Path(os.path.dirname(args.device_config)).mkdir(
                    exist_ok=True)
                with open(args.device_config, 'w') as f:
                    json.dump({
                        'last_device_id': device_id,
                        'model_id': device_model_id,
                    }, f)
            else:
                print(WARNING_NOT_REGISTERED)

        for event in events:
            if event.type == EventType.ON_START_FINISHED and args.query:
                assistant.send_text_query(args.query)
            process_event(event)
Example #8
import json
import pathlib


def simple_report():
    """Example of parsed pytest-json report contents."""
    current_folder = pathlib.Path(__file__).resolve().parent
    return json.loads((current_folder / "simple_report.json").read_text())
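
A hypothetical test consuming this helper as a pytest fixture (the @pytest.fixture decorator appears to have been trimmed from the snippet); the asserted key is an assumption about the layout of simple_report.json.

def test_simple_report_has_summary(simple_report):
    assert isinstance(simple_report, dict)
    assert 'summary' in simple_report  # assumed key in the parsed pytest-json report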
Example #9
    "en-US,en;q=0.9,de-DE;q=0.8,de;q=0.7",
    'Cache-Control':
    "no-cache",
    'Connection':
    "keep-alive",
    'User-Agent':
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36",
}

base_url = "https://www.oxfordlearnersdictionaries.com/external/pdf/wordlists/oxford-3000-5000/"
pdf_files = [("The%20Oxford%203000.pdf", "ox3k.pdf"),
             ("The%20Oxford%205000.pdf", "ox5k.pdf")]

for in_fn, out_fn in pdf_files:
    url = base_url + in_fn
    path = pl.Path(out_fn)
    if path.exists():
        continue

    print("downloading", url)
    resp = requests.get(url, headers=HEADERS)
    with path.open(mode="wb") as fobj:
        fobj.write(resp.content)

GRAMMAR_QUALIFIERS = {
    "n": "noun",
    "v": "verb",
    "noun": "noun",
    "adj": "adjective",
    "adv": "adverb",
    "prep": "preposition",
Example #10
def _multinode_transfer(method, dest, source, dst, username, ssh_private_key,
                        rls, mpt):
    # type: (str, DestinationSettings, SourceSettings, str, str,
    #        pathlib.Path, dict, int) -> None
    """Transfer data to multiple destination nodes simultaneously
    :param str method: transfer method
    :param DestinationSettings dest: destination settings
    :param SourceSettings source: source settings
    :param str dst: destination path
    :param str username: username
    :param pathlib.Path ssh_private_key: ssh private key
    :param dict rls: remote login settings
    :param int mpt: max parallel transfers per node
    """
    src = source.path
    src_incl = source.include
    src_excl = source.exclude
    psrc = pathlib.Path(src)
    # if source isn't a directory, convert it using src_incl
    if not psrc.is_dir():
        src_excl = None
        src_incl = [src]
        src = str(psrc.parent)
        psrc = psrc.parent
    # if split is specified, force to multinode_scp
    if (dest.data_transfer.split_files_megabytes is not None
            and method != 'multinode_scp'):
        logger.warning('forcing transfer method to multinode_scp with split')
        method = 'multinode_scp'
    buckets = {}
    files = {}
    rcodes = {}
    spfiles = []
    spfiles_count = {}
    spfiles_count_lock = threading.Lock()
    for rkey in rls:
        buckets[rkey] = 0
        files[rkey] = []
        rcodes[rkey] = None
    # walk the directory structure
    # 1. construct a set of dirs to create on the remote side
    # 2. binpack files to different nodes
    total_files = 0
    dirs = set()
    if dest.relative_destination_path is not None:
        dirs.add(dest.relative_destination_path)
    for entry in util.scantree(src):
        rel = pathlib.Path(entry.path).relative_to(psrc)
        sparent = str(pathlib.Path(entry.path).relative_to(psrc).parent)
        if entry.is_file():
            srel = str(rel)
            # check filters
            if src_excl is not None:
                inc = not any([fnmatch.fnmatch(srel, x) for x in src_excl])
            else:
                inc = True
            if src_incl is not None:
                inc = any([fnmatch.fnmatch(srel, x) for x in src_incl])
            if not inc:
                logger.debug('skipping file {} due to filters'.format(
                    entry.path))
                continue
            if dest.relative_destination_path is None:
                dstpath = '{}{}'.format(dst, rel)
            else:
                dstpath = '{}{}/{}'.format(dst, dest.relative_destination_path,
                                           rel)
            # get key of min bucket values
            fsize = entry.stat().st_size
            if (dest.data_transfer.split_files_megabytes is not None
                    and fsize > dest.data_transfer.split_files_megabytes):
                nsplits = int(
                    math.ceil(fsize /
                              dest.data_transfer.split_files_megabytes))
                lpad = int(math.log10(nsplits)) + 1
                spfiles.append(dstpath)
                spfiles_count[dstpath] = nsplits
                n = 0
                curr = 0
                while True:
                    end = curr + dest.data_transfer.split_files_megabytes
                    if end > fsize:
                        end = fsize
                    key = min(buckets, key=buckets.get)
                    buckets[key] += (end - curr)
                    if n == 0:
                        dstfname = dstpath
                    else:
                        dstfname = '{}.{}{}'.format(dstpath,
                                                    _FILE_SPLIT_PREFIX,
                                                    str(n).zfill(lpad))
                    files[key].append((entry.path, dstfname, curr, end))
                    if end == fsize:
                        break
                    curr = end
                    n += 1
            else:
                key = min(buckets, key=buckets.get)
                buckets[key] += fsize
                files[key].append((entry.path, dstpath, None, None))
            total_files += 1
        # add directory to create
        if sparent != '.':
            if dest.relative_destination_path is None:
                dirs.add(sparent)
            else:
                dirs.add('{}/{}'.format(dest.relative_destination_path,
                                        sparent))
    total_size = sum(buckets.values())
    if total_files == 0:
        logger.error('no files to ingress')
        return
    # create remote directories via ssh
    if len(dirs) == 0:
        logger.debug('no remote directories to create')
    else:
        logger.debug('creating remote directories: {}'.format(dirs))
        dirs = ['mkdir -p {}'.format(x) for x in list(dirs)]
        dirs.insert(0, 'cd {}'.format(dst))
        _rls = next(iter(rls.values()))
        ip = _rls.remote_login_ip_address
        port = _rls.remote_login_port
        del _rls
        mkdircmd = ('ssh -T -x -o StrictHostKeyChecking=no '
                    '-o UserKnownHostsFile={} -i {} -p {} {}@{} {}'.format(
                        os.devnull, ssh_private_key, port, username, ip,
                        util.wrap_commands_in_shell(dirs)))
        rc = util.subprocess_with_output(mkdircmd,
                                         shell=True,
                                         suppress_output=True)
        if rc == 0:
            logger.info('remote directories created on {}'.format(dst))
        else:
            logger.error('remote directory creation failed')
            return
        del ip
        del port
    logger.info(
        'ingress data: {0:.4f} MiB in {1} files to transfer, using {2} max '
        'parallel transfers per node'.format(total_size / _MEGABYTE,
                                             total_files, mpt))
    logger.info('begin ingressing data from {} to {}'.format(src, dst))
    nodekeys = list(buckets.keys())
    threads = []
    start = datetime.datetime.now()
    for i in range(0, len(buckets)):
        nkey = nodekeys[i]
        thr = threading.Thread(
            target=_multinode_thread_worker,
            args=(method, mpt, nkey, rcodes, files[nkey], spfiles_count,
                  spfiles_count_lock, rls[nkey].remote_login_ip_address,
                  rls[nkey].remote_login_port, username, ssh_private_key,
                  dest.data_transfer.scp_ssh_extra_options,
                  dest.data_transfer.rsync_extra_options))
        threads.append(thr)
        thr.start()
    for i in range(0, len(buckets)):
        threads[i].join()
    diff = datetime.datetime.now() - start
    del threads
    success = True
    for nkey in rcodes:
        if rcodes[nkey] != 0:
            logger.error('data ingress failed to node: {}'.format(nkey))
            success = False
    if success:
        logger.info(
            'finished ingressing {0:.4f} MiB of data in {1} files from {2} to '
            '{3} in {4:.2f} sec ({5:.3f} Mbit/s)'.format(
                total_size / _MEGABYTE, total_files, src, dst,
                diff.total_seconds(),
                (total_size * 8 / 1e6) / diff.total_seconds()))
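
The per-node file assignment above is a greedy binpack: each file (or file slice) goes to the node whose bucket currently holds the fewest bytes. A standalone sketch with made-up sizes:

buckets = {'node0': 0, 'node1': 0, 'node2': 0}
assignments = {k: [] for k in buckets}
for fname, fsize in [('a.bin', 700), ('b.bin', 300), ('c.bin', 500), ('d.bin', 200)]:
    key = min(buckets, key=buckets.get)  # node with the least data assigned so far
    buckets[key] += fsize
    assignments[key].append(fname)
print(assignments)
# {'node0': ['a.bin'], 'node1': ['b.bin', 'd.bin'], 'node2': ['c.bin']}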
Example #11
def ingress_data(batch_client,
                 compute_client,
                 network_client,
                 config,
                 rls=None,
                 kind=None,
                 total_vm_count=None,
                 to_fs=None):
    # type: (batch.BatchServiceClient,
    #        azure.mgmt.compute.ComputeManagementClient,
    #        azure.mgmt.network.NetworkManagementClient, dict, dict, str,
    #        int, str) -> list
    """Ingresses data into Azure
    :param batch_client: The batch client to use.
    :type batch_client: `batchserviceclient.BatchServiceClient`
    :param azure.mgmt.compute.ComputeManagementClient compute_client:
        compute client
    :param azure.mgmt.network.NetworkManagementClient network_client:
        network client
    :param dict config: configuration dict
    :param dict rls: remote login settings
    :param str kind: 'all', 'shared', 'storage', or 'remotefs'
    :param int total_vm_count: total current vm count
    :param str to_fs: to remote filesystem
    :rtype: list
    :return: list of storage threads
    """
    storage_threads = []
    files = settings.global_resources_files(config)
    if util.is_none_or_empty(files):
        logger.info('no files to ingress detected')
        return storage_threads
    pool = settings.pool_settings(config)
    is_windows = settings.is_windows_pool(config)
    for fdict in files:
        source = settings.files_source_settings(fdict)
        dest = settings.files_destination_settings(fdict)
        if (dest.shared_data_volume is not None
                and dest.storage_account_settings is not None):
            raise RuntimeError(
                'cannot specify both shared data volume and storage for the '
                'destination for source: {}'.format(source.path))
        direct_single_node = False
        if dest.relative_destination_path is not None:
            if dest.storage_account_settings is not None:
                raise RuntimeError(
                    'cannot specify a relative destination path for ingress '
                    'to storage; use the --collate option in blobxfer '
                    'instead.')
            # check if this is going to a single vm
            if dest.shared_data_volume is None:
                if total_vm_count == 1:
                    direct_single_node = True
                elif kind == 'storage':
                    # this is to prevent total_vm_count check below for
                    # non shared/all targets and will force continuation
                    # of the loop below
                    direct_single_node = True
                elif total_vm_count is None:
                    raise ValueError('total_vm_count is not set')
                else:
                    raise RuntimeError(
                        'Cannot ingress data directly into compute node '
                        'host for pools with more than one node. Please use '
                        'a shared data volume as the ingress destination '
                        'instead.')
        if dest.shared_data_volume is not None or direct_single_node:
            if kind == 'storage':
                logger.warning(
                    'skipping data ingress from {} to {} for pool as ingress '
                    'to shared file system not specified'.format(
                        source.path, dest.shared_data_volume))
                continue
            if is_windows:
                logger.error(
                    ('cannot data ingress from {} to pool {} with windows '
                     'compute nodes').format(source.path, pool.id))
                continue
            # get rfs settings
            rfs = None
            dst_rfs = False
            # set base dst path
            dst = '{}/batch/tasks/mounts'.format(
                settings.temp_disk_mountpoint(config))
            # convert shared to actual path
            if not direct_single_node:
                sdv = settings.global_resources_shared_data_volumes(config)
                for sdvkey in sdv:
                    if sdvkey == dest.shared_data_volume:
                        if settings.is_shared_data_volume_gluster_on_compute(
                                sdv, sdvkey):
                            if kind == 'remotefs':
                                continue
                            dst = '{}/{}/'.format(
                                dst, settings.get_gluster_on_compute_volume())
                        elif settings.is_shared_data_volume_storage_cluster(
                                sdv, sdvkey):
                            if kind != 'remotefs' or sdvkey != to_fs:
                                continue
                            if rfs is None:
                                rfs = settings.remotefs_settings(config, to_fs)
                            dst = rfs.storage_cluster.file_server.mountpoint
                            # add trailing directory separator if needed
                            if dst[-1] != '/':
                                dst = dst + '/'
                            dst_rfs = True
                        else:
                            raise RuntimeError(
                                'data ingress to {} not supported'.format(
                                    sdvkey))
                        break
            # skip entries that are a mismatch if remotefs transfer
            # is selected
            if kind == 'remotefs':
                if not dst_rfs:
                    continue
            else:
                if dst_rfs:
                    continue
            # set ssh info
            if dst_rfs:
                username = rfs.storage_cluster.ssh.username
                #  retrieve public ips from all vms in named storage cluster
                rls = {}
                for i in range(rfs.storage_cluster.vm_count):
                    vm_name = '{}-vm{}'.format(
                        rfs.storage_cluster.hostname_prefix, i)
                    vm = compute_client.virtual_machines.get(
                        resource_group_name=rfs.storage_cluster.resource_group,
                        vm_name=vm_name,
                    )
                    _, pip = resource.get_nic_and_pip_from_virtual_machine(
                        network_client, rfs.storage_cluster.resource_group, vm)
                    # create compute node rls settings with sc vm ip/port
                    rls[vm_name] = \
                        batchmodels.ComputeNodeGetRemoteLoginSettingsResult(
                            remote_login_ip_address=pip.ip_address,
                            remote_login_port=22)
            else:
                username = pool.ssh.username
            if rls is None:
                logger.warning(
                    'skipping data ingress from {} to {} for pool with no '
                    'remote login settings or non-existent pool'.format(
                        source.path, dest.shared_data_volume))
                continue
            if username is None:
                raise RuntimeError(
                    'cannot ingress data to shared data volume without a '
                    'valid SSH user')
            # try to get valid ssh private key (from various config blocks)
            ssh_private_key = dest.data_transfer.ssh_private_key
            if ssh_private_key is None:
                ssh_private_key = pool.ssh.ssh_private_key
            if ssh_private_key is None:
                ssh_private_key = pathlib.Path(crypto.get_ssh_key_prefix())
                if not ssh_private_key.exists():
                    raise RuntimeError(
                        'specified SSH private key is invalid or does not '
                        'exist')
            logger.debug(
                'using ssh_private_key from: {}'.format(ssh_private_key))
            if (dest.data_transfer.method == 'scp'
                    or dest.data_transfer.method == 'rsync+ssh'):
                # split/source include/exclude will force multinode
                # transfer with mpt=1
                if (dest.data_transfer.split_files_megabytes is not None
                        or source.include is not None
                        or source.exclude is not None):
                    _multinode_transfer(
                        'multinode_' + dest.data_transfer.method, dest, source,
                        dst, username, ssh_private_key, rls, 1)
                else:
                    _singlenode_transfer(dest, source.path, dst, username,
                                         ssh_private_key, rls)
            elif (dest.data_transfer.method == 'multinode_scp'
                  or dest.data_transfer.method == 'multinode_rsync+ssh'):
                _multinode_transfer(
                    dest.data_transfer.method, dest, source, dst, username,
                    ssh_private_key, rls,
                    dest.data_transfer.max_parallel_transfers_per_node)
            else:
                raise RuntimeError('unknown transfer method: {}'.format(
                    dest.data_transfer.method))
        elif dest.storage_account_settings is not None:
            if kind == 'shared':
                logger.warning(
                    'skipping data ingress from {} for pool as ingress '
                    'to Azure Blob/File Storage not specified'.format(
                        source.path))
                continue
            thr = _azure_blob_storage_transfer(
                settings.credentials_storage(config,
                                             dest.storage_account_settings),
                dest.data_transfer, source)
            storage_threads.append(thr)
        else:
            raise RuntimeError(
                'invalid file transfer configuration: {}'.format(fdict))
    return storage_threads
Example #12
def _singlenode_transfer(dest, src, dst, username, ssh_private_key, rls):
    # type: (DestinationSettings, str, str, str, pathlib.Path, dict) -> None
    """Transfer data to a single node
    :param DestinationSettings dest: destination settings
    :param str src: source path
    :param str dst: destination path
    :param str username: username
    :param pathlib.Path ssh_private_key: ssh private key
    :param dict rls: remote login settings
    """
    # get remote settings
    _rls = next(iter(rls.values()))
    ip = _rls.remote_login_ip_address
    port = _rls.remote_login_port
    del _rls
    # modify dst with relative dest
    if util.is_not_empty(dest.relative_destination_path):
        dst = '{}{}'.format(dst, dest.relative_destination_path)
        # create relative path on host
        logger.debug('creating remote directory: {}'.format(dst))
        dirs = ['mkdir -p {}'.format(dst)]
        mkdircmd = ('ssh -T -x -o StrictHostKeyChecking=no '
                    '-o UserKnownHostsFile={} -i {} -p {} {}@{} {}'.format(
                        os.devnull, ssh_private_key, port, username, ip,
                        util.wrap_commands_in_shell(dirs)))
        rc = util.subprocess_with_output(mkdircmd,
                                         shell=True,
                                         suppress_output=True)
        if rc == 0:
            logger.info('remote directories created on {}'.format(dst))
        else:
            logger.error('remote directory creation failed')
            return
        del dirs
    # determine if recursive flag must be set
    psrc = pathlib.Path(src)
    recursive = '-r' if psrc.is_dir() else ''
    # set command source path and adjust dst path
    if recursive:
        cmdsrc = '.'
    else:
        cmdsrc = shellquote(src)
    # transfer data
    if dest.data_transfer.method == 'scp':
        cmd = ('scp -o StrictHostKeyChecking=no '
               '-o UserKnownHostsFile={} -p {} {} -i {} '
               '-P {} {} {}@{}:"{}"'.format(
                   os.devnull,
                   dest.data_transfer.scp_ssh_extra_options, recursive,
                   ssh_private_key.resolve(), port, cmdsrc, username, ip,
                   shellquote(dst)))
    elif dest.data_transfer.method == 'rsync+ssh':
        cmd = ('rsync {} {} -e "ssh -T -x -o StrictHostKeyChecking=no '
               '-o UserKnownHostsFile={} {} -i {} -p {}" {} {}@{}:"{}"'.format(
                   dest.data_transfer.rsync_extra_options, recursive,
                   os.devnull, dest.data_transfer.scp_ssh_extra_options,
                   ssh_private_key.resolve(), port, cmdsrc, username, ip,
                   shellquote(dst)))
    else:
        raise ValueError('Unknown transfer method: {}'.format(
            dest.data_transfer.method))
    logger.info('begin ingressing data from {} to {}'.format(src, dst))
    start = datetime.datetime.now()
    rc = util.subprocess_with_output(cmd,
                                     shell=True,
                                     cwd=src if recursive else None)
    diff = datetime.datetime.now() - start
    if rc == 0:
        logger.info(
            'finished ingressing data from {0} to {1} in {2:.2f} sec'.format(
                src, dst, diff.total_seconds()))
    else:
        logger.error(
            'data ingress from {} to {} failed with return code: {}'.format(
                src, dst, rc))
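For orientation, a minimal, self-contained sketch of the scp invocation that _singlenode_transfer assembles above; the host, port, key path and destination below are made-up placeholders, and the command is only printed rather than executed:

import os

# Hypothetical placeholder values -- not taken from any real configuration
ssh_private_key = os.path.expanduser('~/.ssh/id_rsa')
username = 'azureuser'
ip = '10.0.0.4'
port = 50000
dst = '$AZ_BATCH_NODE_SHARED_DIR/ingress/'

# Recursive copy of a directory: run scp -r from inside the source directory ('.')
cmd = ('scp -o StrictHostKeyChecking=no '
       '-o UserKnownHostsFile={} -p -r -i {} '
       '-P {} . {}@{}:"{}"'.format(
           os.devnull, ssh_private_key, port, username, ip, dst))
print(cmd)  # the function above hands a string like this to util.subprocess_with_output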
Exemple #13
0
import pathlib2 as pathlib
import json

my_file = open(pathlib.Path(__file__).parent / 'data.json', 'r')
file_contents = json.load(my_file)  # Converts the JSON into a dictionary

my_file.close()

print(file_contents['friends'][0])

# Saving to a .json file

cars = [{
    "made": "ford",
    "model": "Fiesta"
}, {
    "made": "ford",
    "model": "Focus"
}]

with open(pathlib.Path(__file__).parent / 'cars.json', 'w',
          encoding='utf-8') as f:
    json.dump(cars, f)
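As a quick sanity check of the file written above, the list can be read back with a context manager; nothing beyond the cars.json contents from the previous block is assumed:

with open(pathlib.Path(__file__).parent / 'cars.json', 'r', encoding='utf-8') as f:
    saved_cars = json.load(f)  # back to a list of dictionaries

print(saved_cars[1]['model'])  # prints "Focus"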
Exemple #14
0
 def _plot_dataset(ds, net_code, ref_code):
     # Sort ds rows by SNR, so that the weakest SNR points are drawn first and the high SNR point last,
     # to make sure high SNR point are in the top rendering layer.
     ds = ds.sort_values('snr')
     times = pandas_timestamp_to_plottable_datetime(ds['originTimestamp'])
     vals = ds[yaxis].values
     qual = ds['snr'].values
     min_mag = 4.0
     mag = ds['mag'].values - min_mag
     ylabel = 'Relative TT residual (sec)'
     title = r"Station {} TT residuals relative to network {} (filtering: ref SNR$\geq${}, CWT$\geq${}, "\
             r"slope$\geq${}, $n\sigma\geq{}$)".format(ref_code, net_code, str(filter_options.min_event_snr),
                                                       str(filter_options.cwt_cutoff),
                                                       str(filter_options.slope_cutoff),
                                                       str(filter_options.nsigma_cutoff))
     if vals.any():
         plt.figure(figsize=(32, 9))
         sc = plt.scatter(times,
                          vals,
                          c=qual,
                          alpha=0.5,
                          cmap='gnuplot_r',
                          s=np.maximum(50 * mag, 10),
                          edgecolors=None,
                          linewidths=0)
         time_formatter = matplotlib.dates.DateFormatter("%Y-%m-%d")
         plt.axes().xaxis.set_major_formatter(time_formatter)
         cb = plt.colorbar(sc, drawedges=False)
         cb.set_label('Signal to noise ratio', fontsize=12)
         plt.grid(color='#808080', linestyle=':', alpha=0.7)
         plt.xlabel(xlabel, fontsize=14)
         plt.ylabel(ylabel, fontsize=14)
         plt.xticks(fontsize=14)
         plt.yticks(fontsize=14)
         plt.xlim(time_range)
         plt.ylim((-tt_scale, tt_scale))
         plt.clim(snr_scale)
         plt.title(title, fontsize=18)
         plt.legend(['Point size = Mag - {}, Color = SNR'.format(min_mag)],
                    fontsize=12,
                    loc=1)
         plt.text(0.01,
                  0.96,
                  "Channel selection: {}".format(
                      filter_options.channel_preference),
                  transform=plt.gca().transAxes,
                  fontsize=12)
         plt.text(0.01,
                  0.92,
                  "Start date: {}".format(str(time_range[0])),
                  transform=plt.gca().transAxes,
                  fontsize=12)
         plt.text(0.01,
                  0.88,
                  "  End date: {}".format(str(time_range[1])),
                  transform=plt.gca().transAxes,
                  fontsize=12)
         plt.tight_layout(pad=1.05)
         if annotator is not None:
             annotator()
         if save_file:
             subfolder = os.path.join(
                 ref_code.split('.')[0] + file_label, net_code)
             # subfolder = net_code
             pathlib.Path(subfolder).mkdir(exist_ok=True, parents=True)
             plt_file = os.path.join(
                 subfolder, '_'.join([ref_code, net_code]) + '_' +
                 ylabel.replace(" ", "").replace(".*", "") + ".png")
             plt.savefig(plt_file, dpi=150)
             plt.close()
         else:  # pragma: no cover
             plt.show()
Exemple #15
0
def test_descriptor_read_data(tmpdir):
    tmpdir.join('a').write('ab')
    lp = upload.LocalPath(pathlib.Path(str(tmpdir)), pathlib.Path('a'))

    # test normal
    opts = mock.MagicMock()
    opts.chunk_size_bytes = 1
    opts.one_shot_bytes = 0
    opts.store_file_properties.md5 = True
    opts.rsa_public_key = None

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'
    ase._encryption = None

    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    ud._resume = mock.MagicMock()
    ud._resume.return_value = None

    # test no data to read
    mockoffsets = mock.MagicMock()
    mockoffsets.num_bytes = 0
    data, newoffset = ud.read_data(mockoffsets)
    assert data is None
    assert newoffset is None

    # test normal data to read
    offsets, rb = ud.next_offsets()
    assert rb is None
    data, newoffset = ud.read_data(offsets)
    assert data == b'a'
    assert newoffset is None

    # test stdin
    with mock.patch('blobxfer.STDIN',
                    new_callable=mock.PropertyMock) as patched_stdin:
        patched_stdin.read = mock.MagicMock()
        patched_stdin.read.return_value = b'z'
        ud.local_path.use_stdin = True
        data, newoffset = ud.read_data(offsets)
        assert data == b'z'
        assert newoffset.chunk_num == 0
        assert newoffset.num_bytes == 1
        assert newoffset.range_start == 0
        assert newoffset.range_end == 0
        assert not newoffset.pad
        assert ud._total_chunks == 3
        assert ud._outstanding_ops == 3
        assert ud._offset == 1
        assert ud.entity.size == 2

    with mock.patch('blobxfer.STDIN',
                    new_callable=mock.PropertyMock) as patched_stdin:
        patched_stdin.read = mock.MagicMock()
        patched_stdin.read.return_value = None
        ud.local_path.use_stdin = True
        data, newoffset = ud.read_data(offsets)
        assert data is None
        assert newoffset is None
        assert ud._total_chunks == 2
        assert ud._outstanding_ops == 2
        assert ud._chunk_num == 0
Exemple #16
0
# 	'I1_min' : 1.0,
# 	'I1_max' : 1.0,
# 	'I2_min' : 1.0,
# 	'I2_max' : 8.0
# 	}

I1_space = np.arange(constraints['I1_min'], constraints['I1_max'] + step, step)
I1_number = int((constraints['I1_max'] + step - constraints['I1_min']) / step)
I2_space = np.arange(constraints['I2_min'], constraints['I2_max'] + step, step)
I2_number = int((constraints['I2_max'] + step - constraints['I2_min']) / step)

grid = np.zeros((I1_number, I2_number))

timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M")
final_data_saving_folder = data_saving_folder + r'\extracted_features' + '_' + str(timestamp) + '\\'
pathlib.Path(final_data_saving_folder).mkdir(parents = True, exist_ok = True)
maps_saving_folder = final_data_saving_folder + r'\maps' + '\\'
pathlib.Path(maps_saving_folder).mkdir(parents = True, exist_ok = True)
feature_file_path = final_data_saving_folder + 'features' + '.txt' # + '_' + str(timestamp) + ".txt"
feature_file = open(feature_file_path,"w+")
failures_file_path = final_data_saving_folder + 'failures' + ".txt"
failure_file = open(failures_file_path,"w+")

def log_failure(I1, I2, message = ""):
	error_message = '%.2f' % I1 + ' ' + '%.2f' % I2 + ' ' + message + '\n'
	print(error_message)
	failure_file.write(error_message)

# HERE YOU PUT THE FUNCTIONS THAT WILL EXTRACT THE FINAL FEATURES YOU NEED
# pointer to the function : array of args it needs
# feature_functions_list = [
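The commented header above describes a mapping from each feature-extraction function to the array of arguments it needs, but the actual list is truncated. Purely as an illustration of that shape, a hypothetical sketch (the extractor names and arguments below are invented, not from the original script):

# Hypothetical extractors standing in for the real feature functions
def extract_mean(trace):
    return sum(trace) / len(trace)

def extract_peak(trace, window):
    return max(trace[:window])

# pointer to the function : array of args it needs
feature_functions_list = [
    (extract_mean, []),
    (extract_peak, [100]),
]

# Driving the extractors for one (I1, I2) grid point
trace = [0.1, 0.5, 0.3]
features = [fn(trace, *extra) for fn, extra in feature_functions_list]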
Exemple #17
0
    def get_file_entry(self):
        local_file = None
        # don't provide file in case this event is out of the history window
        last_count = self._get_metric_count(self.metric,
                                            self.variant,
                                            next=False)
        if abs(self._count - last_count) > self._file_history_size:
            output = None
        elif isinstance(self._image_data, (six.StringIO, six.BytesIO)):
            output = self._image_data
        elif self._image_data is not None:
            image_data = self._image_data
            if not isinstance(image_data, np.ndarray):
                # try conversion, if it fails we'll leave it to the user.
                image_data = np.asarray(image_data, dtype=np.uint8)
            image_data = np.atleast_3d(image_data)
            if image_data.dtype != np.uint8:
                if np.issubdtype(image_data.dtype,
                                 np.floating) and image_data.max() <= 1.0:
                    image_data = (image_data * 255).astype(np.uint8)
                else:
                    image_data = image_data.astype(np.uint8)
            shape = image_data.shape
            height, width, channel = shape[:3]
            if channel == 1:
                image_data = np.reshape(image_data, (height, width))

            # serialize image
            image = Image.fromarray(image_data)
            output = six.BytesIO()
            image_format = Image.registered_extensions().get(
                self._format.lower(), 'JPEG')
            image.save(output, format=image_format, quality=self._quality)
            output.seek(0)
        else:
            # noinspection PyBroadException
            try:
                output = pathlib2.Path(self._local_image_path)
                if not output.is_file():
                    output = None
            except Exception:
                output = None

            if output is None:
                from ...debugging.log import LoggerRoot
                LoggerRoot.get_base_logger().warning(
                    'Skipping upload, could not find object file \'{}\''.
                    format(self._local_image_path))
                return None

        return self.FileEntry(
            event=self,
            name=self._upload_filename,
            stream=output,
            url_prop='url',
            key_prop='key',
            upload_uri=self._upload_uri,
            delete_local_file=local_file
            if self._delete_after_upload else None,
            retries=self.retries,
        )
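A stripped-down sketch of the image-normalisation path in get_file_entry above: a float array in [0, 1] is scaled to uint8, single-channel data is flattened back to 2-D, and the result is serialised to an in-memory JPEG. The 8x8 random array and the quality value are just stand-ins:

import numpy as np
import six
from PIL import Image

image_data = np.random.rand(8, 8)  # hypothetical float image in [0, 1]
image_data = np.atleast_3d(image_data)
if np.issubdtype(image_data.dtype, np.floating) and image_data.max() <= 1.0:
    image_data = (image_data * 255).astype(np.uint8)

height, width, channel = image_data.shape[:3]
if channel == 1:
    image_data = np.reshape(image_data, (height, width))

output = six.BytesIO()
Image.fromarray(image_data).save(output, format='JPEG', quality=87)
output.seek(0)
print('{} bytes of JPEG'.format(len(output.getvalue())))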
Exemple #18
0
def test_descriptor_generate_metadata(tmpdir):
    tmpdir.join('a').write('ab')
    lp = upload.LocalPath(pathlib.Path(str(tmpdir)), pathlib.Path('a'))

    # test nothing
    opts = mock.MagicMock()
    opts.chunk_size_bytes = 1
    opts.one_shot_bytes = 0
    opts.store_file_properties.attributes = False
    opts.store_file_properties.md5 = False
    opts.rsa_public_key = None

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'
    ase._encryption = None

    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    meta = ud.generate_metadata()
    assert meta is None

    # test page md5 align
    opts = mock.MagicMock()
    opts.chunk_size_bytes = 1
    opts.one_shot_bytes = 0
    opts.store_file_properties.attributes = False
    opts.store_file_properties.md5 = True
    opts.rsa_public_key = None

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Page
    ase._name = 'name'
    ase._encryption = None
    ase._size = 1

    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    ud._offset = 1
    ud.md5 = hashlib.md5()
    ud.md5.update(b'z')
    meta = ud.generate_metadata()
    assert meta is None
    md5 = hashlib.md5()
    md5.update(b'z' + b'\0' * 511)
    assert ud.md5.digest() == md5.digest()

    # test fileattr meta
    opts = mock.MagicMock()
    opts.chunk_size_bytes = 1
    opts.one_shot_bytes = 0
    opts.store_file_properties.attributes = True
    opts.store_file_properties.md5 = True
    opts.rsa_public_key = None

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'
    ase._encryption = None

    # file attr store is not avail on windows
    if not util.on_windows():
        ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                               mock.MagicMock())
        meta = ud.generate_metadata()
        assert metadata.JSON_KEY_BLOBXFER_METADATA in meta
        assert metadata._JSON_KEY_FILE_ATTRIBUTES in meta[
            metadata.JSON_KEY_BLOBXFER_METADATA]

    # test enc meta
    opts.store_file_properties.attributes = False
    opts.store_file_properties.md5 = False
    opts.rsa_public_key = 'abc'
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    ase.encryption_metadata = mock.MagicMock()
    ase.encryption_metadata.convert_to_json_with_mac.return_value = {
        'encmeta': 'encmeta'
    }
    meta = ud.generate_metadata()
    assert 'encmeta' in meta

    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    ud.hmac = None
    ase.encryption_metadata = mock.MagicMock()
    ase.encryption_metadata.convert_to_json_with_mac.return_value = {
        'encmeta': 'encmeta'
    }
    meta = ud.generate_metadata()
    assert 'encmeta' in meta

    opts.store_file_properties.md5 = True
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    ase.encryption_metadata = mock.MagicMock()
    ase.encryption_metadata.convert_to_json_with_mac.return_value = {
        'encmeta': 'encmeta'
    }
    meta = ud.generate_metadata()
    assert 'encmeta' in meta

    # test vio meta
    opts = mock.MagicMock()
    opts.chunk_size_bytes = 1
    opts.one_shot_bytes = 0
    opts.store_file_properties.md5 = True
    opts.rsa_public_key = None

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'
    ase._encryption = None

    lp.view = mock.MagicMock()
    lp.view.mode = upload.VectoredIoDistributionMode.Stripe
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    with mock.patch(
            'blobxfer.models.metadata.generate_vectored_io_stripe_metadata',
            return_value={'viometa': 'viometa'}):
        meta = ud.generate_metadata()
        assert metadata.JSON_KEY_BLOBXFER_METADATA in meta
        assert 'viometa' in meta[metadata.JSON_KEY_BLOBXFER_METADATA]
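The "test page md5 align" block above expects the MD5 to be computed over the data padded out to a 512-byte boundary, the page size Azure page blobs operate on. A short sketch of that padding arithmetic, reproducing the digest the test asserts:

import hashlib

PAGE_SIZE = 512                             # Azure page blob page size
data = b'z'                                 # 1 byte of real data, as in the test
pad = -len(data) % PAGE_SIZE                # 511 here; 0 when already aligned
md5 = hashlib.md5()
md5.update(data + b'\0' * pad)
print(md5.hexdigest())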
Exemple #19
0
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config,
         lang, display, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    if gender=='Male':
        subprocess.Popen(["aplay", "{}/sample-audio-files/Startup-Male.wav".format(ROOT_PATH)], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    else:
        subprocess.Popen(["aplay", "{}/sample-audio-files/Startup-Female.wav".format(ROOT_PATH)], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id,
                             device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')



    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "slowly":
            delay = 2
        elif speed == "quickly":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        def detected():
            continue_conversation=assistant.assist()
            if continue_conversation:
                print('Continuing conversation')
                assistant.assist()

        signal.signal(signal.SIGINT, signal_handler)
        sensitivity = [0.5]*len(models)
        callbacks = [detected]*len(models)
        detector = snowboydecoder.HotwordDetector(models, sensitivity=sensitivity)
        def start_detector():
            detector.start(detected_callback=callbacks,
                                   interrupt_check=interrupt_callback,
                                   sleep_time=0.03)

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.

        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                if custom_wakeword:
                    start_detector()
                else:
                    button_state=GPIO.input(pushbuttontrigger)
                    if button_state==True:
                       continue
                    else:
                       pass
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break



    detector.terminate()
Exemple #20
0
def test_descriptor_compute_remote_size(tmpdir):
    tmpdir.join('a').write('z' * 32)
    lp = upload.LocalPath(pathlib.Path(str(tmpdir)), pathlib.Path('a'))

    # encrypted remote size with replica
    opts = mock.MagicMock()
    opts.chunk_size_bytes = 16
    opts.one_shot_bytes = 0
    opts.store_file_properties.md5 = True
    opts.rsa_public_key = 'abc'

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'
    ase._encryption = mock.MagicMock()
    ase._encryption.symmetric_key = 'abc'
    ase2 = azmodels.StorageEntity('cont')
    ase2._mode = azmodels.StorageModes.Block
    ase2._name = 'name2'
    ase.replica_targets = [ase2]

    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    ud._compute_remote_size(opts)
    assert ud.entity.size == 48
    for rt in ase.replica_targets:
        assert rt.size == ud.entity.size

    # remote size
    opts = mock.MagicMock()
    opts.chunk_size_bytes = 16
    opts.one_shot_bytes = 0
    opts.store_file_properties.md5 = True
    opts.rsa_public_key = None

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'
    ase._encryption = None

    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    ud._compute_remote_size(opts)
    assert ud.entity.size == 32

    # remote size of zero
    tmpdir.join('b').ensure(file=True)
    lp = upload.LocalPath(pathlib.Path(str(tmpdir)), pathlib.Path('b'))

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'
    ase._encryption = None

    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    ud._compute_remote_size(opts)
    assert ud.entity.size == 0

    # stdin as page, resize
    lp = upload.LocalPath(pathlib.Path('-'), pathlib.Path('-'), use_stdin=True)
    opts.stdin_as_page_blob_size = 0
    ase._mode = azmodels.StorageModes.Page
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    ud._compute_remote_size(opts)
    assert ud.entity.size == upload._MAX_PAGE_BLOB_SIZE
    assert ud._needs_resize

    # stdin as page, no resize
    lp = upload.LocalPath(pathlib.Path('-'), pathlib.Path('-'), use_stdin=True)
    opts.stdin_as_page_blob_size = 32
    ase._mode = azmodels.StorageModes.Page
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    ud._compute_remote_size(opts)
    assert ud.entity.size == 32
    assert not ud._needs_resize
Exemple #21
0
def moveFiles(new_root,barcode_dict,portal_dict,unwanted,noPortalPath,badBarcodePath,barcodeLen,dPath):

    # Make all barcodes into caps for comparison
    barcode_Dict=dict((k.upper(), v) for k, v in barcode_dict.items())
    portal_Dict=dict((k.upper(), v) for k, v in portal_dict.items())

    # Start dictionary to put new file paths in
    # filename: [barcode, portal, date, current(new) path]
    new_dict={}
    # filename:newpath(where we want a large image but there is none)
    nolarge_dict={}
    # filename:[barcode,date,old_path]
    badbarcode_dict={}
    # filename:[barcode,date,old_path]
    duplicate_dict={}

    # Iterate through every barcode in image barcode dict
    for b in barcode_Dict:

        # If the barcode is the right length, process it fully.
        if len(b) == int(barcodeLen):

            # Split apart letters and numbers from barcode
            try:
                b_letters,b_numbers = ["".join(x) for _, x in itertools.groupby(b, key=str.isdigit)]

            # For bad barcodes move them to a special folder for Jennie to check. 
            except ValueError:
                for p in barcode_Dict[b]:
                    # Add files to badbarcode dict, get creation date, and move to their special place. 
                    badbarcode_dict=badBarcodeSequence(p,b,barcode_Dict,unwanted,badBarcodePath,badbarcode_dict)
            
            # Loop goes here after try if there are no exceptions
            # For all good barcodes that can be split into Letters/Numbers
            else:
                #print(b+", good barcode")
                # If barcode is found in records, move it into correct portal file
                if b in portal_Dict:
                    #print(b+", in portal dict")

                    # Get portal for barcode 
                    portal=portal_Dict[b]

                    # Iterate through all file paths in barcode dict
                    for p in barcode_Dict[b]:
                        if os.path.exists(p):
                            # Ignore files in "unwanted" list 
                            if any(x in p for x in unwanted):
                                pass

                            else:
                                #print(p+", good path")
                                # Get new file path and uppercase file name 
                                newDir,newPath,fileName=newPathNames(b,p,new_root,portal)

                                # Check if file exists at new path
                                if not os.path.exists(newPath):
                                    #print(newPath+", new path")
                                    # Make new directories if needed https://docs.python.org/3/library/pathlib.html
                                    if not os.path.exists(newDir):
                                        pathlib.Path(newDir).mkdir(parents=True, exist_ok=True) 

                                    # Get creation date 
                                    d = creation_date(p)

                                    #filename: [barcode, portal, date, current(new) path]
                                    new_dict[fileName]=[b,portal,d,newPath]

                                    # Copy file, preserving permissions 
                                    print(newPath+", good barcode, in portal list")
                                    shutil.move(p,newPath)
                                # If path exists, check if this is a rerun, if not, put newest file in folder. make note of duplicates
                                elif os.path.exists(newPath):
                                    print(newPath+", duplicate, in portal dict")
                                    # Get creation dates for file already moved, and the one that is similar to it. likely different due to case sensitive issues.  
                                    # This got really ugly when moving files and dealing with files already moved getting passed to this loop. 
                                    # Not making it clean right now. don't care enough. just need to move files 
                                    try:
                                        d = creation_date(p)
                                    except FileNotFoundError:
                                        d = creation_date(newPath)
                                    d1 = creation_date(newPath)
                                    # Copy both duplicate files to new folder. add _1 to one of them. 
                                    dName=os.path.basename(p)
                                    ddPath=os.path.join(dPath,dName.upper())
                                    try:
                                        pass
                                        #shutil.copy2(p,ddPath)
                                    except FileNotFoundError:
                                        pass
                                    d1Name=os.path.basename(newPath)
                                    dd1Path=os.path.join(dPath,d1Name.upper(),'1')
                                    try:
                                        pass
                                        #shutil.copy2(p,dd1Path)
                                    except FileNotFoundError:
                                        pass
                                    # If dates are the same, probably rerunning the script, don't add to the duplicate dict
                                    if d == d1:
                                        new_dict[fileName]=[b,portal,d,newPath]
                                        #pass
                                    else:
                                        # Add to duplicate dict 
                                        duplicate_dict[fileName]=[b,d,p]
                                    # If this file is newer, replace older file. Rerun or not we want to move newer file to main folder.
                                    if d > d1:
                                        #print(p+", replace older image file ")

                                        #filename: [barcode, portal, date, current(new) path]
                                        new_dict[fileName]=[b,portal,d,newPath]

                                        # Copy file, preserving permissions 
                                        #print(ddPath+", duplicate path")
                                        shutil.move(p,newPath)
                        # If the path to the original image doesn't exist (e.g. when this script is rerun halfway through).
                        else:
                            # File is already moved add to moved dict. 
                            newDir,newPath,fileName=newPathNames(b,p,new_root,portal)
                            d = creation_date(newPath)
                            new_dict[fileName]=[b,portal,d,newPath]


                # If no record in master list. Move to special folder.         
                elif b not in portal_Dict:
                    #print(b+", not in records")
                    # Iterate through all file paths in barcode dict
                    for p in barcode_Dict[b]:

                        # Ignore files in "unwanted" list 
                        if any(x in p for x in unwanted):
                            pass
                        else:
                            # Make new path to folder for images that don't have barcode in master list. 
                            fName=os.path.basename(p)
                            newPath=os.path.join(noPortalPath,fName.upper())

                            # Check if file exists at new path
                            if not os.path.exists(newPath):

                                # Get creation date 
                                d = creation_date(p)

                                #filename: [barcode, portal, date, current(new) path]
                                new_dict[fName.upper()]=[b,"NoPortal",d,newPath]

                                # Copy file, preserving permissions
                                print(newPath+", not in portal dict") 
                                shutil.move(p,newPath)

                            # If path exists, check if this is a rerun, if not, put newest file in folder. make note of duplicates
                            elif os.path.exists(newPath):
                                print(newPath+", duplicate, not in portal dict")
                                # Get creation dates for file already moved, and the one that is similar to it. 
                                # likely different due to case sensitive issues.  
                                try:
                                    d = creation_date(p)
                                except FileNotFoundError:
                                    d = creation_date(newPath)
                                d1 = creation_date(newPath)
                                # Copy both duplicate files to new folder. add _1 to one of them. 
                                dName=os.path.basename(p)
                                ddPath=os.path.join(dPath,dName.upper())
                                try:
                                    pass
                                    #shutil.copy2(p,ddPath)
                                except FileNotFoundError:
                                    pass
                                d1Name=os.path.basename(newPath)
                                dd1Path=os.path.join(dPath,d1Name.upper(),'1')
                                try:
                                    pass
                                    #shutil.copy2(p,dd1Path)
                                except FileNotFoundError:
                                    pass
                                # If dates are the same, probably rerunning the script, don't add to the duplicate dict
                                if d == d1:
                                    new_dict[fName.upper()]=[b,"NoPortal",d,newPath]
                                    #pass
                                else:
                                    # Add to duplicate dict 
                                    duplicate_dict[fName.upper()]=[b,d,p]
                                # If this file is newer, replace older file. Rerun or not we want to move newer file to main folder.
                                if d > d1:
                                    # Copy file, preserving permissions 
                                    shutil.move(p,newPath)

                                    #filename: [barcode, portal, date, current(new) path]
                                    new_dict[fName.upper()]=[b,"NoPortal",d,newPath]

                            #print("No record for "+str(b)+" moved to "+str(noPortalPath))
        # If the barcode is the wrong length, move it aside and make a note.
        else:
            for p in barcode_Dict[b]:
                badbarcode_dict=badBarcodeSequence(p,b,barcode_Dict,unwanted,badBarcodePath,badbarcode_dict)
    return new_dict,badbarcode_dict,duplicate_dict
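The barcode parsing in moveFiles relies on itertools.groupby to split a barcode's leading letters from its trailing digits; a tiny standalone illustration of that idiom (the barcode values are made up):

import itertools

b = 'ABC012345'  # hypothetical well-formed barcode
b_letters, b_numbers = ["".join(x) for _, x in itertools.groupby(b, key=str.isdigit)]
print(b_letters, b_numbers)  # -> ABC 012345

# A barcode such as 'AB12CD' produces three groups, so the unpacking raises
# ValueError -- which is exactly the case moveFiles routes to badBarcodeSequence.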
Exemple #22
0
def test_descriptor_adjust_chunk_size(tmpdir):
    tmpdir.join('a').ensure(file=True)
    lp = upload.LocalPath(pathlib.Path(str(tmpdir)), pathlib.Path('a'))

    opts = mock.MagicMock()
    opts.chunk_size_bytes = 0
    opts.one_shot_bytes = 0
    opts.store_file_properties.md5 = True
    opts.rsa_public_key = None

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'
    ase._encryption = None

    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    assert ud._chunk_size == 0

    with mock.patch('blobxfer.models.upload._DEFAULT_AUTO_CHUNKSIZE_BYTES', 1):
        with mock.patch(
                'blobxfer.models.upload._MAX_BLOCK_BLOB_CHUNKSIZE_BYTES', 3):
            with mock.patch('blobxfer.models.upload._MAX_NUM_CHUNKS', 2):
                tmpdir.join('a').write('z' * 4)
                lp = upload.LocalPath(pathlib.Path(str(tmpdir)),
                                      pathlib.Path('a'))
                ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                                       mock.MagicMock())
                assert ud._chunk_size == 2

    lp = upload.LocalPath(pathlib.Path(str(tmpdir)),
                          pathlib.Path('-'),
                          use_stdin=True)
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    assert ud._chunk_size == upload._MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES

    tmpdir.join('a').write('z' * 32)
    lp = upload.LocalPath(pathlib.Path(str(tmpdir)), pathlib.Path('a'))

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Page
    ase._name = 'name'
    ase._encryption = None

    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    assert ud._chunk_size == 32

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Append
    ase._name = 'name'
    ase._encryption = None

    opts.chunk_size_bytes = upload._MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES + 1
    with mock.patch(
            'blobxfer.models.upload._MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES', 4):
        ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                               mock.MagicMock())
        assert ud._chunk_size == 4

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'
    ase._encryption = None

    opts.chunk_size_bytes = 32
    opts.one_shot_bytes = 32
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    assert ud._chunk_size == 32

    opts.one_shot_bytes = 31
    with mock.patch('blobxfer.models.upload._MAX_BLOCK_BLOB_CHUNKSIZE_BYTES',
                    4):
        ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                               mock.MagicMock())
        assert ud._chunk_size == 4

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.File
    ase._name = 'name'
    ase._encryption = None

    opts.chunk_size_bytes = upload._MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES + 1
    with mock.patch(
            'blobxfer.models.upload._MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES', 4):
        ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                               mock.MagicMock())
        assert ud._chunk_size == 4

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Page
    ase._name = 'name'
    ase._encryption = None

    opts.chunk_size_bytes = upload._MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES + 1
    with mock.patch(
            'blobxfer.models.upload._MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES', 4):
        ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                               mock.MagicMock())
        assert ud._chunk_size == 4

    with mock.patch('blobxfer.models.upload._MAX_PAGE_BLOB_SIZE', 4):
        with pytest.raises(RuntimeError):
            upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                              mock.MagicMock())
Exemple #23
0
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config, lang, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
Exemple #24
0
def test_resume(tmpdir):
    tmpdir.join('a').write('zz')
    lp = upload.LocalPath(pathlib.Path(str(tmpdir)), pathlib.Path('a'))

    opts = mock.MagicMock()
    opts.chunk_size_bytes = 0
    opts.one_shot_bytes = 0
    opts.store_file_properties.md5 = True
    opts.rsa_public_key = None

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'
    ase._encryption = None

    # test no resume
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(), None)
    assert ud._resume() is None

    # check if path exists in resume db
    resume = mock.MagicMock()
    resume.get_record.return_value = None
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(), resume)
    assert ud._resume() is None

    # check length mismatch
    bad = mock.MagicMock()
    bad.length = 0
    resume.get_record.return_value = bad
    assert ud._resume() is None

    # check completed resume
    comp = mock.MagicMock()
    comp.length = 2
    comp.completed = True
    comp.total_chunks = 1
    comp.chunk_size = 2
    comp.completed_chunks = 1
    resume.get_record.return_value = comp
    ud._completed_chunks = mock.MagicMock()
    ud._src_ase = ase
    assert ud._resume() == 2

    ase.replica_targets = [ase]
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(), resume)
    ud._completed_chunks = mock.MagicMock()
    ud._src_ase = ase
    assert ud._resume() == 4

    # check no encryption
    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'
    opts.rsa_public_key = 'abc'

    nc = mock.MagicMock()
    nc.length = 16
    nc.completed = False
    nc.total_chunks = 2
    nc.chunk_size = 1
    nc.completed_chunks = 1

    resume.get_record.return_value = nc
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(), resume)
    assert ud._resume() is None

    # check rr path exists
    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'

    nc.length = 2
    nc.local_path = pathlib.Path('yyy')
    opts.rsa_public_key = None

    resume.get_record.return_value = nc
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(), resume)
    assert ud._resume() is None

    # check resume no md5
    opts.store_file_properties.md5 = False

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'

    nc = mock.MagicMock()
    nc.length = 2
    nc.completed = False
    nc.total_chunks = 2
    nc.chunk_size = 1
    cc = bitstring.BitArray(length=nc.total_chunks)
    cc.set(True, 0)
    nc.completed_chunks = cc.int
    nc.local_path = lp.absolute_path

    resume.get_record.return_value = nc
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(), resume)
    assert ud._resume() == 1

    # check resume with md5 mismatch
    opts.store_file_properties.md5 = True

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'

    nc = mock.MagicMock()
    nc.length = 2
    nc.completed = False
    nc.total_chunks = 2
    nc.chunk_size = 1
    cc = bitstring.BitArray(length=nc.total_chunks)
    cc.set(True, 0)
    nc.completed_chunks = cc.int
    nc.local_path = lp.absolute_path

    resume.get_record.return_value = nc
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(), resume)
    assert ud._resume() is None

    # check resume with md5 match
    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'

    nc = mock.MagicMock()
    nc.length = 2
    nc.completed = False
    nc.total_chunks = 2
    nc.chunk_size = 1
    cc = bitstring.BitArray(length=nc.total_chunks)
    cc.set(True, 0)
    nc.completed_chunks = cc.int
    nc.local_path = lp.absolute_path
    md5 = hashlib.md5()
    md5.update(b'z')
    nc.md5hexdigest = md5.hexdigest()

    resume.get_record.return_value = nc
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(), resume)
    assert ud._resume() == 1
Exemple #25
0
import pathlib2

HERE = pathlib2.Path(__file__).resolve().parent
FROZEN = HERE.joinpath('frozen.txt')
FROZEN_DOC = HERE.joinpath('frozen_docs.txt')


def test_match_files():
    with FROZEN.open() as fp:
        frozen = dict(x.strip().split('==') for x in fp)
    with FROZEN_DOC.open() as fp:
        frozen_doc = dict(x.strip().split('==') for x in fp)

    expected_frozen_doc = {k: v for k, v in frozen.items() if k in frozen_doc}
    assert expected_frozen_doc == frozen_doc
Exemple #26
0
def test_localsourcepaths_files(tmpdir):
    tmpdir.mkdir('abc')
    tmpdir.join('moo.cow').write('z')
    abcpath = tmpdir.join('abc')
    abcpath.join('hello.txt').write('hello')
    abcpath.join('blah.x').write('x')
    abcpath.join('blah.y').write('x')
    abcpath.join('blah.z').write('x')
    abcpath.mkdir('def')
    defpath = abcpath.join('def')
    defpath.join('world.txt').write('world')
    defpath.join('moo.cow').write('y')

    a = upload.LocalSourcePath()
    a.add_includes('**')
    a.add_includes('*.txt')
    a.add_includes(('moo.cow', '*blah*'))
    with pytest.raises(ValueError):
        a.add_includes('**/**/*')
    a.add_excludes('**')
    a.add_excludes('**/blah.x')
    with pytest.raises(ValueError):
        a.add_excludes('**/**/blah.x')
    a.add_excludes(['world.txt'])
    a.add_path(str(tmpdir))
    a_set = set()
    for file in a.files():
        sfile = str(file.parent_path / file.relative_path)
        a_set.add(sfile)

    assert len(a._include) == 3
    assert len(a._exclude) == 2

    assert not a.can_rename()
    assert len(a.paths) == 1
    assert str(abcpath.join('blah.x')) in a_set
    assert str(defpath.join('world.txt')) in a_set
    assert str(defpath.join('moo.cow')) not in a_set

    b = upload.LocalSourcePath()
    b.add_includes(['moo.cow', '*blah*'])
    b.add_includes('*.txt')
    b.add_excludes(('world.txt', ))
    b.add_excludes('**/blah.x')
    b.add_paths([pathlib.Path(str(tmpdir))])
    for file in a.files():
        sfile = str(file.parent_path / file.relative_path)
        assert sfile in a_set

    assert upload.LocalSourcePath.is_stdin('-')
    assert upload.LocalSourcePath.is_stdin('/dev/stdin')
    assert not upload.LocalSourcePath.is_stdin('/')

    c = upload.LocalSourcePath()
    c.add_path('-')
    for file in c.files():
        assert file.use_stdin

    d = upload.LocalSourcePath()
    d.add_path(str(tmpdir.join('moo.cow')))
    i = 0
    for file in d.files():
        assert str(file.parent_path.absolute()) == str(tmpdir)
        assert str(file.relative_path) == 'moo.cow'
        assert not file.use_stdin
        i += 1
    assert i == 1

    tmpdir.join('moo.cow2').ensure(file=True)
    d.add_path(str(tmpdir.join('moo.cow2')))
    i = 0
    for file in d.files():
        i += 1
    assert i == 2
Exemple #27
0
napoleon_google_docstring = False
napoleon_use_param = True
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

source_suffix = ['.rst', '.md']

master_doc = 'index'

# General information about the project.
project = 'toppra'
copyright = '2020, Hung Pham'
author = 'Hung Pham'

version = pathlib.Path('./../../VERSION').read_text()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', '**.ipynb_checkpoints']

# pygments_style = 'friendly'
Exemple #28
0
def test_descriptor_next_offsets(tmpdir):
    tmpdir.join('a').write('ab')
    lp = upload.LocalPath(pathlib.Path(str(tmpdir)), pathlib.Path('a'))

    opts = mock.MagicMock()
    opts.chunk_size_bytes = 1
    opts.one_shot_bytes = 0
    opts.store_file_properties.md5 = True
    opts.rsa_public_key = None

    ase = azmodels.StorageEntity('cont')
    ase._mode = azmodels.StorageModes.Block
    ase._name = 'name'
    ase._encryption = None

    # test normal
    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    ud._resume = mock.MagicMock()
    ud._resume.return_value = None

    offsets, rb = ud.next_offsets()
    assert rb is None
    assert offsets.chunk_num == 0
    assert offsets.num_bytes == 1
    assert offsets.range_start == 0
    assert offsets.range_end == 0
    assert not offsets.pad
    assert ud._offset == 1
    assert ud._chunk_num == 1

    offsets, rb = ud.next_offsets()
    assert rb is None
    assert offsets.chunk_num == 1
    assert offsets.num_bytes == 1
    assert offsets.range_start == 1
    assert offsets.range_end == 1
    assert not offsets.pad
    assert ud._offset == 2
    assert ud._chunk_num == 2

    offsets, rb = ud.next_offsets()
    assert rb is None
    assert offsets is None

    # test chunk size exceeding the file size: the whole 2-byte file fits in a single chunk
    lp = upload.LocalPath(pathlib.Path(str(tmpdir)), pathlib.Path('a'))
    opts.chunk_size_bytes = 3

    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    ud._chunk_size = 3
    ud._resume = mock.MagicMock()
    ud._resume.return_value = None

    offsets, rb = ud.next_offsets()
    assert rb is None
    assert offsets.chunk_num == 0
    assert offsets.num_bytes == 2
    assert offsets.range_start == 0
    assert offsets.range_end == 1
    assert not offsets.pad
    assert ud._offset == 2
    assert ud._chunk_num == 1

    # test encrypted: 16-byte file, 16-byte chunks, RSA public key set
    tmpdir.join('a').write('z' * 16)
    lp = upload.LocalPath(pathlib.Path(str(tmpdir)), pathlib.Path('a'))
    opts.chunk_size_bytes = 16
    opts.rsa_public_key = 'abc'

    ud = upload.Descriptor(lp, ase, 'uid', opts, mock.MagicMock(),
                           mock.MagicMock())
    ud._resume = mock.MagicMock()
    ud._resume.return_value = None

    offsets, rb = ud.next_offsets()
    assert rb is None
    assert offsets.chunk_num == 0
    assert offsets.num_bytes == 16
    assert offsets.range_start == 0
    assert offsets.range_end == 15
    assert not offsets.pad
    assert ud._offset == 16
    assert ud._chunk_num == 1

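    # the chunk past EOF is still produced and flagged for padding when
    # encryption is enabled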
    offsets, rb = ud.next_offsets()
    assert rb is None
    assert offsets.chunk_num == 1
    assert offsets.num_bytes == 16
    assert offsets.range_start == 16
    assert offsets.range_end == 31
    assert offsets.pad
    assert ud._offset == 32
    assert ud._chunk_num == 2
Example #29
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--device-model-id',
                        '--device_model_id',
                        type=str,
                        metavar='DEVICE_MODEL_ID',
                        required=False,
                        help='the device model ID registered with Google')
    parser.add_argument('--project-id',
                        '--project_id',
                        type=str,
                        metavar='PROJECT_ID',
                        required=False,
                        help='the project ID used to register this device')
    parser.add_argument('--device-config',
                        type=str,
                        metavar='DEVICE_CONFIG_FILE',
                        default=os.path.join(os.path.expanduser('~/.config'),
                                             'googlesamples-assistant',
                                             'device_config_library.json'),
                        help='path to store and read device configuration')
    parser.add_argument('--credentials',
                        type=existing_file,
                        metavar='OAUTH2_CREDENTIALS_FILE',
                        default=os.path.join(os.path.expanduser('~/.config'),
                                             'google-oauthlib-tool',
                                             'credentials.json'),
                        help='path to store and read OAuth2 credentials')
    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='%(prog)s ' + Assistant.__version_str__())

    args = parser.parse_args()
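    # load OAuth2 credentials (by default those saved by google-oauthlib-tool)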
    with open(args.credentials, 'r') as f:
        credentials = google.oauth2.credentials.Credentials(token=None,
                                                            **json.load(f))

    device_model_id = None
    last_device_id = None
    try:
        with open(args.device_config) as f:
            device_config = json.load(f)
            device_model_id = device_config['model_id']
            last_device_id = device_config.get('last_device_id', None)
    except FileNotFoundError:
        pass

    if not args.device_model_id and not device_model_id:
        raise Exception('Missing --device-model-id option')

    # Re-register if "device_model_id" is given by the user and it differs
    # from what we previously registered with.
    should_register = (args.device_model_id
                       and args.device_model_id != device_model_id)

    device_model_id = args.device_model_id or device_model_id

    with Assistant(credentials, device_model_id) as assistant:
        events = assistant.start()

        device_id = assistant.device_id
        print('device_model_id:', device_model_id)
        print('device_id:', device_id + '\n')

        # Re-register if "device_id" is different from the last "device_id":
        if should_register or (device_id != last_device_id):
            if args.project_id:
                register_device(args.project_id, credentials, device_model_id,
                                device_id)
                pathlib.Path(os.path.dirname(
                    args.device_config)).mkdir(exist_ok=True)
                with open(args.device_config, 'w') as f:
                    json.dump(
                        {
                            'last_device_id': device_id,
                            'model_id': device_model_id,
                        }, f)
            else:
                print(WARNING_NOT_REGISTERED)

        for event in events:
            process_event(event, assistant)
Example #30
    def _get_components_to_download(self,
                                    component_path=None,
                                    download_to_path=None):
        """
        Identify components to be downloaded, along with their local destination paths.

        Parameters
        ----------
        component_path : str, optional
            Path to directory or file within blob.
        download_to_path : str, optional
            Local path to download to.

        Returns
        -------
        components_to_download : dict
            Map of component paths to local destination paths.
        downloaded_to_path : str
            Absolute path where file(s) were downloaded to. Matches `download_to_path` if it was
            provided as an argument.

        """
        implicit_download_to_path = download_to_path is None

        if component_path is not None:
            # look for an exact match with `component_path` as a file
            for path in self.list_paths():
                if path == component_path:
                    if implicit_download_to_path:
                        # default to filename from `component_path`, in cwd
                        local_path = os.path.basename(component_path)

                        # avoid collision with existing file
                        local_path = _file_utils.without_collision(local_path)
                    else:
                        # exactly where the user requests
                        local_path = download_to_path

                    return ({path: local_path}, os.path.abspath(local_path))
        # no exact match, so it's a folder download (or nonexistent path)

        # figure out where files are going to be downloaded to
        if implicit_download_to_path:
            if component_path is None:
                downloaded_to_path = DEFAULT_DOWNLOAD_DIR

                # avoid collision with existing directory
                downloaded_to_path = _file_utils.without_collision(
                    downloaded_to_path)
            else:  # need to automatically determine directory
                # NOTE: if `component_path` == "s3://" with any trailing slashes, it becomes "s3:"
                downloaded_to_path = pathlib2.Path(
                    component_path).name  # final path component

                if downloaded_to_path in {".", "..", "/", "s3:"}:
                    # rather than dump everything into cwd, use new child dir
                    downloaded_to_path = DEFAULT_DOWNLOAD_DIR

                # avoid collision with existing directory
                downloaded_to_path = _file_utils.without_collision(
                    downloaded_to_path)
        else:
            # exactly where the user requests
            downloaded_to_path = download_to_path

        # collect paths in blob and map them to download locations
        components_to_download = dict()
        if component_path is None:
            # download all
            for path in self.list_paths():
                local_path = os.path.join(
                    downloaded_to_path,
                    _file_utils.remove_prefix_dir(path, "s3:"),
                )

                components_to_download[path] = local_path
        else:
            # look for files contained in `component_path` as a directory
            component_path_as_dir = component_path if component_path.endswith(
                '/') else component_path + '/'
            for path in self.list_paths():
                if path.startswith(component_path_as_dir):
                    # rebase from `component_path` onto `downloaded_to_path`
                    #     Implicit `download_to_path` example:
                    #         component_blob.path = "coworker/downloads/data/info.csv"
                    #         component_path      = "coworker/downloads"
                    #         downloaded_to_path  =          "downloads" or "downloads 1", etc.
                    #         local_path          =          "downloads/data/info.csv"
                    #     Explicit `download_to_path` example:
                    #         component_blob.path = "coworker/downloads/data/info.csv"
                    #         component_path      = "coworker/downloads"
                    #         downloaded_to_path  =            "my-data"
                    #         local_path          =            "my-data/data/info.csv"
                    local_path = os.path.join(
                        downloaded_to_path,
                        _file_utils.remove_prefix_dir(
                            path, prefix_dir=component_path),
                    )

                    components_to_download[path] = local_path

        if not components_to_download:
            raise KeyError(
                "no components found for path {}".format(component_path))

        return (components_to_download, os.path.abspath(downloaded_to_path))
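
The rebasing rule spelled out in the comments above can be illustrated with a minimal, self-contained sketch. The rebase_component_path helper below is hypothetical: it only mimics the remove_prefix_dir + os.path.join step assumed here, and is not part of the actual client library.

import os


def rebase_component_path(blob_path, component_path, downloaded_to_path):
    # Hypothetical helper: strip the component_path prefix from the blob path,
    # then re-root the remainder under downloaded_to_path.
    prefix = component_path if component_path.endswith('/') else component_path + '/'
    relative = blob_path[len(prefix):] if blob_path.startswith(prefix) else blob_path
    return os.path.join(downloaded_to_path, relative)


# Implicit download_to_path: the final component of component_path names the local dir.
assert rebase_component_path(
    "coworker/downloads/data/info.csv", "coworker/downloads",
    "downloads") == "downloads/data/info.csv"

# Explicit download_to_path: files land under the user-supplied directory.
assert rebase_component_path(
    "coworker/downloads/data/info.csv", "coworker/downloads",
    "my-data") == "my-data/data/info.csv"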