Example No. 1
 def get_eligible_features(self):
     features_list = base.WitWidgetBase.get_eligible_features_impl(self)
     output.eval_js(
         """eligibleFeaturesCallback({features_list})""".format(
             features_list=json.dumps(features_list)
         )
     )
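A call like this only has an effect if the named callback already exists in the output frame. Below is a minimal, self-contained sketch of the same Python-to-JS pattern; the listCallback function and the sample feature list are illustrative placeholders, not part of the WIT API.

import json
from IPython import display
from google.colab import output

# Define a JS function in the output frame, then invoke it with JSON-serialized
# Python data via output.eval_js.
display.display(display.HTML("""
<script>
  function listCallback(items) {
    console.log('received ' + items.length + ' features');
  }
</script>
"""))
output.eval_js("""listCallback({features})""".format(
    features=json.dumps(['age', 'capital-gain', 'hours-per-week'])))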
Example No. 2
    def __init__(self, config_builder, height=1000):
        """Constructor for colab notebook WitWidget.

    Args:
      config_builder: WitConfigBuilder object containing settings for WIT.
      height: Optional height in pixels for WIT to occupy. Defaults to 1000.
    """
        self._ctor_complete = False
        self.id = WitWidget.index
        base.WitWidgetBase.__init__(self, config_builder)
        # Add this instance to the static instance list.
        WitWidget.widgets.append(self)

        # Display WIT Polymer element.
        display.display(display.HTML(self._get_element_html()))
        display.display(
            display.HTML(WIT_HTML.format(height=height, id=self.id)))

        # Increment the static instance WitWidget index counter
        WitWidget.index += 1

        # Send the provided config and examples to JS
        output.eval_js("""configCallback('{config}')""".format(
            config=json.dumps(self.config)))
        output.eval_js("""updateExamplesCallback({examples})""".format(
            examples=json.dumps(self.examples)))
        self._generate_sprite()
        self._ctor_complete = True
Example No. 3
 def sort_eligible_features(self, info):
   try:
     features_list = base.WitWidgetBase.sort_eligible_features_impl(self, info)
     output.eval_js("""sortEligibleFeaturesCallback({features_list})""".format(
         features_list=json.dumps(features_list)))
   except Exception as e:
     output.eval_js("""backendError({error})""".format(
         error=json.dumps({'msg': str(e)})))
Example No. 4
 def infer(self):
     try:
         inferences = base.WitWidgetBase.infer_impl(self)
         output.eval_js("""inferenceCallback({inferences})""".format(
             inferences=json.dumps(inferences)))
     except Exception as e:
         output.eval_js("""backendError({error})""".format(
             error=json.dumps({'msg': str(e)})))
Example No. 5
 def infer_mutants(self, info):
     try:
         json_mapping = base.WitWidgetBase.infer_mutants_impl(self, info)
         output.eval_js("""inferMutantsCallback({json_mapping})""".format(
             json_mapping=json.dumps(json_mapping)))
     except Exception as e:
         output.eval_js("""backendError({error})""".format(
             error=json.dumps({'msg': str(e)})))
Example No. 6
 def _set_examples_looper(self, eval_js_str):
     # Send the set examples to JS in chunks.
     num_pieces = math.ceil(len(self.examples) / self.SLICE_SIZE)
     i = 0
     while num_pieces > 0:
         num_pieces -= 1
         exs = self.examples[i:i + self.SLICE_SIZE]
         piece = {'examples': exs, 'countdown': num_pieces}
         output.eval_js(eval_js_str.format(data=json.dumps(piece)))
         i += self.SLICE_SIZE
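Chunking keeps each eval_js payload small when the example list is large. The following standalone sketch reproduces the same countdown-based chunking idea; the slice size and the updateExamplesCallback name are assumptions carried over from the surrounding examples rather than a fixed API.

import json
import math
from google.colab import output

def send_examples_in_chunks(examples, slice_size=1000):
    # countdown reaches 0 on the final chunk so the JS side knows when to render.
    num_pieces = math.ceil(len(examples) / slice_size)
    for start in range(0, len(examples), slice_size):
        num_pieces -= 1
        piece = {'examples': examples[start:start + slice_size],
                 'countdown': num_pieces}
        output.eval_js('updateExamplesCallback({data})'.format(
            data=json.dumps(piece)))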
Example No. 7
  def __init__(self, config_builder, height=1000):
    """Constructor for colab notebook WitWidget.

    Args:
      config_builder: WitConfigBuilder object containing settings for WIT.
      height: Optional height in pixels for WIT to occupy. Defaults to 1000.
    """
    tf.logging.set_verbosity(tf.logging.WARN)
    config = config_builder.build()
    copied_config = dict(config)
    self.estimator_and_spec = (
      dict(config.get('estimator_and_spec'))
      if 'estimator_and_spec' in config else {})
    self.compare_estimator_and_spec = (
      dict(config.get('compare_estimator_and_spec'))
      if 'compare_estimator_and_spec' in config else {})
    if 'estimator_and_spec' in copied_config:
      del copied_config['estimator_and_spec']
    if 'compare_estimator_and_spec' in copied_config:
      del copied_config['compare_estimator_and_spec']

    self.custom_predict_fn = (
      config.get('custom_predict_fn')
      if 'custom_predict_fn' in config else None)
    self.compare_custom_predict_fn = (
      config.get('compare_custom_predict_fn')
      if 'compare_custom_predict_fn' in config else None)
    if 'custom_predict_fn' in copied_config:
      del copied_config['custom_predict_fn']
    if 'compare_custom_predict_fn' in copied_config:
      del copied_config['compare_custom_predict_fn']


    self._set_examples(config['examples'])
    del copied_config['examples']

    self.config = copied_config

    # Add this instance to the static instance list.
    WitWidget.widgets.append(self)

    # Display WIT Polymer element.
    display.display(display.HTML(self._get_element_html()))
    display.display(display.HTML(
      WIT_HTML.format(
        examples=json.dumps(self.examples), height=height, id=WitWidget.index)))

    # Increment the static instance WitWidget index counter
    WitWidget.index += 1

    # Send the provided config and examples to JS
    output.eval_js("""configCallback('{config}')""".format(
      config=json.dumps(self.config)))
    output.eval_js('updateExamplesCallback()')
    self._generate_sprite()
Example No. 8
def download(filename):
  """Downloads the file to the user's local disk via a browser download action.

  Args:
    filename: Name of the file on disk to be downloaded.

  Raises:
    OSError: if the file cannot be found.
  """

  if not _os.path.exists(filename):
    msg = 'Cannot find file: {}'.format(filename)
    if _six.PY2:
      raise OSError(msg)
    else:
      raise FileNotFoundError(msg)  # pylint: disable=undefined-variable

  if _use_chunked_download:
    _download_with_comms(filename)
    return

  started = _threading.Event()
  port = _portpicker.pick_unused_port()

  def server_entry():
    httpd = _V6Server(('::', port), _FileHandler)
    started.set()
    # Handle a single request then exit the thread.
    httpd.handle_request()

  thread = _threading.Thread(target=server_entry)
  thread.start()
  started.wait()

  _output.eval_js(
      """
      (async function() {
        const response = await fetch('https://localhost:%(port)d%(path)s');
        if (!response.ok) {
          throw new Error('Failed to download: ' + response.statusText);
        }
        const blob = await response.blob();

        const a = document.createElement('a');
        a.href = window.URL.createObjectURL(blob);
        a.download = '%(name)s';
        document.body.appendChild(a);
        a.click();
        a.remove();
      })();
  """ % {
      'port': port,
      'path': _os.path.abspath(filename),
      'name': _os.path.basename(filename),
  })
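In a notebook this function is normally reached through the public google.colab.files module. A typical call, assuming results.csv already exists on the runtime's disk:

from google.colab import files

files.download('results.csv')  # triggers a browser download of the file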
Example No. 9
def upload():
  """Renders widget to upload local (to the browser) files to the kernel.

  Blocks until the files are available.

  Returns:
    A map of the form {<filename>: <file contents>} for all uploaded files.
  """
  upload_id = str(_uuid.uuid4())
  input_id = 'files-' + upload_id
  output_id = 'result-' + upload_id

  _IPython.display.display(
      _IPython.core.display.HTML("""
     <input type="file" id="{input_id}" name="files[]" multiple disabled
        style="border:none" />
     <output id="{output_id}">
      Upload widget is only available when the cell has been executed in the
      current browser session. Please rerun this cell to enable.
      </output>
      <script src="/nbextensions/google.colab/files.js"></script> """.format(
          input_id=input_id, output_id=output_id)))

  # First result is always an indication that the file picker has completed.
  result = _output.eval_js(
      'google.colab._files._uploadFiles("{input_id}", "{output_id}")'.format(
          input_id=input_id, output_id=output_id))
  files = _collections.defaultdict(_six.binary_type)
  # Mapping from original filename to filename as saved locally.
  local_filenames = dict()

  while result['action'] != 'complete':
    result = _output.eval_js(
        'google.colab._files._uploadFilesContinue("{output_id}")'.format(
            output_id=output_id))
    if result['action'] != 'append':
      # JS side uses a generator of promises to process all of the files- some
      # steps may not produce data for the Python side, so just proceed onto the
      # next message.
      continue
    data = _base64.b64decode(result['data'])
    filename = result['file']

    files[filename] += data
    local_filename = local_filenames.get(filename)
    if not local_filename:
      local_filename = _get_unique_filename(filename)
      local_filenames[filename] = local_filename
      print('Saving {filename} to {local_filename}'.format(
          filename=filename, local_filename=local_filename))
    with open(local_filename, 'ab') as f:
      f.write(data)

  return dict(files)
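Typical usage goes through the public google.colab.files module; the returned dict maps each original filename to its raw bytes:

from google.colab import files

uploaded = files.upload()  # opens the file picker and blocks until uploads finish
for name, data in uploaded.items():
    print('{}: {} bytes'.format(name, len(data)))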
Example No. 10
 def videoContr(ms=10, stop=False):
   if not stop:
     while True:
       # Ask the JS side (a getData helper defined in the page) for the latest
       # frame as a base64-encoded data URL.
       data = eval_js("getData(%s)" % str(ms))
       if data:
         binary = b64decode(data.split(',')[1])
         return Image.open(BytesIO(binary))
       # No frame available yet; poll again shortly.
       sleep(0.1)
   else:
     eval_js("stopVideo()")
Example No. 11
 def set_examples(self, examples):
   base.WitWidgetBase.set_examples(self, examples)
   # If this is called outside of the ctor, use a BroadcastChannel to send
   # the updated examples to the visualization. Inside of the ctor, no action
   # is necessary as the ctor handles all communication.
   if self._ctor_complete:
     # Use BroadcastChannel to allow this call to be made in a separate colab
     # cell from the cell that displays WIT.
     channel_name = 'updateExamples{}'.format(self.id)
     output.eval_js("""(new BroadcastChannel('{channel_name}')).postMessage(
         {examples})""".format(
             examples=json.dumps(self.examples), channel_name=channel_name))
     self._generate_sprite()
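For the postMessage to matter, the JS side must be listening on the same channel. The sketch below shows that pattern with an illustrative channel name and handler (not the actual WIT listener), using Python to inject the listener into the output frame:

import json
from IPython import display
from google.colab import output

display.display(display.HTML("""
<script>
  new BroadcastChannel('updateExamples0').onmessage =
      (msg) => console.log('received', msg.data.length, 'examples');
</script>
"""))
output.eval_js("""(new BroadcastChannel('updateExamples0')).postMessage(
    {examples})""".format(examples=json.dumps([{'f': 1}, {'f': 2}])))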
Example No. 12
def _refresh_watchers():
    output.eval_js('''
    (() => {
      const frames = window.parent.frames;
      for (let i = 0; i < frames.length; ++i) {
        try {
          const frame = frames[i];
          if (frame.window.refreshInspector) {
              frame.window.refreshInspector('user_global_ns');
          }
        } catch(e) {}
      }
    })()''')
Example No. 13
def show_frame(img, quality=0.8):
    """Put frame as <img src="data:image/jpg;base64,...."> """

    ret, data = cv2.imencode('.jpg',
                             img)  # compress array of pixels to JPG data

    data = b64encode(data)  # encode base64
    data = data.decode()  # convert bytes to string
    data = 'data:image/jpg;base64,' + data  # join header ("data:image/jpg;base64,") and base64 data (JPG)

    eval_js(
        'showImage("{}")'.format(data)
    )  # run JavaScript code to put image (JPG as string base64) in <img>
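Driving this helper assumes the notebook has already rendered an <img> element and a showImage JS function in the output frame (not shown here). Feeding it a frame read with OpenCV, assuming frame.jpg exists on the runtime, would look like:

import cv2

frame = cv2.imread('frame.jpg')  # any BGR pixel array works here
show_frame(frame, quality=0.8)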
Example No. 14
    def render(self):
        """Render the widget to the display."""
        # Display WIT Polymer element.
        display.display(display.HTML(self._get_element_html()))
        display.display(
            display.HTML(WIT_HTML.format(height=self.height, id=self.id)))

        # Send the provided config and examples to JS
        output.eval_js("""configCallback({config})""".format(
            config=json.dumps(self.config)))
        output.eval_js("""updateExamplesCallback({examples})""".format(
            examples=json.dumps(self.examples)))
        self._generate_sprite()
        self._rendering_complete = True
Example No. 15
 def compute_custom_distance(self, index, callback_fn, params):
     try:
         distances = base.WitWidgetBase.compute_custom_distance_impl(
             self, index, params['distanceParams'])
         callback_dict = {
             'distances': distances,
             'exInd': index,
             'funId': callback_fn,
             'params': params['callbackParams']
         }
         output.eval_js("""distanceCallback({callback_dict})""".format(
             callback_dict=json.dumps(callback_dict)))
     except Exception as e:
         output.eval_js("""backendError({error})""".format(
             error=json.dumps({'msg': repr(e)})))
Example No. 16
 def compute_custom_distance(self, index, callback_fn, params):
     try:
         distances = base.WitWidgetBase.compute_custom_distance_impl(
             self, index, params["distanceParams"])
         callback_dict = {
             "distances": distances,
             "exInd": index,
             "funId": callback_fn,
             "params": params["callbackParams"],
         }
         output.eval_js("""distanceCallback({callback_dict})""".format(
             callback_dict=json.dumps(callback_dict)))
     except Exception as e:
         output.eval_js("""backendError({error})""".format(
             error=json.dumps({"msg": repr(e)})))
Example No. 17
def take_photo(img_width=48, img_height=48, quality=0.8):
  js = Javascript('''
    async function takePhoto(img_width, img_height, quality) {
      const div = document.createElement('div');
      const capture = document.createElement('button');
      capture.textContent = 'Capture';
      div.appendChild(capture);
      const video = document.createElement('video');
      video.style.display = 'block';
      const stream = await navigator.mediaDevices.getUserMedia({video: {height: img_height, width: img_width}});
      document.body.appendChild(div);
      div.appendChild(video);
      video.srcObject = stream;
      await video.play();
      // Resize the output to fit the video element.
      google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
      // Wait for Capture to be clicked.
      await new Promise((resolve) => capture.onclick = resolve);
      const canvas = document.createElement('canvas');
      canvas.width = video.videoWidth;
      canvas.height = video.videoHeight;
      const ctx = canvas.getContext('2d');
      ctx.drawImage(video, 0, 0);
      const imageData = ctx.getImageData(1, 1, Math.round(img_width), Math.round(img_height));
      stream.getVideoTracks()[0].stop();
      div.remove();
      return imageData.data
    }
    ''')
  display(js)
  data = eval_js('takePhoto({},{},{})'.format(img_width, img_height, quality))
  return data
Example No. 18
    def render(self):
        """Render the widget to the display."""
        # Display WIT Polymer element.
        display.display(display.HTML(self._get_element_html()))
        display.display(
            display.HTML(WIT_HTML.format(height=self.height, id=self.id)))

        # Send the provided config and examples to JS.
        output.eval_js("""configCallback({config})""".format(
            config=json.dumps(self.config)))
        self.set_examples_in_progress = True
        self._set_examples_looper('updateExamplesCallback({data})')
        self.set_examples_in_progress = False

        self._generate_sprite()
        self._rendering_complete = True
Example No. 19
def get_host(host=None):
    global _host
    if host is not None:
        return PlotHost(host)
    if _host is None:
        # actual detection code
        if is_in_colab():
            from google.colab.output import eval_js
            ext_url = eval_js("google.colab.kernel.proxyPort(2908)")
            _host = PlotHost('http://localhost:2908', external_url=ext_url)
            try:
                # Probe the plot server; start it below if it is unreachable.
                status = _host.status()
            except:
                start_server_process()
            return _host
        jpy = my_jupyter_server()
        if jpy is not None:
            hub_prefix = os.getenv("JUPYTERHUB_SERVICE_PREFIX")
            if hub_prefix is None:
                ext = jpy['url'] + "plotar/"
            else:
                # on jupyter-/binderhub we don't know the external hostname,
                # so we use an absolute URL
                ext = hub_prefix + "plotar/"
            _host = PlotHost(jpy['url'] + "plotar/",
                             external_url=ext,
                             params=jpy['params'],
                             headers=jpy['headers'])
        else:
            _host = PlotHost(DEFAULT_SERVER)
    return _host
Example No. 20
def record(sec=1):
  display(Javascript(RECORD))
  s = output.eval_js('record(%d)' % (sec*1000))
  b = b64decode(s.split(',')[1])
  with open('audio.wav','wb') as f:
    f.write(b)
  return 'audio.wav'
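Despite the .wav name, the bytes written here are whatever container the browser's MediaRecorder produced (typically WebM or Ogg), which is why the later examples convert through ffmpeg. A hedged way to load the result, assuming pydub and ffmpeg are installed:

from pydub import AudioSegment

path = record(sec=3)
clip = AudioSegment.from_file(path)  # pydub sniffs the real container via ffmpeg
print(len(clip), 'ms recorded')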
Example No. 21
def get_audio():
    display(HTML(AUDIO_HTML))
    data = eval_js("data")
    binary = b64decode(data.split(',')[1])

    process = (ffmpeg.input('pipe:0').output('pipe:1', format='wav').run_async(
        pipe_stdin=True,
        pipe_stdout=True,
        pipe_stderr=True,
        quiet=True,
        overwrite_output=True))
    output, err = process.communicate(input=binary)

    riff_chunk_size = len(output) - 8
    # Break up the chunk size into four bytes, held in b.
    q = riff_chunk_size
    b = []
    for i in range(4):
        q, r = divmod(q, 256)
        b.append(r)

    # Replace bytes 4:8 in proc.stdout with the actual size of the RIFF chunk.
    riff = output[:4] + bytes(b) + output[8:]

    sr, audio = wav_read(io.BytesIO(riff))

    return audio, sr
Example No. 22
def take_photo(filename='photo.jpg', quality=0.8):
    js = Javascript('''
        async function takePhoto(quality) {
          const div = document.createElement('div');
          const capture = document.createElement('button');
          capture.textContent = 'Capture';
          div.appendChild(capture);
          const video = document.createElement('video');
          video.style.display = 'block';
          const stream = await navigator.mediaDevices.getUserMedia({video: true});
          document.body.appendChild(div);
          div.appendChild(video);
          video.srcObject = stream;
          await video.play();
          // Resize the output to fit the video element.
          google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
          // Wait for Capture to be clicked.
          await new Promise((resolve) => capture.onclick = resolve);
          const canvas = document.createElement('canvas');
          canvas.width = video.videoWidth;
          canvas.height = video.videoHeight;
          canvas.getContext('2d').drawImage(video, 0, 0);
          stream.getVideoTracks()[0].stop();
          div.remove();
          return canvas.toDataURL('image/jpeg', quality);
        }
    ''')
    display(js)
    data = eval_js('takePhoto({})'.format(quality))
    binary = b64decode(data.split(',')[1])
    with open(filename, 'wb') as f:
        f.write(binary)
    return filename
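A typical cell pairs the capture with inline display of the saved JPEG; the path below is just the function's default filename:

from IPython.display import Image, display

photo_path = take_photo()  # click "Capture" in the rendered widget
display(Image(filename=photo_path))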
Example No. 23
def record(sec=3):
    print("Mów teraz")
    display(Javascript(RECORD))
    sec += 1
    s = output.eval_js('record(%d)' % (sec * 1000))
    print("Nagrywanie zakończone, dziękuję!")
    b = b64decode(s.split(',')[1])

    process = (ffmpeg.input('pipe:0').output('pipe:1', format='wav').run_async(
        pipe_stdin=True,
        pipe_stdout=True,
        pipe_stderr=True,
        quiet=True,
        overwrite_output=True))
    output_fin, err = process.communicate(input=b)

    riff_chunk_size = len(output_fin) - 8
    # Break up the chunk size into four bytes, held in b.
    q = riff_chunk_size
    b_null = []
    for i in range(4):
        q, r = divmod(q, 256)
        b_null.append(r)

    # Replace bytes 4:8 in proc.stdout with the actual size of the RIFF chunk.
    riff = output_fin[:4] + bytes(b_null) + output_fin[8:]

    sr, audio = wav_read(io.BytesIO(riff))

    return audio, sr
Example No. 24
def record(sec=3):
    print("Speak Now...")
    display(Javascript(RECORD))
    sec += 1
    s = output.eval_js('record(%d)' % (sec * 1000))
    print("Done Recording !")
    binary = b64decode(s.split(',')[1])

    process = (ffmpeg.input('pipe:0').output('pipe:1', format='wav').run_async(
        pipe_stdin=True,
        pipe_stdout=True,
        pipe_stderr=True,
        quiet=True,
        overwrite_output=True))
    data, err = process.communicate(input=binary)

    riff_chunk_size = len(data) - 8
    # Break up the chunk size into four bytes, held in b.
    q = riff_chunk_size
    b = []
    for i in range(4):
        q, r = divmod(q, 256)
        b.append(r)

    # Replace bytes 4:8 in proc.stdout with the actual size of the RIFF chunk.
    riff = data[:4] + bytes(b) + data[8:]

    rate, audio = wavfile.read(io.BytesIO(riff))
    return rate, audio
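Once the samples are back in Python they can be inspected or played directly. A short follow-up, assuming a mono recording:

from IPython.display import Audio

rate, audio = record(sec=3)
print('captured {:.1f} s at {} Hz'.format(len(audio) / rate, rate))
Audio(audio, rate=rate)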
Example No. 25
def take_photo(filename='photo.jpg', quality=0.8):
  display(HTML(VIDEO_HTML % quality))
  data = eval_js("data")
  binary = b64decode(data.split(',')[1])
  with open(filename, 'wb') as f:
    f.write(binary)
  return len(binary)
Example No. 26
def record_audio(seconds: int = 3, normalize_db: float = 0.1):
    # Use Javascript to record audio.
    record_js_code = """
      const sleep  = time => new Promise(resolve => setTimeout(resolve, time))
      const b2text = blob => new Promise(resolve => {
        const reader = new FileReader()
        reader.onloadend = e => resolve(e.srcElement.result)
        reader.readAsDataURL(blob)
      })
      var record = time => new Promise(async resolve => {
        stream = await navigator.mediaDevices.getUserMedia({ audio: true })
        recorder = new MediaRecorder(stream)
        chunks = []
        recorder.ondataavailable = e => chunks.push(e.data)
        recorder.start()
        await sleep(time)
        recorder.onstop = async ()=>{
          blob = new Blob(chunks)
          text = await b2text(blob)
          resolve(text)
        }
        recorder.stop()
      })
      """
    print('Starting recording for {} seconds...'.format(seconds))
    _display.display(_display.Javascript(record_js_code))
    audio_string = output.eval_js('record(%d)' % (seconds * 1000.0))
    print('Finished recording!')
    audio_bytes = base64.b64decode(audio_string.split(',')[1])
    return audio_bytes_to_np(audio_bytes, normalize_db=normalize_db)
Example No. 27
 def _generate_sprite(self):
   # Generate a sprite image for the examples if the examples contain the
   # standard encoded image feature.
   if not self.examples:
     return
   example_to_check = self.json_to_proto(self.examples[0])
   feature_list = (example_to_check.context.feature
                   if self.config.get('are_sequence_examples')
                   else example_to_check.features.feature)
   if 'image/encoded' in feature_list:
     example_strings = [
       self.json_to_proto(ex).SerializeToString()
       for ex in self.examples]
     # b64encode returns bytes in Python 3; decode so the data URL is a string.
     encoded = base64.b64encode(
       inference_utils.create_sprite_image(example_strings)).decode('utf-8')
     sprite = 'data:image/png;base64,{}'.format(encoded)
     output.eval_js("""spriteCallback('{sprite}')""".format(sprite=sprite))
Example No. 28
def record(duration=3):
  print("recording ... ", end = " ")
  display.display(display.Javascript(RECORD))
  s = output.eval_js(f'record({duration*1000})')
  print("  finished!")
  b = b64decode(s.split(',')[1])
  audio = AudioSegment.from_file(BytesIO(b))
  return audio
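Because the result is a pydub AudioSegment, it can be exported or previewed without manual WAV handling. A possible follow-up (the output filename is arbitrary):

from IPython.display import Audio

clip = record(duration=3)
clip.export('clip.wav', format='wav')  # AudioSegment -> proper WAV on disk
Audio('clip.wav')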
Example No. 29
    def join(self, room=None, signaling_folder='/content/webrtc', verbose=False):
        if self.room is None and room is None:
            raise ValueError('A room parameter must be specified')
        elif self.room:
            room = self.room

        if self.signaling_folder and self.signaling_folder != signaling_folder:
            signaling_folder = self.signaling_folder

        if signaling_folder:
            self.js_signaling = ColabSignaling(signaling_folder=signaling_folder, 
                                               room=room, javacript_callable=True)
        else:
            self.js_signaling = ColabApprtcSignaling(room=room, javacript_callable=True)

        display(Javascript(self._js))
        eval_js(f'start_js_peer("{room}")')
Example No. 30
def use_colab(port):
    try:
        from google.colab.output import eval_js

        return eval_js('google.colab.kernel.proxyPort(%d, {"cache": false})' %
                       port)
    except BaseException:
        return None
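kernel.proxyPort returns a URL that tunnels the given local port through the Colab frontend, so the caller can surface it to the user or embed it in an iframe. A minimal usage sketch, assuming a server is already listening on port 8080:

url = use_colab(8080)
if url is not None:
    print('Open the proxied app at:', url)
else:
    print('Not running inside Colab; use http://localhost:8080 directly.')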