Example #1
	def add_memory(amodal, color, starting_time, ending_time):

		conn = r.connect("localhost", 28015)
		r.db('test').table("vision_memory").insert([
			{ "starting_time": starting_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
			  "ending_time": ending_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
			  "amodal": r.binary(amodal[0]),
			  "color": r.binary(color[0])
			}
		]).run(conn)
		r.db('test').table("vision_timestamps").insert([
			{ "starting_time": starting_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
			  "ending_time": ending_time.strftime("%Y-%m-%d %H:%M:%S.%f")
			}
		]).run(conn)
		conn.close()
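The bytes wrapped with r.binary() come back from the driver as plain byte strings. A minimal read-back sketch for the table above (not part of the original project; the filter key and helper name are our own choice):

def get_memories(starting_time):
    conn = r.connect("localhost", 28015)
    cursor = r.db('test').table("vision_memory").filter(
        {"starting_time": starting_time.strftime("%Y-%m-%d %H:%M:%S.%f")}
    ).run(conn)
    rows = [(row["amodal"], row["color"]) for row in cursor]  # raw bytes
    conn.close()
    return rows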
Example #2
def process_update(virtue_id, heartbeat_conn, current_ruleset):
    transducers = []
    timestamp = int(time())
    for transducer_id in current_ruleset:
        enabled = current_ruleset[transducer_id]
        # TODO: We don't care about the config right now but eventually we
        # should

        # config = current_ruleset[transducer_id]
        config = '{}'
        transducer_type = 'SENSOR'
        row_signature = sign_message(virtue_id, transducer_id, transducer_type,
                                     config, enabled, timestamp, virtue_key)
        transducers.append({
            'id': [virtue_id, transducer_id],
            'virtue_id': virtue_id,
            'transducer_id': transducer_id,
            'type': transducer_type,
            'configuration': config,
            'enabled': enabled,
            'timestamp': timestamp,
            'signature': r.binary(row_signature)
        })
    try:
        res = r.db('transducers').table('acks').insert(
            transducers, conflict='replace').run(
                heartbeat_conn, durability='soft')
        if res['errors'] > 0:
            error_wrapper('Failed to insert into ACKs table; first error: %s',
                          str(res['first_error']))
    except r.ReqlError as e:
        error_wrapper('Failed to insert into ACKs table because: %s', str(e))
Example #3
def send_ack(virtue_id, transducer_id, transducer_type, config, enabled,
             timestamp, virtue_key, conn):
    # Confirm to excalibur that changes were successful
    new_signature = sign_message(virtue_id, transducer_id, transducer_type,
                                 config, enabled, timestamp, virtue_key)

    try:
        res = r.db('transducers').table('acks').insert(
            {
                'id': [virtue_id, transducer_id],
                'virtue_id': virtue_id,
                'transducer_id': transducer_id,
                'type': transducer_type,
                'configuration': config,
                'enabled': enabled,
                'timestamp': timestamp,
                'signature': r.binary(new_signature)
            },
            conflict='replace').run(conn)
        if res['errors'] > 0:
            error_wrapper('Failed to insert into ACKs table; first error: %s',
                          str(res['first_error']))
            return False
    except r.ReqlError as e:
        error_wrapper('Failed to publish ACK to Excalibur because: %s', str(e))
        return False
    return True
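On the receiving side, the ACK row can be fetched by its compound primary key. A hypothetical reader sketch, assuming the same table layout as above (the helper name and the timestamp comparison are ours, not from the original project):

def check_ack(conn, virtue_id, transducer_id, expected_timestamp):
    # The 'signature' field stored via r.binary() comes back as raw bytes,
    # ready for whatever verification routine sign_message() pairs with.
    row = r.db('transducers').table('acks').get(
        [virtue_id, transducer_id]).run(conn)
    return row is not None and row['timestamp'] >= expected_timestamp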
Example #4
 def update_job(self, job):
     changes = {
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
     }
     results = self.table.get_all(job.id).update(changes).run(self.conn)
     # 'skipped' is True when any of the returned counters is non-zero;
     # raise if the update was explicitly skipped, errored, or matched nothing.
     skipped = False in map(lambda x: results[x] == 0, results.keys())
     if results['skipped'] > 0 or results['errors'] > 0 or not skipped:
         raise JobLookupError(job.id)
Example #5
 def add_job(self, job):
     job_dict = {
         'id': job.id,
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
     }
     results = self.table.insert(job_dict).run(self.conn)
     if results['errors'] > 0:
         raise ConflictingIdError(job.id)
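The pickled job state written by add_job()/update_job() round-trips through r.binary() unchanged. A hypothetical read path, not part of the original jobstore excerpt (the method name is ours):

 def lookup_job_state(self, job_id):
     # get() returns None for an unknown id; 'job_state' arrives as the
     # same bytes that pickle.dumps() produced above.
     document = self.table.get(job_id).run(self.conn)
     if document is None:
         return None
     return pickle.loads(document['job_state'])  # the job's __getstate__ dict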
Example #6
    def add_memory(amodal, color, starting_time, ending_time):

        conn = r.connect("localhost", 28015)
        r.db('test').table("vision_memory").insert([{
            "starting_time": starting_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
            "ending_time": ending_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
            "amodal": r.binary(amodal[0]),
            "color": r.binary(color[0])
        }]).run(conn)
        r.db('test').table("vision_timestamps").insert([{
            "starting_time": starting_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
            "ending_time": ending_time.strftime("%Y-%m-%d %H:%M:%S.%f")
        }]).run(conn)
        conn.close()
Example #7
    def transform(self, data):
        """for now, just return data"""

        trans = {
            'title': self._filename_,
            'filename': self._filename_,
            'file': r.binary(data.read()),
            'created': self.created,
            'modified': self.modified,
            'appuserid': self.request.user.uid
        }

        return trans
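A hypothetical usage of transform(), with the handler variable, file path, and table name assumed rather than taken from the original: the returned dict can be passed straight to insert(), since r.binary() has already wrapped the raw file bytes.

with open(path, 'rb') as data:
    doc = handler.transform(data)
r.table('files').insert(doc).run(conn)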
Example #8
    def expire(self, session_id, timeout):
        session_dict = {
            'id': session_id,
            'expired': timeout,
            'payload': r.binary(self.to_r()),
        }
        results = r.table(R_TABLE).get(session_id).replace(session_dict).run(
            self.conn)

        if results['errors'] > 0:
            raise KeyError(
                u'Session ID (%s) conflicts with an existing session' %
                session_id)
Example #9
	def add_memory(data, starting_time, ending_time):
		conn = r.connect("localhost", 28015)
		r.db('test').table("language_memory").insert([
			{ "starting_time": starting_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
			  "ending_time": ending_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
			  "data": r.binary(data)
			}
		]).run(conn)
		r.db('test').table("language_timestamps").insert([
			{ "starting_time": starting_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
			  "ending_time": ending_time.strftime("%Y-%m-%d %H:%M:%S.%f")
			}
		]).run(conn)
		conn.close()
Example #10
 def add_memory(data, starting_time, ending_time):
     conn = r.connect("localhost", 28015)
     r.db('test').table("hearing_memory").insert([{
         "starting_time": starting_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
         "ending_time": ending_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
         "data": r.binary(data)
     }]).run(conn)
     r.db('test').table("hearing_timestamps").insert([{
         "starting_time": starting_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
         "ending_time": ending_time.strftime("%Y-%m-%d %H:%M:%S.%f")
     }]).run(conn)
     conn.close()
Example #11
 def chunk(self, consumer=None, query_ending=True):
     data = self.buffer.read(self.file_document[CHUNK_SIZE_BYTES_JSON_NAME])
     if not data:
         if query_ending:
             return self.bucket.db_query.do(lambda _: None)
         return None
     binary = rethinkdb.binary(data)
     chunk_document = {
         FILE_ID_JSON_NAME: self.file_id,
         NUM_JSON_NAME: self._chunk_index,
         DATA_JSON_NAME: binary
     }
     self._file_length += len(data)
     self._chunk_index += 1
     query = self.bucket.db_query.table(self.bucket.chunks_table_name).insert(chunk_document)
     if consumer is not None:
         consumer(query)
     return query
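A hypothetical read-back helper, assuming the same JSON-name constants as the writer above and a plain connection to run against: the chunk documents are sorted by index and their binary payloads concatenated.

def read_file(bucket, conn, file_id):
    cursor = bucket.db_query.table(bucket.chunks_table_name) \
        .filter({FILE_ID_JSON_NAME: file_id}) \
        .order_by(NUM_JSON_NAME) \
        .run(conn)
    # Each DATA_JSON_NAME field is returned as bytes by the driver.
    return b''.join(doc[DATA_JSON_NAME] for doc in cursor)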
Example #12
def save_file(task_obj, params):

  import base64

  # decode data
  dec_data = base64.b64decode(task_obj._job_data)

  # make a copy to disk
  with open(params[0], 'wb') as fh:
    fh.write(dec_data)

  # check whether this is the first commit to the db from this user
  try:
    result = r.db_create(params[2]).run(task_obj.db.conn)
    # create table...
    result = r.db(params[2]).table_create('files').run(task_obj.db.conn)
    task_obj.db.db_name = params[2]
  except r.ReqlError:
    # db already exists; simply proceed
    pass

  table_name = 'files'

  # note: the file is stored still base64-encoded, as received in _job_data
  r.db(params[2]).table(table_name).insert({
    'uuid': task_obj.uuid,
    'type_id': params[1],
    'filename': params[0],
    'file': r.binary(task_obj._job_data)
  }).run(task_obj.db.conn)

  # save the table name under the task's key
  task_obj.db.db_keys[task_obj.uuid] = table_name

  # extract the module and func for the reply job,
  # where params[3][0] = module_name
  # and   params[3][1] = func_name
  result = params[3]

  return result
Example #13
def _insert_session_id_if_unique(
    conn,
    timeout,
    session_id,
    serialize,
):
    """ Attempt to insert a given ``session_id`` and return the successful id
    or ``None``."""

    try:
        value = r.table(R_TABLE).get(session_id).run(conn)
        if value is not None:
            return None

        session_dict = {
            'id': session_id,
            'expired': timeout,
            'payload': r.binary(
                serialize({
                    'managed_dict': {},
                    'created': time.time(),
                    'timeout': timeout
                })),
        }
        results = r.table(R_TABLE).insert(session_dict).run(conn)

        if results['errors'] > 0:
            raise KeyError(
                u'Session ID (%s) conflicts with an existing session' %
                session_id)

        return session_id
    except Exception:
        # any conflict or race simply reports failure, per the docstring
        return None
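The matching load path would deserialize the binary payload. A minimal sketch under the same assumptions, with deserialize being the inverse of the serialize callable used above (the helper name is ours):

def _get_session_payload(conn, session_id, deserialize):
    value = r.table(R_TABLE).get(session_id).run(conn)
    if value is None:
        return None
    # 'payload' comes back as the exact bytes passed to r.binary().
    return deserialize(value['payload'])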
Example #14
 def save_str(self, key, value):
     r.table('binary').insert(
         {'key': key, 'value': r.binary(value)}).run(self.conn)
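A hypothetical counterpart for reading the value back (not in the original excerpt; it assumes 'key' is not the table's primary key, hence the filter):

 def load_str(self, key):
     rows = list(r.table('binary').filter({'key': key}).run(self.conn))
     return rows[0]['value'] if rows else None  # bytes, or None if absent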
Example #15
    def post(self):
        # first of all, check if the user has reached their quota
        if not self.web_app.debug:
            quota = int(
                self.web_app.config.get("stories",
                                        "max_story_count",
                                        default=10))
            stories_query = self.db.query("stories").get_all(
                self.user_data["id"],
                index="user_id").pluck().limit(quota).count()
            count = self.db.run(stories_query)
            if count >= quota:
                raise Forbidden(
                    description="You have exceeded your quota of {0} stories. "
                    "Use the 'DELETE /story/<story_id>' to clean up your stories."
                    .format(quota))

        data = request.json or {}
        # optional parameter: public (true/false), default true
        # optional parameter: music (true/false), default true
        # optional parameter: video (true/false), default true
        # optional parameter: corpus (str), default "mixed"

        public = bool(data.get("public", True))
        music = bool(data.get("music", True))
        video = bool(data.get("video", True))
        corpus_param = str(data.get("corpus", "mixed"))

        corpus_dir = os.path.join(".", "assets", "texts")
        corpus_names = list_corpus()
        if not corpus_names:
            raise ServiceUnavailable(description="Corpus is unavailable.")

        if corpus_param == "mixed":
            # Read each corpus individually, and combine them into one markov chain
            models = []
            for corpus_path in corpus_names:
                with open(os.path.join(corpus_dir,
                                       "{0}.txt".format(corpus_path)),
                          encoding="utf-8") as corpus_file:
                    corpus_text = corpus_file.read()
                models.append(markovify.Text(corpus_text))
            model = markovify.combine(models)
        elif corpus_param in corpus_names:
            # use that corpus only
            with open(os.path.join(corpus_dir, "{0}.txt".format(corpus_param)),
                      encoding="utf-8") as corpus_file:
                corpus_text = corpus_file.read()
            model = markovify.Text(corpus_text)
        else:
            raise BadRequest(
                description="Unknown corpus '{0}'. Available corpus: {1}".
                format(corpus_param, ", ".join(corpus_names)))

        # Generate some sentences from a corpus, using markov chains (woo, how original)
        sentences = []
        for i in range(10):
            if i == 0:
                sentence = model.make_sentence_with_start(beginning="Once",
                                                          strict=False)
            else:
                sentence = model.make_sentence()
            if sentence:
                sentences.append(sentence)

        # Use TTS to recite the text
        tts = gTTS(text=" ".join(sentences), lang="en")
        with io.BytesIO() as tts_buffer:
            # Write the TTS result to the buffer
            tts.write_to_fp(tts_buffer)
            # Bring back the cursor to the beginning of the buffer
            tts_buffer.seek(0)
            # Read the buffer
            tts_binary = tts_buffer.read()

        if not music and not video:
            # no FFMPEG filtering necessary
            media_output = tts_binary
        else:
            # Music parameters
            music_path = os.path.join(".", "assets", "music", "track.mp3")
            music_time_start = random.randrange(0, 1200)  # seconds
            music_volume = 0.2

            # FFMPEG filters
            if music and video:
                # Mix music, TTS, and video
                # 1. First, the volume of the music track is reduced, while the TTS track is left at 100%
                # 2. Then, the audio of the music track is trimmed to start at a certain time
                # 3. Finally, the two audio tracks are mixed together to produce one track
                # 4. Duplicate the mixed audio track into two labels
                # 5. Generate a video with the samples waves from the first audio label
                # 6. Export video and audio to pipe in webm format
                ffmpeg_filter = "-filter_complex " \
                                "[0:a]volume=1[a0];[1:a]volume={music_volume}[a1];" \
                                "[a1]atrim=start={music_start}[a1];" \
                                "[a0][a1]amix=inputs=2:duration=shortest:dropout_transition=3[a];" \
                                "[a]asplit[outa1][outa2];" \
                                "[outa1]showwaves=s=1280x202:mode=line[sw]" \
                                " -map \"[sw]\" -map \"[outa2]\" -c:v libvpx -auto-alt-ref 0 -speed 8 " \
                                "-c:a libvorbis -f webm" \
                    .format(music_start=music_time_start,
                            music_volume=music_volume)
                ff_inputs = {"pipe:0": None, music_path: None}

            elif music:
                # Mix music and TTS
                # 1. First, the volume of the music track is reduced, while the TTS track is left at 100%
                # 2. Then, the audio of the music track is trimmed to start at a certain time
                # 3. Finally, the two audio tracks are mixed together to produce one track
                # 4. Export to pipe in MP3
                ffmpeg_filter = "-filter_complex " \
                                "[0:a]volume=1[a0];[1:a]volume={music_volume}[a1];" \
                                "[a1]atrim=start={music_start}[a1];" \
                                "[a0][a1]amix=inputs=2:duration=shortest:dropout_transition=3[a]" \
                                " -map \"[a]\" -f mp3".format(music_start=music_time_start,
                                                              music_volume=music_volume)
                ff_inputs = {"pipe:0": None, music_path: None}

            else:
                # Mix video and TTS
                # 1. The TTS audio track is duplicated into two labels
                # 2. Generate a video with the samples waves from the first audio label
                # 3. Export video and audio to pipe in webm format
                ffmpeg_filter = "-filter_complex " \
                                "[0:a]asplit[outa1][outa2];" \
                                "[outa1]showwaves=s=1280x202:mode=line[sw]" \
                                " -map \"[sw]\" -map \"[outa2]\" -c:v libvpx -auto-alt-ref 0 -speed 8 " \
                                "-c:a libvorbis -f webm"
                ff_inputs = {"pipe:0": None}

            # Launch ffmpeg
            ff = FFmpeg(executable=self.web_app.config.get(
                "stories", "ffmpeg"),
                        inputs=ff_inputs,
                        outputs={"pipe:1": ffmpeg_filter})
            log.debug("Executing FFMPEG command: {0}".format(ff.cmd))
            # The mixed track is output using the subprocess's STDOUT, piped to the mixed_bytes var
            media_output, stderr = ff.run(input_data=tts_binary,
                                          stdout=subprocess.PIPE)
            if stderr:
                log.debug("FFMPEG STDERR: %s", str(stderr.read()))
            else:
                log.debug("No FFMPEG STDERR")

        # store in database
        story_insert_doc = {
            "public": public,
            "user_id": self.user_data["id"],
            "sentences": sentences,
            "media": rethinkdb.binary(media_output),
            "media_type": "video/webm" if video else "audio/mpeg"
        }
        insert_query = self.db.query("stories").insert(story_insert_doc)
        story_id = self.db.run(insert_query)["generated_keys"][0]

        story_query = self.db.query("stories").get(story_id).pluck(
            "id", "public", "sentences", "media_type")
        story = self.db.run(story_query)
        story["url"] = url_for("api.stories.story", story_id=story_id)
        story["media"] = url_for("api.stories.play", story_id=story_id)
        return story
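A sketch of the playback handler that the URLs above point at (the route wiring, method name, and Response class are assumptions, not taken from the original): the 'media' field is returned by the driver as raw bytes, so it can be served directly with the stored media_type.

    def play(self, story_id):
        # 'media' arrives as raw bytes; flask.Response is an assumption here
        story = self.db.run(self.db.query("stories").get(story_id))
        return Response(story["media"], mimetype=story["media_type"])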
Example #16
# excerpt assumes: import rethinkdb as r; from sys import argv;
# from os import read; from itertools import count
conn = r.connect(argv[1], int(argv[2]))

table = argv[3]

try:
    r.db_create('streams').run(conn)
except r.errors.ReqlError:
    pass

try:
    r.db('streams').table_drop(table).run(conn)
except r.errors.ReqlError:
    pass
r.db('streams').table_create(table).run(conn)

try:
    for i in count():
        data = read(0, 1024)
        if not data:
            break
        r.db('streams').table(table).insert({
            'id': i,
            'chunk': r.binary(data)
        }).run(conn, durability='soft')
finally:
    r.db('streams').table(table).insert({
        'id': i,
        'end': True
    }).run(conn, durability='soft')
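A hypothetical consumer for the stream written above (not part of the original script): walk the chunk ids in primary-key order and stop at the 'end' marker.

import sys

for row in r.db('streams').table(table).order_by(index='id').run(conn):
    if row.get('end'):
        break
    sys.stdout.buffer.write(row['chunk'])  # 'chunk' is raw bytes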
Example #17
    def __change_ruleset(self,
                         virtue_id,
                         trans_id,
                         transducer_type,
                         enable,
                         virtue_running,
                         config=None):
        if self.conn is None:
            ret = self.__connect_rethinkdb()
            # Return if error
            if ret != True:
                return ret

        if type(transducer_type) is list:
            transducer_type = transducer_type[0]

        timestamp = int(time.time())

        row = {
            'id': [virtue_id, trans_id],
            'virtue_id': virtue_id,
            'transducer_id': trans_id,
            'type': transducer_type,
            'configuration': config,
            'enabled': enable,
            'timestamp': timestamp
        }
        (success, signature) = self.__sign_message(row)
        if not success:
            # Return error code
            return signature

        row['signature'] = r.binary(signature)

        # Send command to change ruleset
        try:
            res = r.db('transducers').table('commands')\
                .insert(row, conflict='replace').run(self.conn)
            if res['errors'] > 0:
                return self.__error(
                    'unspecifiedError',
                    details=
                    'Failed to insert into commands table; first error: ' +
                    res['first_error'])
        except r.ReqlError as e:
            return self.__error(
                'unspecifiedError',
                details='Failed to insert into commands table: ' + str(e))

        # If the virtue isn't running yet, don't bother waiting for an ACK
        if not virtue_running:
            return True

        # Wait for ACK from the virtue that the ruleset has been changed
        # try:
        cursor = r.db('transducers').table('acks')\
            .get([virtue_id, trans_id])\
            .changes(squash=False).run(self.conn)
        # except r.ReqlError as e:
        #       print 'ERROR: Failed to read from the ACKs table because:', e
        #       return False

        retry = True
        try:
            while retry:
                try:
                    retry = False
                    # Wait max 30 seconds - if we miss the real ACK, hopefully
                    # at least the next heartbeat will suffice
                    print('INFO: Waiting for ACK')
                    change = cursor.next(wait=self.wait_for_ack)
                    row = change['new_val']

                    verified = self.__verify_message(row)
                    if verified is not True:
                        return verified

                    if row['timestamp'] >= timestamp:
                        if row['enabled'] == enable:
                            print('INFO: ACK received!')
                            return True
                        else:
                            return self.__error(
                                'unspecifiedError',
                                details=
                                'Received ACK with incorrect value for enabled: '
                                + str(enable) + ' vs ' + str(row['enabled']))
                    else:
                        print('WARN: Timestamp incorrect:', timestamp,
                              row['timestamp'])
                        # Retry once in case that was just a wayward ACK
                        retry = True

                except (r.ReqlCursorEmpty, r.ReqlDriverError):
                    return self.__error(
                        'unspecifiedError',
                        details='Failed to receive ACK before timeout')
        finally:
            # Close the cursor once we are done with it; closing it inside the
            # loop body would break the second cursor.next() on the retry path.
            cursor.close()
        return self.__error('unspecifiedError',
                            details='Failed to receive ACK before timeout')