Example #1
 def test_undoes_db_changes_when_error_2(self):
     """
     Tests that when load_from_config.run() encounters an unexpected error, it removes only the
     documents it saved to the database and leaves untouched any documents that were already there
     """
     expected_experi_model.switch_db(TEST_DB_ALIAS)
     expected_experi_model.save()
     expected_ds_model.switch_db(TEST_DB_ALIAS)
     expected_ds_model.save()
     with open(path_string_json_full, 'w') as json_config:
         json_config.write(breaking_json)
     with self.assertRaises(KeyError):
         load_from_config.run()
     with switch_db(Experiment, TEST_DB_ALIAS) as TestEx:
         query = TestEx.objects.all()
         self.assertEqual(len(query), 1)
         self.document_compare(query[0], expected_experi_model)
     with switch_db(DataSource, TEST_DB_ALIAS) as TestDs:
         query = TestDs.objects.all()
         self.assertEqual(len(query), 1)
         self.document_compare(query[0], expected_ds_model)
     with switch_db(Genotype, TEST_DB_ALIAS) as TestGen:
         query = TestGen.objects.all()
         self.assertEqual(len(query), 0)
Example #2
 def document_compare(self, doc1, doc2):
     """
     Asserts that the two given mongoengine documents are equal, ignoring metadata fields and
     fields (such as timestamps) that default to datetime.now()
     """
     for key in doc1._fields_ordered:
         # ignores metadata fields and datetime fields that default to datetime.now()
         if key != 'id' and key[0] != '_' and key != 'dtt' and key != 'lastupdateddate':
             with self.subTest(key=key):
                 val = doc1[key]
                 if isinstance(val, dict):
                     self.assertDictEqual(val, doc2[key])
                 elif isinstance(val, DBRef):
                     if key == 'study':
                         with switch_db(Experiment, TEST_DB_ALIAS) as TestEx:
                             study = TestEx.objects.get(id=val.id)
                             self.document_compare(study, doc2[key])
                     elif key == 'datasource':
                         with switch_db(DataSource, TEST_DB_ALIAS) as TestDs:
                             ds = TestDs.objects.get(id=val.id)
                             self.document_compare(ds, doc2[key])
                     else:
                         self.fail("Unexpected reference field: " + key)
                 else:
                     self.assertEqual(doc1[key], doc2[key])
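
A DBRef stores only a collection name and an ObjectId, not the referenced document, which is why the comparison above has to re-query the test database. A minimal sketch of that resolution step, with the connection aliases and the Experiment model assumed for illustration:

    from mongoengine import Document, StringField, connect, register_connection
    from mongoengine.context_managers import switch_db

    connect('main_db')                       # 'default' alias
    register_connection('test', 'test_db')   # stand-in for TEST_DB_ALIAS

    class Experiment(Document):
        name = StringField()

    def resolve_experiment(dbref):
        # re-query by the id stored in the reference to get the full document
        with switch_db(Experiment, 'test') as TestEx:
            return TestEx.objects.get(id=dbref.id)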
Example #3
def create_update_footnotes(data, db_alias):
    total_created = 0
    total_updated = 0

    logging.info('Processing %d footnotes' % len(data))

    from usda_mongo.models import Footnote, Nutrient, Food

    with switch_db(Footnote, db_alias) as Footnote:
        with switch_db(Nutrient, db_alias) as Nutrient:
            with switch_db(Food, db_alias) as Food:

                for row in csv.DictReader(
                    data, fieldnames=(
                        'ndb_no', 'footnt_no', 'footnt_typ', 'nutr_no', 'footnt_txt'
                    ),
                    delimiter='^', quotechar='~'
                ):
                    created = False

                    # SR22 definition indicates that `footnt_no` and `footnt_typ` are required,
                    # but on occasion, either one is blank.  To compensate for this, we assume
                    # a blank `footnt_no` is '1' and a blank `footnt_typ` is 'N'.
                    if row['footnt_no'] == '':
                        row['footnt_no'] = 1
                    if row['footnt_typ'] not in (FOOTNOTE_DESC, FOOTNOTE_MEAS, FOOTNOTE_NUTR):
                        row['footnt_typ'] = FOOTNOTE_NUTR

                    if row.get('nutr_no'):
                        nutrient = Nutrient.objects.get(number=int(row['nutr_no']))
                    else:
                        nutrient = None

                    try:
                        footnote = Footnote.objects.get(
                            food=Food.objects.get(ndb_number=int(row['ndb_no'])),
                            number=int(row['footnt_no']),
                            nutrient=nutrient
                        )
                        total_updated += 1
                    except Footnote.DoesNotExist:
                        footnote = Footnote(
                            food=Food.objects.get(ndb_number=int(row['ndb_no'])),
                            number=int(row['footnt_no']),
                            nutrient=nutrient
                        )
                        total_created += 1
                        created = True

                    footnote.type = row['footnt_typ']
                    footnote.text = row['footnt_txt']
                    footnote.save()

                    if created:
                        logging.debug('Created %s' % footnote)
                    else:
                        logging.debug('Updated %s' % footnote)

    logging.info('Created %d new footnotes' % total_created)
    logging.info('Updated %d footnotes' % total_updated)
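
The USDA SR22 flat files consumed here are caret-delimited with tilde text qualifiers, which is what delimiter='^' and quotechar='~' encode. A self-contained sketch of just the parsing step (the sample row is invented for illustration):

    import csv
    import io

    # one made-up FOOTNOTE row in SR22 format: ^ separates fields, ~ quotes text
    sample = io.StringIO('~01001~^~01~^~D~^^~Example footnote text~\n')

    reader = csv.DictReader(
        sample,
        fieldnames=('ndb_no', 'footnt_no', 'footnt_typ', 'nutr_no', 'footnt_txt'),
        delimiter='^', quotechar='~',
    )
    for row in reader:
        print(row['ndb_no'], row['footnt_typ'], row['footnt_txt'])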
Example #4
    def create_document(row, test=False):
        # Creates and returns a Genotype document from the values in the row
        db_alias = TEST_DB_ALIAS if test else 'default'
        build_dic = {}
        for key in row:
            if key.endswith('date') or key == 'dtt':
                # Assumes values ending in 'date' are for date fields
                build_dic[key] = datetime.strptime(row[key], "%Y-%m-%d %H:%M:%S.%f")

            # Searches through the other collections for the reference field values
            elif 'datasource' in key:
                with switch_db(DataSource, db_alias) as TestDat:
                    datasource, created = fetch_or_save(
                        TestDat, db_alias=db_alias, name=row[key]
                    )
                build_dic['datasource'] = datasource
            elif 'study' in key:
                with switch_db(Experiment, db_alias) as TestEx:
                    study, created = fetch_or_save(
                        TestEx, db_alias=db_alias, name=row[key]
                    )
                build_dic['study'] = study

            elif key == 'obs':
                # Extracts the dictionary from the obs field
                build_dic[key] = ast.literal_eval(row[key])
            else:
                build_dic[key] = row[key]

        with switch_db(Genotype, db_alias) as TestGen:
            gen, created = fetch_or_save(TestGen, db_alias=db_alias, **build_dic)
        return gen
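
fetch_or_save() is used throughout these examples but never defined in them. Judging from the call sites and from Example #27 it returns a (document, created) pair in the style of Django's get_or_create; a plausible shape, assumed rather than taken from the project, is:

    def fetch_or_save(model_cls, db_alias, **kwargs):
        # model_cls is expected to be the alias-bound class yielded by a
        # switch_db context; returns (document, created)
        existing = model_cls.objects.filter(**kwargs).first()
        if existing is not None:
            return existing, False
        doc = model_cls(**kwargs)
        doc.switch_db(db_alias)
        doc.save()
        return doc, True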
Example #5
 def tearDown(self):
     """
     Clears the test database
     """
     with switch_db(Experiment, TEST_DB_ALIAS) as TestEx:
         TestEx.objects.all().delete()
     with switch_db(DataSource, TEST_DB_ALIAS) as TestDs:
         TestDs.objects.all().delete()
     with switch_db(Genotype, TEST_DB_ALIAS) as TestGen:
         TestGen.objects.all().delete()
Example #6
    def load(self):
        logger.debug(u"iniciando metodo load() (uuid: %s)" % self.metadata['uuid'])

        logger.debug(u"salvando modelo %s no opac (_id: %s)" % (
            self.opac_model_name, self.opac_model_instance._id))

        with \
            switch_db(OpacCollection, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacJournal, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacIssue, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacArticle, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacSponsor, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacNews, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacPages, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacPressRelease, OPAC_WEBAPP_DB_NAME):

                self.opac_model_instance.switch_db(OPAC_WEBAPP_DB_NAME)
                self.opac_model_instance.save()
                self.opac_model_instance.reload()

        # update the LOAD record's data
        with switch_db(self.load_model_class, OPAC_PROC_DB_NAME):
            self.metadata['process_finish_at'] = datetime.now()
            self.metadata['process_completed'] = True
            self.metadata['must_reprocess'] = False
            json_opac_data = self.opac_model_instance.to_json()
            cleaned_json_opac_data = json_opac_data.replace('$', '')  # strip the $
            self.metadata['loaded_data'] = json.loads(cleaned_json_opac_data)
            self.load_model_instance.update(**self.metadata)
            self.load_model_instance.save()
            self.load_model_instance.reload()
            logger.debug(u"modelo %s no opac_proc (uuid: %s) foi atualizado" % (
                self.load_model_name, self.metadata['uuid']))

        logger.debug(u"finalizando metodo load() (uuid: %s)" % self.metadata['uuid'])
Example #7
def query_genotype_by_experiment(experi_id):
    db_alias = TEST_DB_ALIAS if testing else 'default'
    # Make query
    try:
        with switch_db(Experiment, db_alias) as Exper:
            ex = Exper.objects.get(id=experi_id)
    except Experiment.DoesNotExist:
        raise Http404("Experiment does not exist")
    with switch_db(Genotype, db_alias) as Gen:
        genotype = Gen.objects(study=ex)
    return genotype, ex
Example #8
def create_update_weights(data, encoding, db_alias):
    total_created = 0
    total_updated = 0

    logging.info('Processing %d weights' % len(data))

    from usda_mongo.models import Weight, Food

    with switch_db(Weight, db_alias) as Weight:
        with switch_db(Food, db_alias) as Food:

            for row in UnicodeDictReader(
                data, fieldnames=(
                    'ndb_no', 'seq', 'amount', 'msre_desc', 'gm_wgt', 'num_data_pts', 'std_dev'
                ),
                delimiter='^', quotechar='~',
                encoding=encoding
            ):
                created = False

                try:
                    weight = Weight.objects.get(
                        food=Food.objects.get(ndb_number=int(row['ndb_no'])),
                        sequence=int(row['seq'])
                    )
                    total_updated += 1
                except Weight.DoesNotExist:
                    weight = Weight(
                        food=Food.objects.get(ndb_number=int(row['ndb_no'])),
                        sequence=int(row['seq'])
                    )
                    total_created += 1
                    created = True

                weight.amount = float(row.get('amount'))
                weight.description = row.get('msre_desc')
                weight.gram_weight = float(row.get('gm_wgt'))
                if row.get('num_data_pts'):
                    weight.number_of_data_points = float(row['num_data_pts'])
                if row.get('std_dev'):
                    weight.standard_deviation = float(row['std_dev'])
                weight.save()

                if created:
                    logging.debug('Created %s' % weight)
                else:
                    logging.debug('Updated %s' % weight)

    logging.info('Created %d new weights' % total_created)
    logging.info('Updated %d weights' % total_updated)
Example #9
    def prepare(self):
        logger.debug(u"iniciando metodo prepare (uuid: %s)" % self.metadata['uuid'])
        obj_dict = self.transform_model_instance_to_python()
        obj_dict['_id'] = self._uuid_str

        with \
            switch_db(OpacCollection, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacJournal, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacIssue, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacArticle, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacSponsor, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacNews, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacPages, OPAC_WEBAPP_DB_NAME), \
            switch_db(OpacPressRelease, OPAC_WEBAPP_DB_NAME):

                if self.opac_model_instance is None:
                    # create a new instance
                    self.opac_model_instance = self.opac_model_class(**obj_dict)
                    self.opac_model_instance.switch_db(OPAC_WEBAPP_DB_NAME)
                else:  # an instance already exists in the database
                    self.opac_model_instance.switch_db(OPAC_WEBAPP_DB_NAME)
                    for k, v in obj_dict.iteritems():
                        self.opac_model_instance[k] = v
                    self.opac_model_instance.save()

        logger.debug(u"modelo opac (_id: %s) encontrado. atualizando registro" % obj_dict['_id'])

        logger.debug(u"finalizando metodo prepare(uuid: %s)" % self.metadata['uuid'])
        logger.debug(u'opac_model_instance SALVO: %s' % self.opac_model_instance.to_json())
        return self.opac_model_instance
Example #10
def get_ref_fields(gen, testing):
    ref_fields = {"study": gen.study, "datasource": gen.datasource}
    if testing:
        # Reference fields would only hold database references instead of the actual
        # document, so need to query the test database for the documents
        study_son = ref_fields["study"].as_doc()
        ds_son = ref_fields["datasource"].as_doc()
        study_id = study_son.get("$id")
        with switch_db(Experiment, TEST_DB_ALIAS) as Exper:
            ref_fields["study"] = Exper.objects.get(id=study_id)
        ds_id = ds_son.get("$id")
        with switch_db(DataSource, TEST_DB_ALIAS) as Dat:
            ref_fields["datasource"] = Dat.objects.get(id=ds_id)
    return ref_fields
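
as_doc() serializes a bson DBRef into its SON form, where the "$id" key holds the referenced ObjectId; that is what makes the re-query above work. A quick illustration:

    from bson.dbref import DBRef
    from bson.objectid import ObjectId

    ref = DBRef('experiment', ObjectId())
    son = ref.as_doc()   # SON({'$ref': 'experiment', '$id': ObjectId('...')})
    assert son.get('$id') == ref.id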
Example #11
    def test_multiple_connections(self):
        """Make sure establishing multiple connections to a standalone
        MongoDB and switching between them works.
        """
        db = MongoEngine()
        self.app.config['MONGODB_SETTINGS'] = [
            {
                'ALIAS': 'default',
                'DB': 'flask_mongoengine_test_db_1',
                'HOST': 'localhost',
                'PORT': 27017
            },
            {
                'ALIAS': 'alternative',
                'DB': 'flask_mongoengine_test_db_2',
                'HOST': 'localhost',
                'PORT': 27017
            },
        ]

        class Todo(db.Document):
            title = db.StringField(max_length=60)
            text = db.StringField()
            done = db.BooleanField(default=False)
            meta = {'db_alias': 'alternative'}

        db.init_app(self.app)
        Todo.drop_collection()

        # Test saving a doc via the default connection
        with switch_db(Todo, 'default') as Todo:
            todo = Todo()
            todo.text = "Sample"
            todo.title = "Testing"
            todo.done = True
            s_todo = todo.save()

            f_to = Todo.objects().first()
            self.assertEqual(s_todo.title, f_to.title)

        # Make sure the doc doesn't exist in the alternative db
        with switch_db(Todo, 'alternative') as Todo:
            doc = Todo.objects().first()
            self.assertEqual(doc, None)

        # Make sure switching back to the default connection shows the doc
        with switch_db(Todo, 'default') as Todo:
            doc = Todo.objects().first()
            self.assertNotEqual(doc, None)
Example #12
 def test_yaml_marked_loaded_no_load(self):
     """
     Tests that the script does not load data from a directory whose config.yaml file has been marked as loaded
     """
     yaml_parser = configuration_parser.YamlConfigParser(path_string_yaml_full)
     yaml_parser.mark_loaded()
     load_from_config.load_in_dir(path_string_yaml)
     with switch_db(Experiment, TEST_DB_ALIAS) as TestEx:
         query = TestEx.objects.all()
         self.assertEqual(len(query), 0)
     with switch_db(DataSource, TEST_DB_ALIAS) as TestDs:
         query = TestDs.objects.all()
         self.assertEqual(len(query), 0)
     with switch_db(Genotype, TEST_DB_ALIAS) as TestGen:
         query = TestGen.objects.all()
         self.assertEqual(len(query), 0)
Example #13
    def switch_db(self, db_alias, keep_created=True):
        """
        Temporarily switch the database for a document instance.

        Only really useful for archiving off data and calling `save()`::

            user = User.objects.get(id=user_id)
            user.switch_db('archive-db')
            user.save()

        :param str db_alias: The database alias to use for saving the document

        :param bool keep_created: keep the self._created value after switching db; else it is reset to True

        .. seealso::
            Use :class:`~mongoengine.context_managers.switch_collection`
            if you need to read from another collection
        """
        with switch_db(self.__class__, db_alias) as cls:
            collection = cls._get_collection()
            db = cls._get_db()
        self._get_collection = lambda: collection
        self._get_db = lambda: db
        self._collection = collection
        self._created = True if not keep_created else self._created
        self.__objects = self._qs
        self.__objects._collection_obj = collection
        return self
Example #14
    def switch_db(self, db_alias):
        """
        Temporarily switch the database for a document instance.

        Only really useful for archiving off data and calling `save()`::

            user = User.objects.get(id=user_id)
            user.switch_db('archive-db')
            user.save()

        If you need to read from another database see
        :class:`~mongoengine.context_managers.switch_db`

        :param db_alias: The database alias to use for saving the document
        """
        with switch_db(self.__class__, db_alias) as cls:
            collection = cls._get_collection()
            db = cls._get_db()
        self._get_collection = lambda: collection
        self._get_db = lambda: db
        self._collection = collection
        self._created = True
        self.__objects = self._qs
        self.__objects._collection_obj = collection
        return self
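
Both variants implement the archiving pattern their docstrings describe; the keep_created flag in the first variant only preserves self._created so that a later save is not forced to behave like a fresh insert. A minimal end-to-end sketch of the pattern, with the aliases and the User model assumed:

    from mongoengine import Document, StringField, connect, register_connection
    from mongoengine.context_managers import switch_db

    connect('live_db')                                # 'default' alias
    register_connection('archive-db', 'archive_db')

    class User(Document):
        name = StringField()

    user = User(name='alice').save()   # written to the default database

    user.switch_db('archive-db')       # rebind the instance to the archive
    user.save()                        # now written to the archive database

    with switch_db(User, 'archive-db') as ArchivedUser:
        assert ArchivedUser.objects(name='alice').count() == 1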
Example #15
def run(*args):
    global MODE
    if 'override' in args:
        Logger.Warning("OVERRIDE MODE!")
        MODE = "OVERRIDE"

    Logger.Message("Loading process in mode: " + MODE + " started.")
    global db_alias
    if testing:
        db_alias = TEST_DB_ALIAS
    # for keeping track of documents saved to db by this run of script
    global created_doc_ids
    created_doc_ids = []

    dirs = DataDir.objects.all()
    for d in dirs:
        Logger.Message("Processing data dir: " + d.path)
        path = Path(d.path)
        try:
            look_for_config_dir(path)
        except Exception as e:
            Logger.Error(str(e))
            # 'Cancels' the script by removing from the db all documents saved during this run-through
            for doc_type, doc_id in created_doc_ids:
                with switch_db(doc_type, db_alias) as Col:
                    Col.objects.get(id=doc_id).delete()
            raise e
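
The rollback loop assumes that every save elsewhere in the script appends a (document_class, id) pair to the created_doc_ids global. The recording side is not shown; a sketch of what it presumably looks like:

    created_doc_ids = []  # (document_class, id) pairs consumed by the rollback loop

    def save_and_track(document, db_alias):
        # assumed counterpart to the cleanup in run(): save the document, then
        # remember enough to delete it again if a later step fails
        document.switch_db(db_alias)
        document.save()
        created_doc_ids.append((type(document), document.id))
        return document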
Example #16
def task_delete_selected_collections(selected_uuids):
    """
        Task para apagar Coleções Carregadas.
        @param:
        - selected_uuids: lista de UUIDs dos documentos a serem removidos

        Se a lista `selected_uuids` for maior a SLICE_SIZE
            A lista será fatiada em listas de tamanho: SLICE_SIZE
        Se a lista `selected_uuids` for < a SLICE_SIZE
            Será feito uma delete direto no queryset
    """

    stage = 'load'
    model = 'collection'
    model_class = LoadCollection
    get_db_connection()
    r_queues = RQueues()
    SLICE_SIZE = 1000

    if len(selected_uuids) > SLICE_SIZE:
        list_of_list_of_uuids = list(chunks(selected_uuids, SLICE_SIZE))
        for list_of_uuids in list_of_list_of_uuids:
            uuid_as_string_list = [str(uuid) for uuid in list_of_uuids]
            r_queues.enqueue(stage, model, task_delete_selected_collections, uuid_as_string_list)
    else:
        # remove the set of LoadCollection documents identified by the uuids
        documents_to_delete = model_class.objects.filter(uuid__in=selected_uuids)
        documents_to_delete.delete()

        # convert the uuids to _id and filter those documents in OPAC
        register_connections()
        opac_pks = [str(uuid).replace('-', '') for uuid in selected_uuids]
        with switch_db(opac_models.Collection, OPAC_WEBAPP_DB_NAME) as opac_model:
            selected_opac_records = opac_model.objects.filter(pk__in=opac_pks)
            selected_opac_records.delete()
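
The chunks() helper that slices the UUID list is not shown here; a typical implementation (an assumption, not the project's actual code) and the slicing it produces:

    def chunks(items, size):
        # yield successive size-sized slices from items
        for i in range(0, len(items), size):
            yield items[i:i + size]

    # mirrors the task above: 2500 uuids -> slices of 1000, 1000 and 500
    sliced = list(chunks(list(range(2500)), 1000))
    assert [len(chunk) for chunk in sliced] == [1000, 1000, 500]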
Example #17
 def query_by_pi(self):
     self.search_term = self.request.GET['search_pi'].strip()
     with switch_db(Experiment, self.db_alias) as db:
         query = db.objects if self.search_list is None else self.search_list
         self.search_list = query.filter(
             __raw__=self.raw_query_dict("pi", self.search_term)
         )
Example #18
    def test_connection_alias(self):

        test_db_name_2 = TESTDB_NAME+"_2"
        try:
            db_data2 = DatabaseData(test_db_name_2, collection="test_collection")
            db_data2.connect_mongoengine(alias="test_alias")

            db_data1 = DatabaseData(TESTDB_NAME, collection="test_collection")
            db_data1.connect_mongoengine()

            class TestDocument(Document):
                test = StringField()

            with switch_db(TestDocument, "test_alias") as TestDocument:
                with db_data2.switch_collection(TestDocument) as TestDocument:
                    TestDocument(test="abc").save()

            # check the collection with pymongo
            client = MongoClient()
            db = client[test_db_name_2]
            collection = db[db_data2.collection]
            documents = collection.find()
            assert documents.count() == 1
            assert documents[0]['test'] == "abc"
        finally:
            if self._connection:
                self._connection.drop_database(test_db_name_2)
Example #19
def create_update_food_groups(data, db_alias):
    total_created = 0
    total_updated = 0

    logging.info('Processing %d food groups' % len(data))

    from usda_mongo.models import FoodGroup

    with switch_db(FoodGroup, db_alias) as FoodGroup:

        for row in csv.DictReader(
            data, fieldnames=('fdgrp_cd', 'fdgrp_desc'),
            delimiter='^', quotechar='~'
        ):
            created = False

            try:
                food_group = FoodGroup.objects.get(code=int(row['fdgrp_cd']))
                total_updated += 1
            except FoodGroup.DoesNotExist:
                food_group = FoodGroup(code=int(row['fdgrp_cd']))
                total_created += 1
                created = True

            food_group.description = row['fdgrp_desc']
            food_group.save()

            if created:
                logging.debug('Created %s' % food_group)
            else:
                logging.debug('Updated %s' % food_group)

        logging.info('Created %d new food groups' % total_created)
        logging.info('Updated %d food groups' % total_updated)
Example #20
def create_update_sources(data, db_alias):
    total_created = 0
    total_updated = 0

    logging.info('Processing %d sources' % len(data))

    from usda_mongo.models import Source

    with switch_db(Source, db_alias) as Source:

        for row in csv.DictReader(
            data, fieldnames=('src_cd', 'srccd_desc'),
            delimiter='^', quotechar='~'
        ):
            created = False

            try:
                source = Source.objects.get(code=int(row['src_cd']))
                total_updated += 1
            except Source.DoesNotExist:
                source = Source(code=int(row['src_cd']))
                total_created += 1
                created = True

            source.description = row['srccd_desc']
            source.save()

            if created:
                logging.debug('Created %s' % source)
            else:
                logging.debug('Updated %s' % source)

    logging.info('Created %d new sources' % total_created)
    logging.info('Updated %d sources' % total_updated)
Example #21
 def get_document_by_id(self, object_id):
     """
     Method that returns the document looked up by its object_id.
     """
     register_connections()
     with switch_db(self.model_class, OPAC_PROC_LOGS_DB_NAME):
         return self.model_class.objects.get(id=object_id)
Example #22
def main():
    register_connection(alias="default", name=TARGET_DB_NAME)

    with switch_db(Shred, "default") as TargetShred, \
         switch_db(Cluster, "default") as TargetCluster:

        client = pymongo.MongoClient()
        source_db = client[SOURCE_DB_NAME]

        for src_shred in source_db.shreds.find({}):
            new_shred, cluster = transform_shred(src_shred)

            shred_obj = TargetShred.objects.create(**new_shred)
            cluster_member = ClusterMember(shred=shred_obj, position=[0, 0],
                                           angle=0)
            cluster['members'] = [cluster_member]
            cluster_obj = TargetCluster.objects.create(**cluster)
Example #23
 def test_run_yaml(self):
     """
     Tests that the data is loaded correctly from a directory with a YAML-format config file
     """
     load_from_config.load_in_dir(path_string_yaml)
     with switch_db(Experiment, TEST_DB_ALIAS) as TestEx:
         query = TestEx.objects.all()
         self.assertEqual(len(query), 1)
         self.document_compare(query.first(), expected_experiment_yaml)
     with switch_db(DataSource, TEST_DB_ALIAS) as TestDs:
         query = TestDs.objects.all()
         self.assertEqual(len(query), 1)
         self.document_compare(query.first(), expected_datasource_yaml)
     with switch_db(Genotype, TEST_DB_ALIAS) as TestGen:
         query = TestGen.objects.all()
         self.assertEqual(len(query), 1)
         self.document_compare(query.first(), expected_genotype_yaml)
Example #24
 def get_transform_model_instance(self, query_dict):
     # fetch an instance of transform_model_class matching **query_dict.
     # if it does not exist, we raise an exception since the source data is missing
     with switch_db(self.transform_model_class, OPAC_PROC_DB_NAME) as transform_model_class:
         logger.debug(u'recuperando modelo: %s' % self.transform_model_name)
         self.transform_model_instance = transform_model_class.objects(**query_dict).first()
         logger.debug(u'modelo %s encontrado. query_dict: %s' % (self.transform_model_name, query_dict))
Example #25
 def tearDown(self):
     with switch_db(_Account, 'test') as Account:
         Account.drop_collection()
     a = AccountService.find_out_account("2014090801", '123456')
     if a is None:
         _Account(work_id="2014090801", username="******", email="*****@*****.**",
                  cell_phone_number="13303030033",
                  password=md5(bytes('123456', 'utf8')).hexdigest().upper()).save()
Example #26
 def test_undoes_db_changes_when_error_1(self):
     """
     Tests that load_from_config.run() removes all documents it saved to the database when it
     encounters an unexpected error
     """
     with open(path_string_json_full, 'w') as json_config:
         json_config.write(breaking_json)
     with self.assertRaises(KeyError):
         load_from_config.run()
     with switch_db(Experiment, TEST_DB_ALIAS) as TestEx:
         query = TestEx.objects.all()
         self.assertEqual(len(query), 0)
     with switch_db(DataSource, TEST_DB_ALIAS) as TestDs:
         query = TestDs.objects.all()
         self.assertEqual(len(query), 0)
     with switch_db(Genotype, TEST_DB_ALIAS) as TestGen:
         query = TestGen.objects.all()
         self.assertEqual(len(query), 0)
Example #27
 def test_fetch_or_save_id(self):
     """
     Tests fetch_or_save() fetches correctly when using db id
     """
     expected_experi_model.switch_db(TEST_DB_ALIAS)
     expected_experi_model.save()
     experi_id = expected_experi_model.id
     with switch_db(Experiment, TEST_DB_ALIAS) as TestEx:
         actual_experi = fetch_or_save(TestEx, TEST_DB_ALIAS, id=experi_id)
     self.assertEqual((expected_experi_model, False), actual_experi)
Example #28
 def do_delete_all(self):
     register_connections()
     with switch_db(self.model_class, OPAC_WEBAPP_DB_NAME):
         try:
             self.model_class.objects.delete()
         except Exception as e:
             traceback_str = traceback.format_exc()
             self._trigger_messages(is_error=True, exception_obj=e, traceback_str=traceback_str)
         else:
             self._trigger_messages()
Example #29
 def test_experiment_query_1(self):
     """
     Tests that the CSV of Experiment documents was correctly loaded into the database
     """
     with switch_db(Experiment, TEST_DB_ALIAS) as test_db:
         actual_model = test_db.objects.get(name="What is up")
     self.assertEqual(expected_experi_model.name, actual_model.name)
     self.assertEqual(expected_experi_model.pi, actual_model.pi)
     self.assertEqual(expected_experi_model.createddate, actual_model.createddate)
     self.assertEqual(expected_experi_model.description, actual_model.description)
     self.assertEqual(expected_experi_model.createdby, actual_model.createdby)
Example #30
 def test_data_source_query_1(self):
     """
     Tests that the CSV of DataSource documents was correctly loaded into the database
     """
     with switch_db(DataSource, TEST_DB_ALIAS) as test_db:
         actual_model = test_db.objects.get(name="What is up")
     self.assertEqual(expected_ds_model.name, actual_model.name)
     self.assertEqual(expected_ds_model.source, actual_model.source)
     self.assertEqual(expected_ds_model.supplier, actual_model.supplier)
     self.assertEqual(expected_ds_model.supplieddate, actual_model.supplieddate)
     self.assertEqual(expected_ds_model.is_active, actual_model.is_active)
Example #31
 def get(self, request):
     start_time, end_time, count = self.process_get_params(request)
     params = {
         'site_url': self.media_account.site_url,
         'date__gte': str(start_time.date()),
         'date__lt': str(end_time.date())
     }
     from mongoengine import context_managers
     from statistics.models import DailyMedia
     with context_managers.switch_db(DailyMedia,
                                     'statistics') as DailyMedia:
         stats = DailyMedia.objects(**params).order_by('-id')
     if not stats:
         return {}
     show_counts, read_counts, comment_counts, article_counts = \
             izip(*((stat.rec_count, stat.uclick_count, stat.comment_count, stat.new_count) for stat in stats))
     return dict(
         show_count=sum(stat_utils.normalize_count(c) for c in show_counts),
         read_count=sum(stat_utils.normalize_count(c) for c in read_counts),
         comment_count=sum(
             stat_utils.normalize_count(c) for c in comment_counts),
         article_count=sum(
             stat_utils.normalize_count(c) for c in article_counts))
Example #32
def del_block_bulk(bundle):
    with Mongo_Wrapper() as (alias, db_client):
        with switch_db(Block_db, alias) as Block_db_switch:
            for b in bundle:
                block_hash, log_handler = b
                try:
                    blocks_to_delete = Block_db_switch.objects(block_hash=block_hash)
                except CursorNotFound:
                    disconnect(alias)
                    raise DatabaseQueryFailed("Database query failed for get block hashes")

                if log_handler is not None:
                    log_handler("[-] Attempting to delete block with hash: {} from database".format(block_hash.hex()))
                if len(blocks_to_delete) == 1:
                    if log_handler is not None:
                        log_handler("[-] Block is already added to database. Deleting")
                    block_to_delete = blocks_to_delete[0]
                    block_to_delete.delete()
                    if log_handler is not None:
                        log_handler("[-] Block deleted")
                else:
                    if log_handler is not None:
                        log_handler("[-] Block is not in database.")
Example #33
    def handleMatch(self, m):
        _, _, file_type, file_id, wh, w, h = [m.group(i) for i in range(7)]

        # parse comments on wiki pages
        with switch_db(WikiFile, self.wiki_group) as _WikiFile:
            _wf = _WikiFile.objects(id=file_id).first()
        if _wf:
            self.wiki_files.append(_wf)
            if file_type == 'image':
                el = render_wiki_image(self.wiki_group,
                                       file_id,
                                       _wf.name,
                                       tostring=False)
                if w is not None and int(w) != 0:
                    el.attrib['width'] = w
                if h is not None and int(h) != 0:
                    el.attrib['height'] = h
            elif file_type == 'file' and wh is None:
                el = render_wiki_file(self.wiki_group,
                                      file_id,
                                      _wf.name,
                                      tostring=False)
            return el
Example #34
    def test_multiple_connections(self):
        db = MongoEngine()
        self.app.config['TESTING'] = True
        self.app.config['MONGODB_SETTINGS'] = [
            {
             "ALIAS": "default",
             "DB":    'my_db1',
             "HOST": 'localhost',
             "PORT": 27017
            },
            {
             "ALIAS": "my_db2",
             "DB": 'my_db2',
             "HOST": 'localhost',
             "PORT": 27017
            },
        ]
        class Todo(db.Document):
            title = db.StringField(max_length=60)
            text = db.StringField()
            done = db.BooleanField(default=False)
            meta = {"db_alias": "my_db2"}

        db.init_app(self.app)
        Todo.drop_collection()

        # Switch DB
        from mongoengine.context_managers import switch_db
        with switch_db(Todo, 'default') as Todo:
            todo = Todo()
            todo.text = "Sample"
            todo.title = "Testing"
            todo.done = True
            s_todo = todo.save()

            f_to = Todo.objects().first()
            self.assertEqual(s_todo.title, f_to.title)
Example #35
def task_delete_selected_news(selected_uuids):
    """
        Task para apagar News Carregados.
        @param:
        - selected_uuids: lista de UUIDs dos documentos a serem removidos

        Se a lista `selected_uuids` for maior a SLICE_SIZE
            A lista será fatiada em listas de tamanho: SLICE_SIZE
        Se a lista `selected_uuids` for < a SLICE_SIZE
            Será feito uma delete direto no queryset
    """

    stage = 'load'
    model = 'news'
    model_class = LoadNews
    get_db_connection()
    r_queues = RQueues()
    SLICE_SIZE = 1000

    if len(selected_uuids) > SLICE_SIZE:
        list_of_list_of_uuids = list(chunks(selected_uuids, SLICE_SIZE))
        for list_of_uuids in list_of_list_of_uuids:
            uuid_as_string_list = [str(uuid) for uuid in list_of_uuids]
            r_queues.enqueue(stage, model, task_delete_selected_news,
                             uuid_as_string_list)
    else:
        # remove the set of LoadNews documents identified by the uuids
        documents_to_delete = model_class.objects.filter(
            uuid__in=selected_uuids)
        documents_to_delete.delete()

        # convert the uuids to _id and filter those documents in OPAC
        register_connections()
        opac_pks = [str(uuid).replace('-', '') for uuid in selected_uuids]
        with switch_db(opac_models.News, OPAC_WEBAPP_DB_NAME) as opac_model:
            selected_opac_records = opac_model.objects.filter(pk__in=opac_pks)
            selected_opac_records.delete()
Example #36
def get_rt_articles_showable_data(articles):
    online_seq_ids = []
    langs = []
    articles_mapping = {}
    for article in articles:
        if not article.online_seq_id:
            continue
        online_seq_ids.append(str(article.online_seq_id))
        langs.append(article.language)
        articles_mapping[str(article.online_seq_id)] = {'article_id': str(article.id), 'lang': str(article.language)}
    if not online_seq_ids:
        return None
    from statistics.models import RealTimeArticle
    articles_showable_data = get_fixed_articles_showable_data_for_dict(online_seq_ids, set(langs),
            articles_mapping)
    with context_managers.switch_db(RealTimeArticle, 'statistics') as RealTimeArticle:
        rt_stat = RealTimeArticle.objects(online_seq_id__in=online_seq_ids,
                lang__in=set(langs))

    for rt_stat_one in rt_stat:
        if not articles_mapping[str(rt_stat_one.online_seq_id)]['lang'] == rt_stat_one.lang:
            continue
        key = articles_mapping[str(rt_stat_one.online_seq_id)]['article_id']
        if not articles_showable_data.get(key):
            articles_showable_data[key] = {
                    'show_count': 0,
                    'read_count': 0,
                    'comment_count': 0,
                    'share_count': 0,
                    'favorite_count': 0
                }
        articles_showable_data[key]['show_count'] += normalize_count(rt_stat_one.rec_count)
        articles_showable_data[key]['read_count'] += normalize_count(rt_stat_one.read_count)
        articles_showable_data[key]['comment_count'] += normalize_count(rt_stat_one.comment_count)
        articles_showable_data[key]['share_count'] += normalize_count(rt_stat_one.share_count)
        articles_showable_data[key]['favorite_count'] += normalize_count(rt_stat_one.favorite_count)
    return articles_showable_data
Example #37
    def test_switch_db_context_manager(self):
        connect("mongoenginetest")
        register_connection("testdb-1", "mongoenginetest2")

        class Group(Document):
            name = StringField()

        Group.drop_collection()

        Group(name="hello - default").save()
        assert 1 == Group.objects.count()

        with switch_db(Group, "testdb-1") as Group:

            assert 0 == Group.objects.count()

            Group(name="hello").save()

            assert 1 == Group.objects.count()

            Group.drop_collection()
            assert 0 == Group.objects.count()

        assert 1 == Group.objects.count()
Example #38
    def test_switch_db_context_manager(self):
        connect('mongoenginetest')
        register_connection('testdb-1', 'mongoenginetest2')

        class Group(Document):
            name = StringField()

        Group.drop_collection()

        Group(name="hello - default").save()
        self.assertEqual(1, Group.objects.count())

        with switch_db(Group, 'testdb-1') as Group:

            self.assertEqual(0, Group.objects.count())

            Group(name="hello").save()

            self.assertEqual(1, Group.objects.count())

            Group.drop_collection()
            self.assertEqual(0, Group.objects.count())

        self.assertEqual(1, Group.objects.count())
Example #39
def del_tx_reference(address, tx_reference, log_handler=None):
    tx_hash = bytes(tx_reference[0])
    tx_index = tx_reference[1]
    with Mongo_Wrapper() as (alias, db_client):
        with switch_db(TxReference_db, alias) as TxReference_db_switch:
            if log_handler is not None:
                log_handler("[-] Attempting to delete tx reference: {}:{}".format(tx_hash.hex(), tx_index))

            try:
                tx_reference_docs = TxReference_db_switch.objects(tx_hash=tx_hash, tx_index=tx_index)
            except CursorNotFound:
                disconnect(alias)
                raise DatabaseQueryFailed("Database query failed for tx reference {}:{} with address: {}".format(tx_hash.hex(), tx_index, address.hex()))

            if len(tx_reference_docs) == 1:
                tx_reference_doc = tx_reference_docs[0]
                if log_handler is not None:
                    log_handler("[-] Tx reference {}:{} is already added to database".format(tx_hash.hex(), tx_index))
                tx_reference_doc.delete()
                if log_handler is not None:
                    log_handler("[-] Tx reference {}:{} deleted".format(tx_hash.hex(), tx_index))
            else:
                if log_handler is not None:
                    log_handler("[-] Tx reference {}:{} is not in database".format(tx_hash.hex(), tx_index))
Example #40
    def update_content(self, group, md, html, toc):
        """Update page content and make other changes accordingly.
        
        :param group: group name (no whitespace)
        :param md: markdown
        :param html: html rendered from `md`
        :param toc: table of contents generated based on headings in `md`
        """
        self.html = html
        self.toc = toc
        diff = unified_diff.make_patch(self.md, md)
        if diff:
            pv = WikiPageVersion(diff, self.current_version, self.modified_on,
                                 self.modified_by).switch_db(group).save()
            self.versions.append(pv)
            self.md = md
            self.modified_on = datetime.now()
            self.modified_by = current_user.name
            self.current_version += 1

            with switch_db(WikiCache, group) as _WikiCache:
                _cache = _WikiCache.objects.only('changes_id_title').first()
                _cache.add_changed_page(self.id, self.title, self.modified_on)
        self.save()
Example #41
def get_fixed_articles_showable_data_for_dict(online_seq_ids, langs, articles_mapping):
    from statistics.models import DailyArticle
    with context_managers.switch_db(DailyArticle, 'statistics') as DailyArticle:
        stats = DailyArticle.objects(online_seq_id__in=online_seq_ids,
                lang__in=langs)
    fixed_articles_showable_data = {}
    for stat in stats:
        if not articles_mapping[str(stat.online_seq_id)]['lang'] == stat.lang:
            continue
        key = articles_mapping[str(stat.online_seq_id)]['article_id']
        if not fixed_articles_showable_data.get(key):
            fixed_articles_showable_data[key] = {
                    'show_count': 0,
                    'read_count': 0,
                    'comment_count': 0,
                    'share_count': 0,
                    'favorite_count': 0
                }
        fixed_articles_showable_data[key]['show_count'] += normalize_count(stat.rec_count)
        fixed_articles_showable_data[key]['read_count'] += normalize_count(stat.uclick_count)
        fixed_articles_showable_data[key]['comment_count'] += normalize_count(stat.comment_count)
        fixed_articles_showable_data[key]['share_count'] += normalize_count(stat.share_count)
        fixed_articles_showable_data[key]['favorite_count'] += normalize_count(stat.favorite_count)
    return fixed_articles_showable_data
Example #42
 def create(self, *args, **kwargs):
     using = kwargs.pop('_using', None)
     if using:
         with switch_db(self._document, using) as cls:
             return cls(*args, **kwargs).save()
     return super(FixMultiDbQset, self).create(*args, **kwargs)
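
A queryset override like this would be wired in through mongoengine's queryset_class meta option and used by threading the extra alias through create(); hypothetical usage, with Todo standing in for any document class that adopts FixMultiDbQset:

    class Todo(Document):
        title = StringField()
        meta = {'queryset_class': FixMultiDbQset}

    # saved through the 'archive-db' alias instead of the model's default
    todo = Todo.objects.create(_using='archive-db', title='Ship it')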
Example #43
 def test_experiment_query_2(self):
     with switch_db(Experiment, TEST_DB_ALIAS) as test_db:
         with self.assertRaises(test_db.DoesNotExist):
             test_db.objects.get(name="Wort is up")
Example #44
 def do_delete_all(self):
     register_connections()
     with switch_db(self.model_class, OPAC_PROC_LOGS_DB_NAME):
         super(TransformLogListView, self).do_delete_all()
Example #45
 def get_sessions(hyperstream):
     with switch_db(SessionModel, "hyperstream"):
         for s in SessionModel.objects():
             yield Session(hyperstream=hyperstream, model=s)
Example #46
    def load_workflow(self, workflow_id):
        """
        Load workflow from the database and store in memory
        :param workflow_id: The workflow id
        :return: The workflow
        """
        with switch_db(WorkflowDefinitionModel, db_alias='hyperstream'):
            workflow_definition = WorkflowDefinitionModel.objects.get(
                workflow_id=workflow_id)
            if not workflow_definition:
                logging.warn(
                    "Attempted to load workflow with id {}, but not found".
                    format(workflow_id))

            workflow = Workflow(workflow_id=workflow_id,
                                name=workflow_definition.name,
                                description=workflow_definition.description,
                                owner=workflow_definition.owner,
                                online=workflow_definition.online,
                                monitor=workflow_definition.monitor)

            for n in workflow_definition.nodes:
                workflow.create_node(
                    stream_name=n.stream_name,
                    channel=self.channel_manager.get_channel(n.channel_id),
                    plates=[self.plate_manager.plates[p] for p in n.plate_ids])

            for f in workflow_definition.factors:
                source_nodes = [
                    workflow.nodes[node_id] for node_id in f.sources
                ] if f.sources else []
                sink_nodes = [workflow.nodes[node_id]
                              for node_id in f.sinks] if f.sinks else []
                alignment_node = workflow.nodes[
                    f.alignment_node] if f.alignment_node else None
                splitting_node = workflow.nodes[
                    f.splitting_node] if f.splitting_node else None
                output_plate = f.output_plate

                parameters = Tool.parameters_from_model(f.tool.parameters)
                # tool = dict(name=f.tool.name, parameters=parameters)
                tool = self.channel_manager.get_tool(f.tool.name,
                                                     parameters,
                                                     version=None)

                if f.factor_type == "Factor":
                    if len(sink_nodes) != 1:
                        raise ValueError(
                            "Standard factors should have a single sink node, received {}"
                            .format(len(sink_nodes)))

                    if splitting_node is not None:
                        raise ValueError(
                            "Standard factors do not support splitting nodes")

                    if output_plate is not None:
                        raise ValueError(
                            "Standard factors do not support output plates")

                    workflow.create_factor(tool=tool,
                                           sources=source_nodes,
                                           sink=sink_nodes[0],
                                           alignment_node=alignment_node)

                elif f.factor_type == "MultiOutputFactor":
                    if len(source_nodes) > 1:
                        raise ValueError(
                            "MultiOutputFactor factors should have at most one source node, received {}"
                            .format(len(source_nodes)))

                    if len(sink_nodes) != 1:
                        raise ValueError(
                            "MultiOutputFactor factors should have a single sink node, received {}"
                            .format(len(sink_nodes)))

                    if alignment_node is not None:
                        raise ValueError(
                            "MultiOutputFactor does not support alignment nodes"
                        )

                    if output_plate is not None:
                        raise ValueError(
                            "MultiOutputFactor does not support output plates")

                    workflow.create_multi_output_factor(
                        tool=tool,
                        source=source_nodes[0] if source_nodes else None,
                        splitting_node=splitting_node,
                        sink=sink_nodes[0])

                elif f.factor_type == "NodeCreationFactor":
                    if len(source_nodes) > 1:
                        raise ValueError(
                            "NodeCreationFactor factors should no more than one source node, received {}"
                            .format(len(source_nodes)))

                    if len(sink_nodes) != 0:
                        raise ValueError(
                            "NodeCreationFactor factors should not have sink nodes"
                            .format(len(sink_nodes)))

                    if output_plate is None:
                        raise ValueError(
                            "NodeCreationFactor requires an output plate definition"
                        )

                    workflow.create_node_creation_factor(
                        tool=tool,
                        source=source_nodes[0] if source_nodes else None,
                        output_plate=output_plate.to_mongo().to_dict(),
                        plate_manager=self.plate_manager)

                else:
                    raise NotImplementedError(
                        "Unsupported factor type {}".format(f.factor_type))

            self.add_workflow(workflow, False)
            return workflow
Example #47
 def register(self, user_name, user_password, user_email):
     # try:
     master_exists = self.is_user_exists(user_name)
     local_ip = host_controller().get_local_host_ip()
     print(local_ip)
     if master_exists:
         # print("注册失败,用户已存在")
         return {
             "result": {
                 "resultCode": "20000299",
                 "resultMessage": "用户注册失败,用户已存在"
             }
         }
     else:
         if master_ip == "127.0.0.1":
             with switch_db(user_info, 'local'):
                 new_user = user_info(
                     user_name=user_name,
                     user_password=generate_password_hash(user_password),
                     user_token=self.generate_auth_token(user_name).decode("utf-8"),
                     user_email=user_email,
                     user_role=1,
                     user_from=local_ip,
                     create_time=datetime.datetime.now(),
                     update_time=datetime.datetime.now()
                 )
                 print("local")
                 new_user.save()
             return {
                 "result": {
                     "resultCode": "20000200",
                     "resultMessage": "用户注册成功"
                 }
             }
         else:
             with switch_db(user_info, 'local'):
                 new_user = user_info(
                     user_name=user_name,
                     user_password=generate_password_hash(user_password),
                     user_token=self.generate_auth_token(user_name).decode("utf-8"),
                     user_role=1,
                     user_from=local_ip,
                     create_time=datetime.datetime.now(),
                     update_time=datetime.datetime.now()
                 )
                 print("local")
                 new_user.save()
             with switch_db(user_info, 'remote'):
                 a_user = user_info(
                     user_name=user_name,
                     user_password=generate_password_hash(user_password),
                     user_token=self.generate_auth_token(user_name).decode("utf-8"),
                     user_role=1,
                     user_from=local_ip,
                     create_time=datetime.datetime.now(),
                     update_time=datetime.datetime.now()
                 )
                 a_user.save()
             return {
                 "result": {
                     "resultCode": "20000200",
                     "resultMessage": "用户注册成功"
                 }
             }
Example #48
 def delete(self):
     with switch_db(Session, self.database) as _Session:
         sess = _Session.objects(auth_key=self._auth_key.key).first()
         if sess:
             sess.delete()
Example #49
    home_page_c = get_html_text('mock_html/cctv/home.html')
    mock_request.get('http://english.cctv.com/', text=home_page_c)

    article_1_c = get_html_text('mock_html/cctv/main-article.html')
    mock_request.get('http://english.cctv.com/main-article.html', text=article_1_c)

    article_2_c = get_html_text('mock_html/cctv/side-article.html')
    mock_request.get('http://english.cctv.com/side-article.html', text=article_2_c)

    home_page_d = get_html_text('mock_html/dw/home.html')
    mock_request.get('https://www.dw.com/en/top-stories/s-9097', text=home_page_d)

    article_1_d = get_html_text('mock_html/dw/main-article.html')
    mock_request.get('https://www.dw.com/en/top-stories/s-9097/main-article', text=article_1_d)

    article_2_d = get_html_text('mock_html/dw/side-article.html')
    mock_request.get('https://www.dw.com/en/top-stories/s-9097/side-article', text=article_2_d)

    plotly.plotly.plot = MagicMock(return_value=None)

def tearDownModule():
    Country.drop_collection()
    Article.drop_collection()

if __name__ == '__main__':
    connect('test-db', alias='test')
    with requests_mock.Mocker() as mock_request:
        with switch_db(Article, 'test') as Article:
            with switch_db(Country, 'test') as Country:
                unittest.main()
Example #50
                           class_id=class_id,
                           date=get_date()):
            remarks_info = Remarks.objects(class_num=class_num,
                                           class_id=class_id,
                                           date=get_date()).first()
            return remarks_info['remarks']
        else:
            return '无'  # '无' = "none"


if __name__ == '__main__':
    while True:
        time.sleep(10)
        students_connect_info = {}  # check-in info for all students

        with switch_db(Student, 'local_db') as Student:
            students = Student.objects(class_id=class_id)
            for student in students:
                student_connect_info = {}  # check-in info for a single student
                student_address_mac = student['address_mac']
                student_name = student['name']
                student_id = student['student_id']
                student_connect_info['student_id'] = student_id
                student_connect_info['name'] = student_name
                student_connect_info['class_id'] = class_id
                student_connect_info['address_mac'] = student_address_mac
                student_connect_info['status'] = '0'
                students_connect_info[
                    student_address_mac] = student_connect_info

        macs = get_macs()
Example #51
 def get_entity_rows_by_phone(self, phone):
     with switch_db(Entity, self.database) as _Entity:
         return _Entity.objects(phone=phone)
Example #52
 def query_by_pi(self):
     self.search_term = self.request.GET['search_pi'].strip()
     with switch_db(Experiment, self.db_alias) as db:
         query = db.objects if self.search_list is None else self.search_list
         self.search_list = query.filter(
             __raw__=self.raw_query_dict("pi", self.search_term))
Example #53
 def list_sessions(cls):
     with switch_db(Session, cls.database) as _Session:
         return _Session.objects
Example #54
 def test_data_source_query_2(self):
     with switch_db(DataSource, TEST_DB_ALIAS) as test_db:
         with self.assertRaises(test_db.DoesNotExist):
             test_db.objects.get(name="Wort is up")
Example #55
    def commit_workflow(self, workflow_id):
        """
        Commit the workflow to the database
        :param workflow_id: The workflow id
        :return: None
        """
        # TODO: We should also be committing the Stream definitions if there are new ones

        workflow = self.workflows[workflow_id]

        with switch_db(WorkflowDefinitionModel, "hyperstream"):
            workflows = WorkflowDefinitionModel.objects(
                workflow_id=workflow_id)
            if len(workflows) > 0:
                logging.warn(
                    "Workflow with id {} already exists in database".format(
                        workflow_id))
                return

            factors = []
            for f in workflow.factors:
                tool = f.tool.get_model()

                if isinstance(f, Factor):
                    sources = [s.node_id
                               for s in f.sources] if f.sources else []
                    sinks = [f.sink.node_id]
                    alignment_node = f.alignment_node.node_id if f.alignment_node else None
                    splitting_node = None
                    output_plate = None

                elif isinstance(f, MultiOutputFactor):
                    sources = [f.source.node_id] if f.source else []
                    sinks = [f.sink.node_id]
                    alignment_node = None
                    splitting_node = f.splitting_node.node_id if f.splitting_node else None
                    output_plate = None

                elif isinstance(f, NodeCreationFactor):
                    sources = [f.source.node_id] if f.source else []
                    sinks = []
                    alignment_node = None
                    splitting_node = None
                    output_plate = f.output_plate

                else:
                    raise NotImplementedError("Unsupported factor type")

                if output_plate:
                    output_plate_copy = deepcopy(output_plate)
                    if 'parent_plate' in output_plate_copy:
                        del output_plate_copy['parent_plate']
                else:
                    output_plate_copy = None

                factor = FactorDefinitionModel(
                    tool=tool,
                    factor_type=f.__class__.__name__,
                    sources=sources,
                    sinks=sinks,
                    alignment_node=alignment_node,
                    splitting_node=splitting_node,
                    output_plate=output_plate_copy)

                factors.append(factor)

            nodes = []
            for n in workflow.nodes.values():
                nodes.append(
                    NodeDefinitionModel(stream_name=n.node_id,
                                        plate_ids=n.plate_ids,
                                        channel_id=n._channel.channel_id))

            workflow_definition = WorkflowDefinitionModel(
                workflow_id=workflow.workflow_id,
                name=workflow.name,
                description=workflow.description,
                nodes=nodes,
                factors=factors,
                owner=workflow.owner,
                online=workflow.online,
                monitor=workflow.monitor)

            workflow_definition.save()

        with switch_db(WorkflowStatusModel, db_alias='hyperstream'):
            workflow_status = WorkflowStatusModel(
                workflow_id=workflow.workflow_id,
                last_updated=utcnow(),
                last_accessed=utcnow(),
                requested_intervals=[])

            workflow_status.save()

        self.uncommitted_workflows.remove(workflow_id)
        logging.info("Committed workflow {} to database".format(workflow_id))
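Both with switch_db(...) blocks above skip the as binding yet keep using the original class names. That works because switch_db temporarily rewrites the model's db_alias (and resets its cached collection) in place rather than returning a copy. A minimal sketch of the behaviour, with illustrative aliases:

with switch_db(WorkflowDefinitionModel, 'hyperstream'):
    WorkflowDefinitionModel.objects.first()  # served via the 'hyperstream' alias
WorkflowDefinitionModel.objects.first()      # back on the default alias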
Example #56
 def test_experiment_query_3(self):
     with switch_db(Experiment, TEST_DB_ALIAS) as test_db:
         with self.assertRaises(test_db.MultipleObjectsReturned):
             test_db.objects.get(description="Hey man")
Example #57
 def get_entity_rows_by_name(self, name):
     with switch_db(Entity, self.database) as _Entity:
         return _Entity.objects(name=name)
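One caveat when returning a QuerySet out of a switch_db block: QuerySets are lazy, so any evaluation deferred past the context relies on the collection handle captured at creation time. A defensive variant, assuming the same Entity model, forces evaluation before the alias is restored:

 def get_entity_rows_by_name(self, name):
     with switch_db(Entity, self.database) as _Entity:
         # list() runs the query while the aliased connection is active.
         return list(_Entity.objects(name=name))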
Example #58
 def do_delete_selected(self, ids):
     register_connections()
     with switch_db(self.model_class, OPAC_PROC_LOGS_DB_NAME):
         super(LoadLogListView, self).do_delete_selected(ids)
Example #59
 def get_objects(self):
     register_connections()
     with switch_db(self.model_class, OPAC_PROC_LOGS_DB_NAME):
         objects = super(LoadLogListView, self).get_objects()
         return objects.order_by('-time')
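register_connections is not shown in these two snippets; given that both views call it before switch_db, it presumably (re-)registers the logs alias, which is safe to repeat. A hypothetical sketch:

from mongoengine import register_connection

OPAC_PROC_LOGS_DB_NAME = 'opac_proc_logs'  # illustrative alias/database name

def register_connections():
    # Re-registering an alias simply overwrites its settings entry, so calling
    # this from every view is harmless.
    register_connection(OPAC_PROC_LOGS_DB_NAME, name=OPAC_PROC_LOGS_DB_NAME,
                        host='localhost', port=27017)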
Example #60
    def make_package(self):
        try:
            # Create .kit package
            self.logger.info('Creating .kit package')
            root_dir = 'temp'
            local_directory = f'{root_dir}/{self.project_id}/'
            local_files_directory = f'{local_directory}files/'
            local_data_directory = f'{local_directory}data/'
            compressed_file = f'{root_dir}/{self.project_id}-v{self.publish_stats.version}.kit'

            # Download production files
            runtime = self.copy(
                f's3://{self.buckets.production}/{self.prod_project_dir}/v{self.publish_stats.version}/',
                local_files_directory,
                action='cp',
                throw=True,
                recursive=True)

            # TODO: Delete dynamic .html and .html.dl files from this directory
            # TODO: Remove IsArchived = true files
            self.logger.info(f'Downloaded production to local in {runtime}s')

            if not os.path.exists(local_data_directory):
                os.mkdir(local_data_directory)

            # Add Routing
            routing_query = [
                os.environ.get('PROD_RESOURCE_COLLECTION_NAME',
                               'new_KitsuneResourcesProduction'),
                json.dumps({'ProjectId': self.project_id})
            ]
            routing_response = requests.post(os.environ['ROUTING_API'],
                                             data='\n'.join(routing_query))

            trees = {}
            if routing_response.ok:
                trees = routing_response.json()

            if trees == {}:
                self.logger.error('No routes found')
            with open(f'{local_data_directory}routes.json',
                      'w') as routes_file:
                self.logger.info('.kit: adding routes')
                json.dump(trees, routes_file)

            # Add Resources
            with open(f'{local_data_directory}resources.json',
                      'w') as resources_file:
                resources: QuerySet = KitsuneResourcesProduction.objects(
                    project_id=self.project_id)
                self.logger.info('.kit: adding resources')
                json.dump({'data': json.loads(resources.to_json())},
                          resources_file)

            # Add Website (User + Website + DNS + Project)
            with open(f'{local_data_directory}websites.json',
                      'w') as websites_file:
                website_data = list(
                    get_website_data(self.publish_stats.CustomerIds))
                self.logger.info('.kit: adding websites')
                json.dump({'data': website_data}, websites_file)

            try:
                # Add Schema Definition
                if self.project.schema_id:
                    with switch_db(KitsuneLanguage,
                                   'schema-db') as KitsuneSchema:
                        schema = json.loads(
                            KitsuneSchema.objects(
                                id=self.project.schema_id).first().to_json())
                        with open(f'{local_data_directory}schema.json',
                                  'w') as schema_file:
                            self.logger.info('.kit: adding schema')
                            json.dump({'data': schema}, schema_file)
                else:
                    self.logger.info(
                        f'Project: {self.project_id} has no schema')
            except Exception as e:
                self.logger.warning(
                    f'Unable to get schema with id {self.project.schema_id} due to: {e}'
                )

            # Add Manifest
            with open(f'{local_directory}manifest.json', 'w') as manifest_file:
                self.logger.info('.kit: adding manifest')
                components = self.project[
                    'Components'] if 'Components' in self.project else []

                manifest = {
                    'ManifestVersion': '1.0.0',
                    'KitsuneRuntime': '1.0.0',
                    'Components': components,
                    'CreatedOn':
                    datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
                    'Developer': self.project.user_email
                }

                json.dump(manifest, manifest_file)

            # Add to zip
            with zipfile.ZipFile(compressed_file, 'w',
                                 zipfile.ZIP_DEFLATED) as zip_file:
                directory_zip(local_directory, '.', zip_file)

            # Copy to Prod / Packages
            runtime = self.copy(
                compressed_file,
                f's3://{self.buckets.production}/{self.prod_project_dir}/packages/',
                action='cp',
                throw=True)
            self.logger.info(
                f'Copied .kit to production/packages in {runtime}s')

            # Remove local files
            shutil.rmtree(root_dir)

            # Encrypt Message
            dotkit_package_url = f'https://s3.{os.environ["AWS_BUCKET_REGION"]}.amazonaws.com/' \
                                 f'{self.buckets.production}/{self.project_id}/packages/' \
                                 f'{self.project_id}-v{self.publish_stats.version}.kit'

            deployment_message = {
                'kitfileUrl': dotkit_package_url,
                'providerSettings': self.cloud_config,
            }

            # Send to Cloud Orchestrator SQS
            sqs_client = boto3.client(
                'sqs', region_name=os.environ['AWS_ORCH_QUEUE_REGION'])
            response = sqs_client.send_message(
                QueueUrl=os.environ['ORCHESTRATOR_QUEUE_URL'],
                MessageBody=json.dumps(deployment_message))
            self.logger.info(f'Sent to orchestrator queue: {response}')
        except Exception as e:
            self.logger.error(f'Error in creating .kit package due to {e}')
            raise
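directory_zip is not defined in this snippet; a plausible implementation, hypothetical but matching the call directory_zip(local_directory, '.', zip_file), walks the tree and stores each file under an archive-relative path:

import os

def directory_zip(src_dir, arc_root, zip_file):
    # Hypothetical helper: add every file under src_dir to the open ZipFile,
    # rooted at arc_root inside the archive.
    for dirpath, _dirnames, filenames in os.walk(src_dir):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            rel_path = os.path.relpath(full_path, src_dir)
            zip_file.write(full_path, os.path.join(arc_root, rel_path))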