Example #1
    def import_obj(cls, slc_to_import, slc_to_override, import_time=None):
        """Inserts or overrides slc in the database.

        remote_id and import_time fields in params_dict are set to track the
        slice origin and ensure correct overrides for multiple imports.
        Slice.perm is used to find the datasources and connect them.

        :param Slice slc_to_import: Slice object to import
        :param Slice slc_to_override: Slice to replace, id matches remote_id
        :returns: The resulting id for the imported slice
        :rtype: int
        """
        session = db.session
        make_transient(slc_to_import)
        slc_to_import.dashboards = []
        slc_to_import.alter_params(
            remote_id=slc_to_import.id, import_time=import_time)

        slc_to_import = slc_to_import.copy()
        params = slc_to_import.params_dict
        slc_to_import.datasource_id = ConnectorRegistry.get_datasource_by_name(
            session, slc_to_import.datasource_type, params['datasource_name'],
            params['schema'], params['database_name']).id
        if slc_to_override:
            slc_to_override.override(slc_to_import)
            session.flush()
            return slc_to_override.id
        session.add(slc_to_import)
        logging.info('Final slice: {}'.format(slc_to_import.to_json()))
        session.flush()
        return slc_to_import.id
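
For orientation, the clone-and-reinsert idiom that this method (and most of the examples below) builds on can be reduced to a short sketch. The helper below is illustrative, not Superset code; obj stands for any mapped instance with an autoincrement id.

from sqlalchemy.orm import make_transient

def clone_row(session, obj):
    session.expunge(obj)   # detach the instance from the session's identity map
    make_transient(obj)    # drop its persistent identity; loaded attribute values are kept
    obj.id = None          # clear the primary key so the database assigns a new one
    session.add(obj)
    session.flush()        # obj.id now holds the freshly generated key
    return obj.id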
Example #2
 def cash_instances_to_dict(self, list_of_classes, do_make_transient=False):
     # Close the session, open a new one and read all the classes into it, then detach
     # the instances from the session and return them as a dictionary. Hopefully this
     # lets us work with them for as long as we like.
     # I considered keeping an option to leave the session open, but decided against it;
     # worst case a separate session can be created, and two concurrent sessions are risky anyway.
     self.close_session()
     self.private_activate_session()
     dict_with_instances = dict()
     for cls_i in list_of_classes:  # not sure whether relationships need any special handling here
         repr_cls_i = with_polymorphic(cls_i, '*')
         inst_list = []
         for inst_i in self.active_session.query(repr_cls_i).options(immediateload('*')).all():
             #if not(inst_i in inst_list):
             inst_list.append(inst_i)
         dict_with_instances[cls_i.__name__] = inst_list
     self.active_session.expunge_all()  # this is exactly why we close the session before starting
     for inst_list in dict_with_instances.itervalues():
         for inst_i in inst_list:
             if hasattr(inst_i, "disconnected_from_session"):
                 raise BaseException("[c_session_handler][cash_instances_to_dict] you cannot use 'disconnected_from_session' attribute in a class here")
             inst_i.disconnected_from_session = True
             if do_make_transient:  # without this it may still try to hit the database
                 make_transient(inst_i)
     self.close_session()
     return dict_with_instances
Example #3
 def _copy(self):
     schema = NavigationCopySchema().bind(request=self.request)
     try:
         controls = schema.deserialize(self.request.params)
         (
             DBSession.query(Navigation)
             .filter(
                 Navigation.condition_position_id(
                     controls.get('position_id')
                 )
             )
             .delete()
         )
         navigations_from = (
             DBSession.query(Navigation)
             .filter(
                 Navigation.condition_position_id(
                     controls.get('from_position_id')
                 )
             )
         )
         for navigation in navigations_from:
             make_transient(navigation)
             navigation.id = None
             navigation.position_id = controls.get('position_id')
             DBSession.add(navigation)
         return {'success_message': _(u'Copied')}
     except colander.Invalid, e:
         return {
             'error_message': _(u'Please, check errors'),
             'errors': e.asdict()
         }
Example #4
def copy_from_position(source_position_id, target_position_id):
    assert isinstance(source_position_id, int), \
        u"Integer expected"
    assert isinstance(target_position_id, int), \
        u"Integer expected"

    (
        DBSession.query(Permision)
        .filter(
            Permision.condition_position_id(target_position_id)
        )
        .delete()
    )
    permisions_from = (
        DBSession.query(Permision)
        .filter(
            Permision.condition_position_id(source_position_id)
        )
    )
    for permision in permisions_from:
        make_transient(permision)
        permision.id = None
        permision.position_id = target_position_id
        DBSession.add(permision)
    return
Example #5
    def run(self):
        if statsd:
            statsd.incr(self.__class__.__name__.lower()+'_start', 1, 1)
        # Adding an entry in the DB
        session = settings.Session()
        self.state = State.RUNNING
        if not self.id: 
            session.add(self)
        else:
            session.merge(self)
        session.commit()
        id_ = self.id
        make_transient(self)
        self.id = id_

        # Run
        self._execute()
        #better to submit tasks and poll for completion... 

        # Marking the success in the DB
        self.end_date = datetime.now()
        self.state = State.SUCCESS
        session.merge(self)
        session.commit()
        session.close()

        if statsd:
            statsd.incr(self.__class__.__name__.lower()+'_end', 1, 1)
Example #6
    def test_import_dashboard_1_slice(self):
        slc = self.create_slice('health_slc', id=10006)
        dash_with_1_slice = self.create_dashboard(
            'dash_with_1_slice', slcs=[slc], id=10002)
        dash_with_1_slice.position_json = """
            {{"DASHBOARD_VERSION_KEY": "v2",
              "DASHBOARD_CHART_TYPE-{0}": {{
                "type": "DASHBOARD_CHART_TYPE",
                "id": {0},
                "children": [],
                "meta": {{
                  "width": 4,
                  "height": 50,
                  "chartId": {0}
                }}
              }}
            }}
        """.format(slc.id)
        imported_dash_id = models.Dashboard.import_obj(
            dash_with_1_slice, import_time=1990)
        imported_dash = self.get_dash(imported_dash_id)

        expected_dash = self.create_dashboard(
            'dash_with_1_slice', slcs=[slc], id=10002)
        make_transient(expected_dash)
        self.assert_dash_equals(
            expected_dash, imported_dash, check_position=False)
        self.assertEquals({'remote_id': 10002, 'import_time': 1990},
                          json.loads(imported_dash.json_metadata))

        expected_position = dash_with_1_slice.position
        self.assertEquals(expected_position, imported_dash.position)
Example #7
def copy_notification(id):
    notification = Notification.query.filter_by(id=id).first_or_404()
    desc = notification.description

    if notification.user != current_user and not current_user.is_admin():
        abort(403)

    notification.id = None
    notification.description = desc + ' Clone'
    make_transient(notification)

    db.session.add(notification)
    db.session.commit()

    old_settings = NotificationSetting.query.filter_by(notification_id=id).all()

    for s in old_settings:
        s.id = None
        s.notification_id = notification.id
        make_transient(s)
        db.session.add(s)

    db.session.commit()

    current_app.decoder.refresh_notifier(notification.id)

    flash('Notification cloned.', 'success')
    return redirect(url_for('notifications.index'))
Example #8
    def test_import_dashboard_1_slice(self):
        slc = self.create_slice('health_slc', id=10006)
        dash_with_1_slice = self.create_dashboard(
            'dash_with_1_slice', slcs=[slc], id=10002)
        dash_with_1_slice.position_json = """
            [{{
                "col": 5,
                "row": 10,
                "size_x": 4,
                "size_y": 2,
                "slice_id": "{}"
            }}]
        """.format(slc.id)
        imported_dash_id = models.Dashboard.import_obj(
            dash_with_1_slice, import_time=1990)
        imported_dash = self.get_dash(imported_dash_id)

        expected_dash = self.create_dashboard(
            'dash_with_1_slice', slcs=[slc], id=10002)
        make_transient(expected_dash)
        self.assert_dash_equals(
            expected_dash, imported_dash, check_position=False)
        self.assertEquals({"remote_id": 10002, "import_time": 1990},
                          json.loads(imported_dash.json_metadata))

        expected_position = dash_with_1_slice.position_array
        expected_position[0]['slice_id'] = '{}'.format(
            imported_dash.slices[0].id)
        self.assertEquals(expected_position, imported_dash.position_array)
Example #9
    def import_obj(cls, slc_to_import, import_time=None):
        """Inserts or overrides slc in the database.

        remote_id and import_time fields in params_dict are set to track the
        slice origin and ensure correct overrides for multiple imports.
        Slice.perm is used to find the datasources and connect them.
        """
        session = db.session
        make_transient(slc_to_import)
        slc_to_import.dashboards = []
        slc_to_import.alter_params(
            remote_id=slc_to_import.id, import_time=import_time)

        # find if the slice was already imported
        slc_to_override = None
        for slc in session.query(Slice).all():
            if ('remote_id' in slc.params_dict and
                    slc.params_dict['remote_id'] == slc_to_import.id):
                slc_to_override = slc

        slc_to_import = slc_to_import.copy()
        params = slc_to_import.params_dict
        slc_to_import.datasource_id = ConnectorRegistry.get_datasource_by_name(
            session, slc_to_import.datasource_type, params['datasource_name'],
            params['schema'], params['database_name']).id
        if slc_to_override:
            slc_to_override.override(slc_to_import)
            session.flush()
            return slc_to_override.id
        session.add(slc_to_import)
        logging.info('Final slice: {}'.format(slc_to_import.to_json()))
        session.flush()
        return slc_to_import.id
Example #10
def restore(request, coupon_id):
    if coupon_id:
        coupon_id = int(coupon_id)
        if request.user.is_focus:
            coupon = request.db_session.query(Coupon).filter_by(id=coupon_id, is_deleted=True).first()
        else:
            coupon = request.db_session.query(Coupon).filter_by(id=coupon_id, is_deleted=True, company_id=request.user.company_id).first()
        try:
            coupon.modified_by_id = request.user.id
            request.db_session.commit()
            request.db_session.expunge(coupon)

            make_transient(coupon)
            coupon.main_coupon_id = coupon.main_coupon_id if coupon.main_coupon_id else coupon.id
            coupon.id = None
            coupon.restore()

            request.db_session.add(coupon)
            request.db_session.commit()
            response = json_response_content('success', 'Coupon was successfully restored!')
        except Exception as e:
            request.db_session.rollback()
            response = json_response_content('error', 'There was an error during coupon restore: {0}'.format(str(e)))
        return JsonResponse(response)
    else:
        raise Http404
Example #11
    def test_import_override_dashboard_2_slices(self):
        e_slc = self.create_slice('e_slc', id=10009, table_name='energy_usage')
        b_slc = self.create_slice('b_slc', id=10010, table_name='birth_names')
        dash_to_import = self.create_dashboard(
            'override_dashboard', slcs=[e_slc, b_slc], id=10004)
        imported_dash_id_1 = models.Dashboard.import_obj(
            dash_to_import, import_time=1992)

        # create new instances of the slices
        e_slc = self.create_slice(
            'e_slc', id=10009, table_name='energy_usage')
        b_slc = self.create_slice(
            'b_slc', id=10010, table_name='birth_names')
        c_slc = self.create_slice('c_slc', id=10011, table_name='birth_names')
        dash_to_import_override = self.create_dashboard(
            'override_dashboard_new', slcs=[e_slc, b_slc, c_slc], id=10004)
        imported_dash_id_2 = models.Dashboard.import_obj(
            dash_to_import_override, import_time=1992)

        # override doesn't change the id
        self.assertEquals(imported_dash_id_1, imported_dash_id_2)
        expected_dash = self.create_dashboard(
            'override_dashboard_new', slcs=[e_slc, b_slc, c_slc], id=10004)
        make_transient(expected_dash)
        imported_dash = self.get_dash(imported_dash_id_2)
        self.assert_dash_equals(
            expected_dash, imported_dash, check_position=False)
        self.assertEquals({"remote_id": 10004, "import_time": 1992},
                          json.loads(imported_dash.json_metadata))
Example #12
    def test_transient_exception(self):
        """An object that goes from a pk value to transient/pending
        doesn't count as a "pk" switch.

        """

        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)

        mapper(User, users)
        mapper(Address, addresses, properties={'user':relationship(User)})

        sess = create_session()
        u1 = User(id=5, name='u1')
        ad1 = Address(email_address='e1', user=u1)
        sess.add_all([u1, ad1])
        sess.flush()

        make_transient(u1)
        u1.id = None
        u1.username='******'
        sess.add(u1)
        sess.flush()

        eq_(ad1.user_id, 5)

        sess.expire_all()
        eq_(ad1.user_id, 5)
        ne_(u1.id, 5)
        ne_(u1.id, None)
        eq_(sess.query(User).count(), 2)
Example #13
def restore_provider_details(notify_db, notify_db_session):
    """
    We treat ProviderDetails as static data in notify_db_session, since we don't modify it... except we do: we update
    priority. This fixture is designed for tests that knowingly touch provider details, and restores them to their
    previous state afterwards.

    Note: this doesn't technically require notify_db_session (only notify_db), but it is kept as a requirement to
    encourage good usage - if you're modifying ProviderDetails' state, it's good to clear down the rest of the DB too
    """
    existing_provider_details = ProviderDetails.query.all()
    existing_provider_details_history = ProviderDetailsHistory.query.all()
    # make transient removes the objects from the session - since we'll want to delete them later
    for epd in existing_provider_details:
        make_transient(epd)
    for epdh in existing_provider_details_history:
        make_transient(epdh)

    yield

    # also delete these as they depend on provider_details
    ProviderRates.query.delete()
    ProviderDetails.query.delete()
    ProviderDetailsHistory.query.delete()
    notify_db.session.commit()
    notify_db.session.add_all(existing_provider_details)
    notify_db.session.add_all(existing_provider_details_history)
    notify_db.session.commit()
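
A hypothetical test using this fixture might look like the sketch below; the test body and the priority value are illustrative, only notify_db and ProviderDetails come from the source.

def test_priority_change_is_rolled_back(restore_provider_details, notify_db):
    provider = ProviderDetails.query.first()
    provider.priority = 50
    notify_db.session.commit()
    # ... assertions that depend on the changed priority ...
    # on teardown the fixture wipes the tables and re-inserts the transient
    # copies captured before the test ran, restoring the original rows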
Example #14
def update_scorer(scorer):

    # detach from the session to load the database instance to compare
    scorer_id = scorer.id
    make_transient(scorer)
    scorer.id = scorer_id
    scorer_db = Scorer.query.get(scorer_id)
    if scorer_db.scorer == scorer.scorer:
        return
    # reattach the instance
    scorer = db.session.merge(scorer)

    bets = BetScorer.query.filter(or_(BetScorer.scorer1==scorer, BetScorer.scorer2==scorer))

    for bet in bets:
        if not scorer.scorer and bet.score == 0:
            continue
        if scorer.scorer:
            bet.score += SCORER_POINTS
        else:
            bet.score -= SCORER_POINTS

        # pass as param since the user.bet_scorer backref is None here
        update_total_score(bet.user, bet_scorer=bet)

    db.session.commit()
Example #15
    def copy(self):
        db.session.expunge(self)

        make_transient(self)
        self.id = None
        db.session.add(self)
        db.session.flush()
        return self
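
Worth noting: copy() mutates and returns self rather than building a second object; the original database row is left untouched, but the in-memory instance now represents the new row. A hypothetical call site (the Report model is illustrative, not from the source):

report = db.session.query(Report).get(42)
clone = report.copy()        # expunged, made transient, re-added and flushed
assert clone is report       # same Python object, now bound to a brand-new row
assert clone.id != 42        # a fresh primary key was generated on flush
db.session.commit()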
Example #16
def main():
    with sessionmanager as session:
        for x in session.query(NPriceList).filter_by(almacen_id=1):
            session.expunge(x)
            make_transient(x)
            x.pid = None
            x.almacen_id = 3
            session.add(x)
Example #17
    def test_sched_a_fulltext_trigger(self):
        # Test create
        nml_row = self.NmlSchedAFactory(
            rpt_yr=2014,
            contbr_nm='Sheldon Adelson',
            contb_receipt_dt=datetime.datetime(2014, 1, 1)
        )
        self.FItemReceiptOrExp(
            sub_id=nml_row.sub_id,
            rpt_yr=2014,
        )
        db.session.commit()
        manage.refresh_itemized()
        search = models.ScheduleA.query.filter(
            models.ScheduleA.sub_id == nml_row.sub_id
        ).one()
        self.assertEqual(search.contributor_name_text, "'adelson':2 'sheldon':1")

        # Test update
        nml_row.contbr_nm = 'Shelly Adelson'
        db.session.add(nml_row)
        db.session.commit()
        manage.refresh_itemized()
        search = models.ScheduleA.query.filter(
            models.ScheduleA.sub_id == nml_row.sub_id
        ).one()
        db.session.refresh(search)
        self.assertEqual(search.contributor_name_text, "'adelson':2 'shelli':1")

        # Test delete
        db.session.delete(nml_row)
        db.session.commit()
        manage.refresh_itemized()
        self.assertEqual(
            models.ScheduleA.query.filter(
                models.ScheduleA.sub_id == nml_row.sub_id
            ).count(),
            0,
        )

        # Test sequential writes
        make_transient(nml_row)
        db.session.add(nml_row)
        db.session.commit()

        db.session.delete(nml_row)
        db.session.commit()

        make_transient(nml_row)
        db.session.add(nml_row)
        db.session.commit()
        manage.refresh_itemized()
        self.assertEqual(
            models.ScheduleA.query.filter(
                models.ScheduleA.sub_id == nml_row.sub_id
            ).count(),
            1,
        )
Example #18
    def save(self):
        data = self.data
        # remove the button
        data.pop('submit', None)
        forum = Forum(**data)
        # flush SQLA info from created instance so that it can be merged
        make_transient(forum)
        make_transient_to_detached(forum)

        return forum.save()
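
The pairing of make_transient() and make_transient_to_detached() here turns a freshly constructed object into one that looks as if it had been loaded from the database, so that saving it targets the existing primary key instead of inserting a duplicate. A rough sketch of the state transitions (the Forum fields are illustrative):

from sqlalchemy import inspect
from sqlalchemy.orm import make_transient, make_transient_to_detached

forum = Forum(id=7, title='Edited title')   # built from form data, primary key included
assert inspect(forum).transient             # no identity key yet

make_transient(forum)                       # clears any stray session/instrumentation state
make_transient_to_detached(forum)           # sets an identity key from id=7, as if loaded
assert inspect(forum).detached

db.session.add(forum)                       # now tracked as an existing row, not a pending INSERT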
Example #19
    def test_sched_a_fulltext_trigger(self):
        # Test create
        row = self.SchedAFactory(
            rpt_yr=2014,
            contbr_nm='Sheldon Adelson',
            load_date=datetime.datetime.now(),
            sub_id=7,
        )
        db.session.commit()
        db.session.execute('select update_aggregates()')
        search = models.ScheduleA.query.filter(
            models.ScheduleA.sched_a_sk == row.sched_a_sk
        ).one()
        self.assertEqual(search.contributor_name_text, "'adelson':2 'sheldon':1")

        # Test update
        row.contbr_nm = 'Shelly Adelson'
        db.session.add(row)
        db.session.commit()
        db.session.execute('select update_aggregates()')
        search = models.ScheduleA.query.filter(
            models.ScheduleA.sched_a_sk == row.sched_a_sk
        ).one()
        db.session.refresh(search)
        self.assertEqual(search.contributor_name_text, "'adelson':2 'shelli':1")

        # Test delete
        db.session.delete(row)
        db.session.commit()
        db.session.execute('select update_aggregates()')
        self.assertEqual(
            models.ScheduleA.query.filter(
                models.ScheduleA.sched_a_sk == row.sched_a_sk
            ).count(),
            0,
        )

        # Test sequential writes
        make_transient(row)
        db.session.add(row)
        db.session.commit()

        db.session.delete(row)
        db.session.commit()

        make_transient(row)
        db.session.add(row)
        db.session.commit()
        db.session.execute('select update_aggregates()')
        self.assertEqual(
            models.ScheduleA.query.filter(
                models.ScheduleA.sched_a_sk == row.sched_a_sk
            ).count(),
            1,
        )
Example #20
    def save(self):
        data = self.data
        # delete submit and csrf_token from data
        data.pop('submit', None)
        data.pop('csrf_token', None)
        forum = Forum(**data)
        # flush SQLA info from created instance so that it can be merged
        make_transient(forum)
        make_transient_to_detached(forum)

        return forum.save()
Example #21
    def new_version(self, session):
        # make us transient (removes persistent
        # identity). 
        make_transient(self)
        old_version = session.query(self.__class__).get(self.id)
        self.valid_from = datetime.datetime.now()
        old_version.valid_to = self.valid_from

        # set 'id' to None.  
        # a new PK will be generated on INSERT.
        self.id = None
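
This is essentially the "versioned rows" recipe: instead of updating a record in place, the current instance becomes a brand-new row and the previously stored row is closed out. A sketch of driving it explicitly (Document is an illustrative model with id, valid_from and valid_to columns):

doc = session.query(Document).filter_by(name='terms-of-service').one()
doc.body = 'updated wording'
doc.new_version(session)   # detaches doc, stamps valid_from, closes the old row's valid_to
session.add(doc)           # re-add it; with id now None it is treated as a new row
session.commit()           # one INSERT for the new version, one UPDATE for the old one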
Example #22
def get_version(recipe_id, version_id):
    valid_ids = [x[0] for x in get_versions(recipe_id)]
    if version_id not in valid_ids:
        raise BadVersionError
    recipe = get_recipe(recipe_id)
    db.session.expunge(recipe)
    make_transient(recipe)
    data = db.session.query(db.RecipeData).get(version_id)
    db.session.expunge(data)
    make_transient(data)
    recipe.data = data
    return recipe
Example #23
    def sync(self, other):
        types = self.types & other.types
        other_id = other.host_id
        for name in types:
            new_ver = max(self.local_version(name), other.local_version(name))+1
            basever = self.get_version(other.host_id, name)

            # hackedyhack. within this lies almost infinite potential for optimization.
            for o in self.objects(name):
                self.session.add(o.bump(new_ver))
            for o in other.objects(name):
                other.session.add(o.bump(new_ver))

            self.bump_version(other.host_id, name, new_ver)
            other.bump_version(self.host_id, name, new_ver)
            new_ver += 1

            self.session.commit()
            other.session.commit()

            local = { o._uuid: o for o in self.objects(name) }
            local_uuids = set(local.keys())

            remote = { o._uuid: o for o in other.objects(name) }
            remote_uuids = set(remote.keys())

            for uuid in local_uuids | remote_uuids:
                if uuid not in remote_uuids:
                    o = local[uuid]
                    make_transient(o)
                    other._sync_put(o)
                elif uuid not in local_uuids:
                    o = remote[uuid]
                    make_transient(o)
                    self._sync_put(o)
                else: # obj in local and remote
                    lo, ro, base = local[uuid], remote[uuid], self.find_base(name, uuid, basever)
                    if lo._updated != ro._updated:
                        lot, rot = lo.prepare_merge(ro, base), ro.prepare_merge(lo, base)
                        lo.apply_merge(lot)
                        ro.apply_merge(rot)
                        self._sync_put(lo)
                        other._sync_put(ro)
            self.session.commit()
            other.session.commit()

            # hackedyhack. within this lies almost infinite potential for optimization.
            for o in self.objects(name):
                self.session.add(o.bump(new_ver))
            for o in other.objects(name):
                other.session.add(o.bump(new_ver))
            self.session.commit()
            other.session.commit()
Example #24
def disconnect_from_db(docs):
    '''Make sure that docs won't synchronise with the database if changed.'''
    for d in docs:
        #make_transient removes the primary keys, hack to back them up and
        #reset them after
        id_attrs = [a.name for a in class_mapper(d.__class__).primary_key]
        id_vals = {a: getattr(d, a) for a in id_attrs}
        
        session.make_transient(d)
        
        for attr, val in id_vals.iteritems():
            setattr(d, attr, val)
Example #25
def clone_a_contract(contract):
    '''Takes a contract object and clones it

    The clone strips the following properties:
        + Financial ID
        + Expiration Date
        + Assigned To
        + Current Stage
        + Contract HREF

    Relationships are handled as follows:
        + Stars, Follows - moved to new contract (dropped from old)
        + Stage, Flow - Duplicated
        + Properties, Notes, Line Items, Companies kept on old
    '''
    old_contract_id = int(contract.id)

    subscribers = [
        ('follow', list(contract.followers)),
        ('star', list(contract.starred))
    ]

    db.session.expunge(contract)
    make_transient(contract)

    contract.id = None
    contract.financial_id = None
    contract.expiration_date = None
    contract.assigned_to = None
    contract.current_stage = None
    contract.contract_href = None

    old_contract = get_one_contract(old_contract_id)
    # group everything that will rebuild the trigger
    # into one flush
    db.session.add(contract)
    old_contract.is_archived = True
    old_contract.description = old_contract.description + ' [Archived]'

    # we have to commit here in order to manage the relationships
    db.session.commit()

    for interaction, users in subscribers:
        for i in users:
            unfollow_a_contract(old_contract_id, i, interaction)
            follow_a_contract(contract.id, i, interaction)
            db.session.commit()

    # set the parent
    contract.parent_id = old_contract_id

    db.session.commit()
    return contract
Example #26
 def delay_feedback(self, submission):
     if submission is None or submission.score is None:
         return submission
     due_date = submission.question.homework.due_date
     submission.item_responses # instantiate item responses before we detach object from session
     make_transient(submission) # detaches SQLAlchemy object from session
     now = pdt_now()
     time_available = min(submission.time + timedelta(minutes=30), due_date)
     if now < time_available:
         submission.score = None
         submission.comments = '''Feedback on your submission will be available in %s minutes, at %s. Please refresh the page at that time to view it.''' % (1 + (time_available - now).seconds // 60, time_available.strftime("%H:%M"))
     return submission
Example #27
def import_datasource(
        session,
        i_datasource,
        lookup_database,
        lookup_datasource,
        import_time):
    """Imports the datasource from the object to the database.

    Metrics, columns and the datasource itself are overridden if they already exist.
    This function can be used to import/export dashboards between multiple
    superset instances. Audit metadata isn't copied over.
    """
    make_transient(i_datasource)
    logging.info('Started import of the datasource: {}'.format(
        i_datasource.to_json()))

    i_datasource.id = None
    i_datasource.database_id = lookup_database(i_datasource).id
    i_datasource.alter_params(import_time=import_time)

    # override the datasource
    datasource = lookup_datasource(i_datasource)

    if datasource:
        datasource.override(i_datasource)
        session.flush()
    else:
        datasource = i_datasource.copy()
        session.add(datasource)
        session.flush()

    for m in i_datasource.metrics:
        new_m = m.copy()
        new_m.table_id = datasource.id
        logging.info('Importing metric {} from the datasource: {}'.format(
            new_m.to_json(), i_datasource.full_name))
        imported_m = i_datasource.metric_cls.import_obj(new_m)
        if (imported_m.metric_name not in
                [m.metric_name for m in datasource.metrics]):
            datasource.metrics.append(imported_m)

    for c in i_datasource.columns:
        new_c = c.copy()
        new_c.table_id = datasource.id
        logging.info('Importing column {} from the datasource: {}'.format(
            new_c.to_json(), i_datasource.full_name))
        imported_c = i_datasource.column_cls.import_obj(new_c)
        if (imported_c.column_name not in
                [c.column_name for c in datasource.columns]):
            datasource.columns.append(imported_c)
    session.flush()
    return datasource.id
Example #28
 def delete(self, defer_commit=False):
     """
         Method to either Hard or Soft Delete a row.
         Params
             :param defer_commit: boolean
         Return
             :return: self
     """
     db.session.delete(self)
     if not defer_commit:
         db.session.commit()
         make_transient(self)
     return self
Example #29
def customDuplicate(patch_id):

	if not localAdmin() and not adminRole():
		log_Error("{} does not have permission to duplicate custom patch.".format(session.get('user')))
		return json.dumps({'data': {}}, default=json_serial), 403


	qGet1 = MpPatch.query.filter(MpPatch.puuid == patch_id).first()
	if qGet1 is not None:
		make_transient(qGet1)

		_new_puuid = str(uuid.uuid4())
		_new_patch_name = "Copy_"+qGet1.patch_name
		qGet1.rid = None
		setattr(qGet1, 'puuid', _new_puuid)
		setattr(qGet1, 'patch_name', _new_patch_name)
		setattr(qGet1, 'patch_state', "Create")
		setattr(qGet1, 'active', "0")

		_pkg_path = qGet1.pkg_path
		_pkg_url = qGet1.pkg_url
		setattr(qGet1, 'pkg_path', _pkg_path.replace(patch_id, _new_puuid))
		setattr(qGet1, 'pkg_url', _pkg_url.replace(patch_id, _new_puuid))

		db.session.add(qGet1)

		# Duplicate patch criteria
		qGet2 = MpPatchesCriteria.query.filter(MpPatchesCriteria.puuid == patch_id).all()
		if qGet2 is not None:
			for x in qGet2:
				make_transient(x)
				x.rid = None
				setattr(x, 'puuid', _new_puuid)
				db.session.add(x)

		# Need to duplicate patch on file system
		try:
			_patch_dir0 = "/opt/MacPatch/Content/Web/patches/" + patch_id
			if os.path.exists(_patch_dir0):
				_patch_dir1 = "/opt/MacPatch/Content/Web/patches/" + _new_puuid
				copytree(_patch_dir0,_patch_dir1)
			else:
				log("{}, unable to duplicate custom patch {}({}). Directory does not exist".format(session.get('user'), qGet1.patch_name, patch_id))

		except OSError, e:
			log_Error("Error: %s - %s." % (e.filename,e.strerror))


		log("{} duplicate custom patch {}({}).".format(session.get('user'), qGet1.patch_name, patch_id))
Example #30
def copy_tables():
    """ Copies a few items from each table into a test database

    Should not be called in the same session after reset_test_db(); you will get
    a mapper error for some reason. Instead, call reset_test_db(), close the
    Python session, and then, in a new session, call copy_tables() to update
    the tables for a schema change. """
    toy = get_db_session()
    db = models.get_db_session()

    logger.info("Populating test tables")

    businesses = db.query(Business).order_by(Business.id)[0:5]
    locations = [b.location for b in businesses]
    # [b.categories ...] is a list of lists, so we need a little more processing
    categories = set().union(*[b.categories for b in businesses])

    tweets = db.query(Tweet).order_by(Tweet.id)[0:5]
    reviews = []
    for b in businesses:
        reviews.extend(
            db.query(YelpReview).filter(YelpReview.business_id == b.id)[0:5]
        )
    documents = [r.document for r in reviews] + [t.document for t in tweets]
    doc_assoc = [r.document_rel for r in reviews] + \
                [t.document_rel for t in tweets]

    tables = [businesses, locations, categories, reviews, tweets, \
              documents, doc_assoc]

    # detach all objects from db session before putting them in toy
    for t in tables:
        for obj in t: make_transient(obj)

    # only after *everything* is transient do we add anything
    for t in tables: toy.add_all(t)

    # in addition we add the junction table for business categories
    b_ids = [b.id for b in businesses]
    business_cat = db.execute(business_category_table.select().
            where(business_category_table.c.business_id.in_(
                [b.id for b in businesses]
            )))

    for row in business_cat:
        toy.execute(business_category_table.insert(), row)

    toy.commit()
Example #31
 def copy_bottle(self):
     make_transient(self)
     self.id = None
     db.session.add(self)
     db.session.commit()
Example #32
    def _task_instances_for_dag_run(self, dag_run, session=None):
        """
        Returns a map of task instance key to task instance object for the tasks to
        run in the given dag run.
        :param dag_run: the dag run to get the tasks from
        :type dag_run: models.DagRun
        :param session: the database session object
        :type session: Session
        """
        tasks_to_run = {}

        if dag_run is None:
            return tasks_to_run

        # check if we have orphaned tasks
        if AIRFLOW_VERSION_2:
            reset_state_for_orphaned_tasks(
                self, filter_by_dag_run=dag_run, session=session
            )
        else:
            self.reset_state_for_orphaned_tasks(
                filter_by_dag_run=dag_run, session=session
            )

        # for some reason, if we don't refresh, the reference to the run is lost
        dag_run.refresh_from_db()
        make_transient(dag_run)

        # DBNDPATCH
        # implements batch update
        session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.execution_date == self.execution_date,
            TI.state == State.NONE,
        ).update(
            {
                TI.state: State.SCHEDULED,
                TI.start_date: timezone.utcnow(),
                TI.end_date: timezone.utcnow(),
            },
            synchronize_session="fetch",
        )
        # TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf
        #
        task_instances = dag_run.get_task_instances()

        for ti in task_instances:
            # all tasks part of the backfill are scheduled to run
            if ti.state == State.NONE:
                # no waiting on Airflow - batch update
                # ti.set_state(State.SCHEDULED, session=session)

                ti.state = State.SCHEDULED
                ti.start_date = timezone.utcnow()
                ti.end_date = timezone.utcnow()
                session.merge(ti)
            if ti.state != State.REMOVED:
                ti._log = logging.getLogger("airflow.task")
                tasks_to_run[ti.key] = ti

        session.commit()
        return tasks_to_run
Example #33
 def make_copy(model: BaseModel) -> None:
     make_transient(model)
     model.id = None
     model.created_at = None
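
Note that make_copy() only resets the instance; persisting the duplicate stays with the caller. A hedged usage sketch (the model and session names are assumed, not from the source):

item = db.session.query(SomeModel).get(1)
make_copy(item)            # detaches it and clears id / created_at
db.session.add(item)       # re-insert the same object as a new row
db.session.commit()        # a new id is assigned; created_at falls back to its column default, if any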
Example #34
def create_admin_video(admin_name="Max", video_name="jacksonhole.mp4"):

    check_box_DB_segment = TinyDB("check_box_db_segment.json")

    #get annotation info
    annotations = get_annotations()
    annotation = annotations[video_name]
    #get admin info
    admin = session.query(User).filter(User.username == admin_name).first()
    admin_id = admin.id
    admin_name = admin.username
    #max id: primary key duplication issue
    max_video_id = session.query(func.max(Video.id).label("max_id")).one()

    #create video object for admin
    sample_video = session.query(Video).filter(
        Video.slug == annotation.workers[0] + '_' + video_name).first()
    sample_labels = session.query(Label).filter(
        Label.videoid == sample_video.id).all()
    labels = ""
    for sample_label in sample_labels:
        labels = labels + sample_label.text + " "
    load_admin_video(video_name, labels)
    session.commit()

    target_video = session.query(Video).filter(Video.slug == admin_name + '_' +
                                               video_name).first()

    query = Query()

    segments = check_box_DB_segment.search(query.videoname == video_name)
    #create segment object for admin
    for segment in segments:
        #print(1)
        sample_segment = session.query(Segment).filter(
            Segment.id == segment["segment_id"]).first()
        target_segment = session.query(Segment).filter(
            Segment.videoid == target_video.id,
            Segment.start == sample_segment.start).first()

        jobs = session.query(Job).filter(
            Job.segmentid == segment["segment_id"]).all()
        for job in jobs:
            target_job = session.query(Job).filter(
                Job.segmentid == target_segment.id).first()

            paths = session.query(Path).filter(Path.jobid == job.id).all()
            #print("job")
            #print(temp_jobid)
            for path in paths:

                temp_pathid = path.id
                session.expunge(path)
                make_transient(path)
                max_path_id = session.query(func.max(
                    Path.id).label("max_id")).one()
                path.id = max_path_id.max_id + 1
                path.jobid = target_job.id
                label_text = session.query(Label).filter(
                    Label.id == path.labelid).first().text
                label = session.query(Label).filter(
                    Label.videoid == target_video.id,
                    Label.text == label_text).first().id
                path.labelid = label

                new_path = Path(id=max_path_id.max_id + 1,
                                jobid=target_job.id,
                                labelid=label)

                session.add(new_path)
                session.commit()

                boxes = session.query(Box).filter(
                    Box.pathid == temp_pathid).all()

                for box in boxes:
                    print(box.frame)
                    session.expunge(box)
                    make_transient(box)
                    max_box_id = session.query(
                        func.max(Box.id).label("max_id")).one()
                    box.id = max_box_id.max_id + 1
                    box.pathid = path.id
                    #print(path.id)
                    session.add(box)
                    session.commit()
Example #35
    def process_dag(self, dag, queue):
        """
        This method schedules a single DAG by looking at the latest
        run for each task and attempting to schedule the following run.

        As multiple schedulers may be running for redundancy, this
        function takes a lock on the DAG and timestamps the last run
        in ``last_scheduler_run``.
        """
        DagModel = models.DagModel
        session = settings.Session()

        # picklin'
        pickle_id = None
        if self.do_pickle and self.executor.__class__ not in (
                executors.LocalExecutor, executors.SequentialExecutor):
            pickle_id = dag.pickle(session).id

        # obtain db lock
        db_dag = session.query(DagModel).filter_by(
            dag_id=dag.dag_id).with_for_update().one()

        last_scheduler_run = db_dag.last_scheduler_run or datetime(2000, 1, 1)
        secs_since_last = (datetime.now() - last_scheduler_run).total_seconds()

        if secs_since_last < self.heartrate:
            # release db lock
            session.commit()
            session.close()
            return None

        # Release the db lock
        # the assumption here is that process_dag will take less
        # time than self.heartrate otherwise we might unlock too
        # quickly and this should moved below, but that would increase
        # the time the record is locked and is blocking for other calls.
        db_dag.last_scheduler_run = datetime.now()
        session.commit()

        # update the state of the previously active dag runs
        dag_runs = DagRun.find(dag_id=dag.dag_id,
                               state=State.RUNNING,
                               session=session)
        active_dag_runs = []
        for run in dag_runs:
            # do not consider runs that are executed in the future
            if run.execution_date > datetime.now():
                continue

            # todo: run.dag is transient but needs to be set
            run.dag = dag
            # todo: preferably the integrity check happens at dag collection time
            run.verify_integrity(session=session)
            run.update_state(session=session)
            if run.state == State.RUNNING:
                make_transient(run)
                active_dag_runs.append(run)

        for run in active_dag_runs:
            # this needs a fresh session; sometimes TIs get detached
            tis = run.get_task_instances(state=(State.NONE,
                                                State.UP_FOR_RETRY))

            # this loop is quite slow as it uses are_dependencies_met for
            # every task (in ti.is_runnable). This is also called in
            # update_state above which has already checked these tasks
            for ti in tis:
                task = dag.get_task(ti.task_id)

                # fixme: ti.task is transient but needs to be set
                ti.task = task

                # future: remove adhoc
                if task.adhoc:
                    continue

                if ti.is_runnable(flag_upstream_failed=True):
                    self.logger.debug('Queuing task: {}'.format(ti))
                    queue.put((ti.key, pickle_id))

        session.close()
Example #36
    def clone_gp(self, version='latest'):
        global logger
        logger.info("Cloning GenomeProject {}, version: {}".format(
            self.gpv_id, version))

        session = fetch_session()

        try:
            # See if we have a GenomeProject_Meta for this GP
            logger.debug("Attempting to clone metadata")
            gp_meta = session.query(GenomeProject_Meta).filter(
                GenomeProject_Meta.gpv_id == self.gpv_id).first()

            old_path = self.gpv_directory
            old_gpv_id = self.gpv_id

            # Remove the GP object from the session and
            # unlink its primary key
            logger.debug("Expunging ourself from the session")
            session.expunge(self)
            make_transient(self)
            self.gpv_id = None

            # Update the session with the new version,
            # add the GP back to the session and commit it
            version = Version.fetch(version)
            self.version_id = version
            self.gpv_directory = os.path.join(
                Version.fetch_path(version), self.genome_name,
                self.assembly_accession + '_' + self.asm_name)

            # We've saved the object, make the file system symlink
            # but first we have to check if we point to another base object,
            # if so find that path and link to it
            if self.prev_gpv:
                root_gp = session.query(GenomeProject).filter(
                    GenomeProject.gpv_id == self.prev_gpv).first()

                if not root_gp:
                    logger.critical(
                        "We think we should have a root gpv_id {} but we can't find it, time to freak out"
                        .format(self.prev_gpv))
                    raise Exception(
                        "We can't find gpv_id {} but we ({}) seem to point to it"
                        .format(self.prev_gpv, self.gpv_id))

                # Now remember the path for the root GP so we can symlink to it
                old_path = root_gp.gpv_directory

            if os.path.exists(old_path) and self.verify_basedir():
                logger.debug("Making symlink from {} to {}".format(
                    old_path, self.gpv_directory))
                os.symlink(old_path, self.gpv_directory)
            else:
                logger.error(
                    "We couldn't find the old path {} to make the symlink from, this is a problem"
                    .format(old_path))

            logger.debug("Committing self")
            session.add(self)
            session.commit()

            # If we have a metadata object and we've successfully
            # updated ourself, clone the gp_meta object
            if gp_meta:
                logger.debug("We have metadata, clone: {}".format(gp_meta))
                gp_meta.clone_gpmeta(self.gpv_id)

        # Clone the replicons as we clone the GP record
            update_params = {'version_id': version, 'gpv_id': self.gpv_id}
            for rep in session.query(Replicon).filter(
                    Replicon.gpv_id == old_gpv_id):
                logger.debug("Copying replicon {}".format(rep.rpv_id))
                rep.copy_and_update(**update_params)

            for gpcs in session.query(GenomeProject_Checksum).filter(
                    GenomeProject_Checksum.gpv_id == old_gpv_id):
                logger.debug("Copying GP_Checksum for file {}".format(
                    gpcs.filename))
                gpcs.copy_and_update(**update_params)

        except Exception as e:
            logger.exception("Exception cloning GenomeProject: " + str(e))

            session.rollback()
            raise e
Example #37
    def _executable_task_instances_to_queued(self, max_tis: int, session: Session = None) -> List[TI]:
        """
        Finds TIs that are ready for execution with respect to pool limits,
        dag max_active_tasks, executor state, and priority.

        :param max_tis: Maximum number of TIs to queue in this loop.
        :type max_tis: int
        :return: list[airflow.models.TaskInstance]
        """
        executable_tis: List[TI] = []

        # Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
        # Throws an exception if lock cannot be obtained, rather than blocking
        pools = models.Pool.slots_stats(lock_rows=True, session=session)

        # If the pools are full, there is no point doing anything!
        # If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
        pool_slots_free = max(0, sum(pool['open'] for pool in pools.values()))

        if pool_slots_free == 0:
            self.log.debug("All pools are full!")
            return executable_tis

        max_tis = min(max_tis, pool_slots_free)

        # Get all task instances associated with scheduled
        # DagRuns which are not backfilled, in the given states,
        # and the dag is not paused
        query = (
            session.query(TI)
            .join(TI.dag_run)
            .options(eagerload(TI.dag_run))
            .filter(DR.run_type != DagRunType.BACKFILL_JOB, DR.state != DagRunState.QUEUED)
            .join(TI.dag_model)
            .filter(not_(DM.is_paused))
            .filter(TI.state == State.SCHEDULED)
            .options(selectinload('dag_model'))
            .order_by(-TI.priority_weight, DR.execution_date)
        )
        starved_pools = [pool_name for pool_name, stats in pools.items() if stats['open'] <= 0]
        if starved_pools:
            query = query.filter(not_(TI.pool.in_(starved_pools)))

        query = query.limit(max_tis)

        task_instances_to_examine: List[TI] = with_row_locks(
            query,
            of=TI,
            session=session,
            **skip_locked(session=session),
        ).all()
        # TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
        # Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))

        if len(task_instances_to_examine) == 0:
            self.log.debug("No tasks to consider for execution.")
            return executable_tis

        # Put one task instance on each line
        task_instance_str = "\n\t".join(repr(x) for x in task_instances_to_examine)
        self.log.info("%s tasks up for execution:\n\t%s", len(task_instances_to_examine), task_instance_str)

        pool_to_task_instances: DefaultDict[str, List[models.Pool]] = defaultdict(list)
        for task_instance in task_instances_to_examine:
            pool_to_task_instances[task_instance.pool].append(task_instance)

        # dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
        dag_max_active_tasks_map: DefaultDict[str, int]
        task_concurrency_map: DefaultDict[Tuple[str, str], int]
        dag_max_active_tasks_map, task_concurrency_map = self.__get_concurrency_maps(
            states=list(EXECUTION_STATES), session=session
        )

        num_tasks_in_executor = 0
        # Number of tasks that cannot be scheduled because of no open slot in pool
        num_starving_tasks_total = 0

        # Go through each pool, and queue up a task for execution if there are
        # any open slots in the pool.

        for pool, task_instances in pool_to_task_instances.items():
            pool_name = pool
            if pool not in pools:
                self.log.warning("Tasks using non-existent pool '%s' will not be scheduled", pool)
                continue

            open_slots = pools[pool]["open"]

            num_ready = len(task_instances)
            self.log.info(
                "Figuring out tasks to run in Pool(name=%s) with %s open slots "
                "and %s task instances ready to be queued",
                pool,
                open_slots,
                num_ready,
            )

            priority_sorted_task_instances = sorted(
                task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date)
            )

            num_starving_tasks = 0
            for current_index, task_instance in enumerate(priority_sorted_task_instances):
                if open_slots <= 0:
                    self.log.info("Not scheduling since there are %s open slots in pool %s", open_slots, pool)
                    # Can't schedule any more since there are no more open slots.
                    num_unhandled = len(priority_sorted_task_instances) - current_index
                    num_starving_tasks += num_unhandled
                    num_starving_tasks_total += num_unhandled
                    break

                # Check to make sure that the task max_active_tasks of the DAG hasn't been
                # reached.
                dag_id = task_instance.dag_id

                current_max_active_tasks_per_dag = dag_max_active_tasks_map[dag_id]
                max_active_tasks_per_dag_limit = task_instance.dag_model.max_active_tasks
                self.log.info(
                    "DAG %s has %s/%s running and queued tasks",
                    dag_id,
                    current_max_active_tasks_per_dag,
                    max_active_tasks_per_dag_limit,
                )
                if current_max_active_tasks_per_dag >= max_active_tasks_per_dag_limit:
                    self.log.info(
                        "Not executing %s since the number of tasks running or queued "
                        "from DAG %s is >= to the DAG's max_active_tasks limit of %s",
                        task_instance,
                        dag_id,
                        max_active_tasks_per_dag_limit,
                    )
                    continue

                task_concurrency_limit: Optional[int] = None
                if task_instance.dag_model.has_task_concurrency_limits:
                    # Many dags don't have a task_concurrency, so where we can avoid loading the full
                    # serialized DAG the better.
                    serialized_dag = self.dagbag.get_dag(dag_id, session=session)
                    if serialized_dag.has_task(task_instance.task_id):
                        task_concurrency_limit = serialized_dag.get_task(
                            task_instance.task_id
                        ).max_active_tis_per_dag

                    if task_concurrency_limit is not None:
                        current_task_concurrency = task_concurrency_map[
                            (task_instance.dag_id, task_instance.task_id)
                        ]

                        if current_task_concurrency >= task_concurrency_limit:
                            self.log.info(
                                "Not executing %s since the task concurrency for"
                                " this task has been reached.",
                                task_instance,
                            )
                            continue

                if task_instance.pool_slots > open_slots:
                    self.log.info(
                        "Not executing %s since it requires %s slots "
                        "but there are %s open slots in the pool %s.",
                        task_instance,
                        task_instance.pool_slots,
                        open_slots,
                        pool,
                    )
                    num_starving_tasks += 1
                    num_starving_tasks_total += 1
                    # Though we can execute tasks with lower priority if there's enough room
                    continue

                executable_tis.append(task_instance)
                open_slots -= task_instance.pool_slots
                dag_max_active_tasks_map[dag_id] += 1
                task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1

            Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)

        Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
        Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
        Stats.gauge('scheduler.tasks.executable', len(executable_tis))

        task_instance_str = "\n\t".join(repr(x) for x in executable_tis)
        self.log.info("Setting the following tasks to queued state:\n\t%s", task_instance_str)
        if len(executable_tis) > 0:
            # set TIs to queued state
            filter_for_tis = TI.filter_for_tis(executable_tis)
            session.query(TI).filter(filter_for_tis).update(
                # TODO[ha]: should we use func.now()? How does that work with DB timezone
                # on mysql when it's not UTC?
                {TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow(), TI.queued_by_job_id: self.id},
                synchronize_session=False,
            )

        for ti in executable_tis:
            make_transient(ti)
        return executable_tis
Example #38
    def clone(project_id: int, author_id: int):
        """ Clone project """

        cloned_project = Project.get(project_id)

        # Remove clone from session so we can reinsert it as a new object
        db.session.expunge(cloned_project)
        make_transient(cloned_project)

        # Re-initialise counters and meta-data
        cloned_project.total_tasks = 0
        cloned_project.tasks_mapped = 0
        cloned_project.tasks_validated = 0
        cloned_project.tasks_bad_imagery = 0
        cloned_project.last_updated = timestamp()
        cloned_project.created = timestamp()
        cloned_project.author_id = author_id
        cloned_project.status = ProjectStatus.DRAFT.value
        cloned_project.id = None  # Reset ID so we get a new ID when inserted
        cloned_project.geometry = None
        cloned_project.centroid = None

        db.session.add(cloned_project)
        db.session.commit()

        # Now add the project info, we have to do it in a two stage commit because we need to know the new project id
        original_project = Project.get(project_id)

        for info in original_project.project_info:
            db.session.expunge(info)
            make_transient(
                info
            )  # Must remove the object from the session or it will be updated rather than inserted
            info.id = None
            info.project_id_str = str(cloned_project.id)
            cloned_project.project_info.append(info)

        # Now add allowed users now we know new project id, if there are any
        for user in original_project.allowed_users:
            cloned_project.allowed_users.append(user)

        # Add other project metadata
        cloned_project.priority = original_project.priority
        cloned_project.default_locale = original_project.default_locale
        cloned_project.mapper_level = original_project.mapper_level
        cloned_project.mapping_permission = original_project.mapping_permission
        cloned_project.validation_permission = original_project.validation_permission
        cloned_project.enforce_random_task_selection = (
            original_project.enforce_random_task_selection
        )
        cloned_project.private = original_project.private
        cloned_project.entities_to_map = original_project.entities_to_map
        cloned_project.due_date = original_project.due_date
        cloned_project.imagery = original_project.imagery
        cloned_project.josm_preset = original_project.josm_preset
        cloned_project.license_id = original_project.license_id
        cloned_project.mapping_types = original_project.mapping_types

        # We try to remove the changeset comment referencing the old project. This
        #  assumes the default changeset comment has not changed between the old
        #  project and the clone. This is done on a best-effort basis.
        default_comment = current_app.config["DEFAULT_CHANGESET_COMMENT"]
        changeset_comments = []
        if original_project.changeset_comment is not None:
            changeset_comments = original_project.changeset_comment.split(" ")
        if f"{default_comment}-{original_project.id}" in changeset_comments:
            changeset_comments.remove(f"{default_comment}-{original_project.id}")
        cloned_project.changeset_comment = " ".join(changeset_comments)

        db.session.add(cloned_project)
        db.session.commit()

        return cloned_project
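
The clone needs two commits because the child rows can only be re-keyed once the database has assigned the new project id. Here is a hedged sketch of that two-stage pattern with generic, hypothetical Parent/Child models (not the Tasking Manager classes), assuming Parent has an integer primary key `id` and a `children` relationship:

from sqlalchemy.orm import make_transient


def clone_with_children(session, Parent, parent_id):
    # Stage 1: detach the loaded parent, clear its primary key and commit,
    # so the database assigns a fresh id to the clone.
    clone = session.query(Parent).get(parent_id)
    session.expunge(clone)
    make_transient(clone)
    clone.id = None
    session.add(clone)
    session.commit()

    # Stage 2: re-read the original row and attach detached copies of its
    # children to the clone; the relationship writes the new foreign key.
    original = session.query(Parent).get(parent_id)
    for child in list(original.children):
        session.expunge(child)
        make_transient(child)
        child.id = None
        clone.children.append(child)
    session.commit()
    return clone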
Beispiel #39
0
    def SPStep1(self):
        logging.info("Input Param = %s" % (self.param.__dict__, ))
        # self.param.ProjectId="2"
        Prj = database.Projects.query.filter_by(
            projid=self.param.ProjectId).first()
        # self.param.IntraStep=0
        if getattr(self.param, 'IntraStep', 0) == 0:
            self.param.IntraStep = 1
            db.session.expunge(Prj)
            NewPrj = Prj
            Prj = copy.copy(
                NewPrj)  # If we worked on a copy, we could no longer insert it.
            make_transient(NewPrj)
            NewPrj.title = self.param.subsetprojecttitle
            NewPrj.projid = None
            NewPrj.visible = False
            db.session.add(NewPrj)
            db.session.commit()
            pp = database.ProjectsPriv()
            pp.member = self.task.owner_id
            pp.privilege = "Manage"
            NewPrj.projmembers.append(pp)
            db.session.commit()
            self.param.subsetproject = NewPrj.projid
            self.UpdateProgress(
                5, "Subset Project %d Created : %s" %
                (NewPrj.projid, NewPrj.title))

        if self.param.IntraStep == 1:
            vaultroot = Path("../../vault")
            sqlparam = {'projid': self.param.ProjectId}
            sqlwhere = ""
            if self.param.extraprojects:
                sqlparam['projid'] += "," + self.param.extraprojects
            sqlparam['ranklimit'] = self.param.valeur
            if self.param.valtype == 'V': rankfunction = 'rank'
            elif self.param.valtype == 'P': rankfunction = '100*percent_rank'
            else: rankfunction = 'FunctionError'
            # if self.param.samplelist:
            #     sqlwhere+=" and s.orig_id in (%s) "%(",".join(["'%s'"%x for x in self.param.samplelist.split(",")]))
            # sqlwhere+=" and (o.classif_qual in (%s) "%(",".join(["'%s'"%x for x in self.param.what.split(",")]))
            # if self.param.what.find('N')>=0:
            #     sqlwhere+=" or o.classif_qual is null "
            # sqlwhere+=")"
            sqlwhere += sharedfilter.GetSQLFilter(self.param.filtres, sqlparam,
                                                  str(self.task.owner_id))
            logging.info("SQLParam=%s", sqlparam)
            sql = """select objid from (
                SELECT """ + rankfunction + """() OVER (partition by classif_id order by random() )rang,o.objid
                      from objects o left join samples s on o.sampleid=s.sampleid
                      where o.projid in ( %(projid)s ) """ + sqlwhere + """ ) sr
                where rang<=%(ranklimit)s """
            logging.info("SQL=%s %s", sql, sqlparam)
            # for obj in db.session.query(database.Objects).from_statement( text(sql) ).all():
            LstObjects = GetAll(sql, sqlparam)
            logging.info("matched %s objects", len(LstObjects))
            if len(LstObjects) == 0:
                self.task.taskstate = "Error"
                self.UpdateProgress(
                    10, "No object to include in the subset project")
            NbrObjects = 0
            for objid in LstObjects:
                obj = db.session.query(
                    database.Objects).filter_by(objid=objid[0]).first()
                objf = db.session.query(
                    database.ObjectsFields).filter_by(objfid=objid[0]).first()
                objcnn = db.session.query(
                    database.Objects_cnn_features).filter_by(
                        objcnnid=objid[0]).first()
                NbrObjects += 1
                oldobjid = obj.objid
                if self.param.withimg == 'Y':
                    for img in obj.images:
                        db.session.expunge(img)
                        make_transient(img)
                        self.pgcur.execute("select nextval('seq_images')")
                        img.imgid = self.pgcur.fetchone()[0]
                        # print("New Image id=",img.imgid)
                        SrcImg = img.file_name
                        SrcImgMini = img.thumb_file_name
                        VaultFolder = "%04d" % (img.imgid // 10000)
                        # Create the directory holding the images, if needed
                        CreateDirConcurrentlyIfNeeded(
                            vaultroot.joinpath(VaultFolder))
                        img.file_name = "%s/%04d%s" % (VaultFolder,
                                                       img.imgid % 10000,
                                                       Path(SrcImg).suffix)
                        shutil.copyfile(
                            vaultroot.joinpath(SrcImg).as_posix(),
                            vaultroot.joinpath(img.file_name).as_posix())
                        if SrcImgMini is not None:
                            img.thumb_file_name = "%s/%04d_mini%s" % (
                                VaultFolder, img.imgid % 10000,
                                Path(SrcImgMini).suffix)
                            shutil.copyfile(
                                vaultroot.joinpath(SrcImgMini).as_posix(),
                                vaultroot.joinpath(
                                    img.thumb_file_name).as_posix())

                db.session.expunge(obj)
                make_transient(obj)
                obj.objid = None
                obj.img0id = None
                obj.projid = self.param.subsetproject
                obj.sampleid = self.GetSampleID(obj.sampleid)
                obj.processid = self.GetProcessID(obj.processid)
                obj.acquisid = self.GetAcquisID(obj.acquisid)
                db.session.add(obj)
                db.session.commit()
                dummy = objf.n01  # Forces objf's state to load, otherwise its data is lost by the following statements.
                db.session.expunge(objf)
                make_transient(objf)
                objf.objfid = obj.objid
                db.session.add(objf)
                if objcnn:
                    dummy = objcnn.cnn01  # Forces objcnn's state to load, otherwise its data is lost by the following statements.
                    db.session.expunge(objcnn)
                    make_transient(objcnn)
                    objcnn.objcnnid = obj.objid
                    db.session.add(objcnn)
                db.session.commit()
                if NbrObjects % 20 == 0:
                    self.UpdateProgress(5 + 95 * NbrObjects / len(LstObjects),
                                        "Subset creation in progress")
                # print (oldobjid,obj.objid)
            # Recompute the Img0 values
            self.pgcur.execute("""update obj_head o
                                set imgcount=(select count(*) from images where objid=o.objid)
                                ,img0id=(select imgid from images where objid=o.objid order by imgrank asc limit 1 )
                                where projid=""" +
                               str(self.param.subsetproject))
            self.pgcur.connection.commit()
        import appli.project.main
        appli.project.main.RecalcProjectTaxoStat(self.param.subsetproject)
        appli.project.main.UpdateProjectStat(self.param.subsetproject)
        self.task.taskstate = "Done"
        self.UpdateProgress(100, "Subset created successfully")
Beispiel #40
0
def duplicate_instance(item):
    db.session.expunge(item)
    make_transient(item)
    item.id = None
    db.session.add(item)
    return item
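
A short, hypothetical usage of the helper above; `Widget` and its `name` column are placeholder names, and only column attributes travel with the duplicate, so any unique columns should be changed before the commit:

item = db.session.query(Widget).get(42)   # load the row to duplicate
copy = duplicate_instance(item)           # detaches it and clears the primary key
copy.name = copy.name + " (copy)"         # adjust unique columns before flushing
db.session.commit()                       # INSERTs a new row with a fresh id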
Beispiel #41
0
    def on_delete(self, req, resp, *args, **kwargs):
        """
        Delete a single item.
        """
        if 'DELETE' not in getattr(self, 'methods',
                                   ['GET', 'PUT', 'PATCH', 'DELETE']):
            raise falcon.errors.HTTPMethodNotAllowed(
                getattr(self, 'methods', ['GET', 'PUT', 'PATCH', 'DELETE']))

        with session_scope(self.db_engine,
                           sessionmaker_=self.sessionmaker,
                           **self.sessionmaker_kwargs) as db_session:
            resources = self.apply_arg_filter(req, resp,
                                              db_session.query(self.model),
                                              kwargs)

            try:
                resource = resources.one()
            except sqlalchemy.orm.exc.NoResultFound:
                raise falcon.errors.HTTPNotFound()
            except sqlalchemy.orm.exc.MultipleResultsFound:
                self.logger.error(
                    'Programming error: multiple results found for delete of model {0}'
                    .format(self.model))
                raise falcon.errors.HTTPInternalServerError(
                    'Internal Server Error',
                    'An internal server error occurred')

            resources = self.delete_precondition(
                req, resp, self.filter_by_params(resources, req.params), *args,
                **kwargs)

            try:
                resource = resources.one()
            except sqlalchemy.orm.exc.NoResultFound:
                raise falcon.errors.HTTPConflict(
                    'Conflict', 'Resource found but conditions violated')
            except sqlalchemy.orm.exc.MultipleResultsFound:
                self.logger.error(
                    'Programming error: multiple results found for delete of model {0}'
                    .format(self.model))
                raise falcon.errors.HTTPInternalServerError(
                    'Internal Server Error',
                    'An internal server error occurred')

            before_delete = getattr(self, 'before_delete', None)
            if before_delete is not None:
                self.before_delete(req, resp, db_session, resource, *args,
                                   **kwargs)

            try:
                mark_deleted = getattr(self, 'mark_deleted', None)
                if mark_deleted is not None:
                    mark_deleted(req, resp, resource, *args, **kwargs)
                    db_session.add(resource)
                else:
                    make_transient(resource)
                    resources.delete()
                db_session.commit()
            except sqlalchemy.exc.IntegrityError as err:
                # As far as I know, this should only be caused by a foreign key constraint being violated
                db_session.rollback()
                raise falcon.errors.HTTPConflict(
                    'Conflict', 'Other content links to this')
            except sqlalchemy.orm.exc.StaleDataError as err:
                # Version field in the model was not as expected
                db_session.rollback()
                raise falcon.errors.HTTPConflict(
                    'Conflict', 'Resource found but conditions violated')
            except sqlalchemy.exc.ProgrammingError as err:
                db_session.rollback()
                if self._is_foreign_key_violation(err):
                    raise falcon.errors.HTTPConflict(
                        'Conflict', 'Other content links to this')
                else:
                    raise

            resp.status = falcon.HTTP_OK
            req.context['result'] = {}

            after_delete = getattr(self, 'after_delete', None)
            if after_delete is not None:
                after_delete(req, resp, resource, *args, **kwargs)
Beispiel #42
0
def test_predictor_retrieve():
    with testing.postgresql.Postgresql() as postgresql:
        db_engine = create_engine(postgresql.url())
        ensure_db(db_engine)
        project_path = 'econ-dev/inspections'
        model_storage_engine = InMemoryModelStorageEngine(project_path)
        _, model_id = \
            fake_trained_model(project_path, model_storage_engine, db_engine)
        predictor = Predictor(project_path, model_storage_engine, db_engine, replace=False)
        dayone = datetime.date(2011, 1, 1).isoformat()
        daytwo = datetime.date(2011, 1, 2).isoformat()
        # create prediction set
        matrix_data = {
            'entity_id': [1, 2, 1, 2],
            'as_of_date': [dayone, dayone, daytwo, daytwo],
            'feature_one': [3, 4, 5, 6],
            'feature_two': [5, 6, 7, 8],
            'label': [7, 8, 8, 7]
        }
        matrix = pandas.DataFrame.from_dict(matrix_data)\
            .set_index(['entity_id', 'as_of_date'])
        metadata = {
            'label_name': 'label',
            'end_time': AS_OF_DATE,
            'label_window': '3month',
            'metta-uuid': '1234',
        }
        matrix_store = InMemoryMatrixStore(matrix, metadata)
        predict_proba = predictor.predict(model_id, matrix_store, misc_db_parameters=dict())

        # When run again, the predictions retrieved from the database
        # should match.
        #
        # Some trickiness here. Let's explain:
        #
        # If we are not careful, retrieving predictions from the database and
        # presenting them as a numpy array can result in a bad ordering,
        # since the given matrix may not be 'ordered' by some criteria
        # that can be easily represented by an ORDER BY clause.
        #
        # It will sometimes work, because without ORDER BY you will get
        # it back in the table's physical order, which unless something has
        # happened to the table will be the order you inserted it,
        # which could very well be the order in the matrix.
        # So it's not a bug that would necessarily immediately show itself,
        # but when it does go wrong your scores will be garbage.
        #
        # So we simulate a table order mutation that can happen over time:
        # Remove the first row and put it at the end.
        # If the Predictor doesn't explicitly reorder the results, this will fail
        session = sessionmaker(bind=db_engine)()
        obj = session.query(Prediction).first()
        session.delete(obj)
        session.commit()

        make_transient(obj)
        session = sessionmaker(bind=db_engine)()
        session.add(obj)
        session.commit()

        predictor.load_model = Mock()
        new_predict_proba = predictor.predict(model_id, matrix_store, misc_db_parameters=dict())
        assert_array_equal(new_predict_proba, predict_proba)
        assert not predictor.load_model.called
Beispiel #43
0
def import_files(files: List[CSVFile]):
    new_ids = {str(c.id): uuid4() for c in files}

    todo: List[CSVFile] = files.copy()

    imported: List[CSVFile] = []

    while todo:
        csv: CSVFile = todo.pop(0)

        new_id = new_ids.get(str(csv.id))
        ori_id = csv.id
        table = csv.table

        # remove from session
        for sub in csv.columns + csv.rows + csv.group_levels:
            db.session.expunge(sub)
            make_transient(sub)
            sub.csv_file_id = new_id

        db.session.expunge(csv)
        make_transient(csv)

        # update related things
        csv.id = new_id
        csv.created = datetime.utcnow()
        csv.meta = csv.meta.copy()
        csv.sample_group = None
        # extra option to mimic the _read_csv logic
        csv.save_table(table, index=False, header=False)

        if 'merged' in csv.meta:
            # ensure we also merge the others
            merged_ids: List[str] = csv.meta['merged']
            new_merged_ids: List[str] = []
            for file_id in merged_ids:
                if file_id not in new_ids:
                    extra: CSVFile = CSVFile.query.get(file_id)
                    if extra:
                        extra_new_id = uuid4()
                        new_ids[str(extra.id)] = extra_new_id  # key by str, matching new_ids elsewhere
                        todo.append(extra)
                        new_merged_ids.append(str(extra_new_id))
                else:
                    new_merged_ids.append(str(new_ids[file_id]))
            csv.meta['merged'] = new_merged_ids

        db.session.add(csv)
        for sub in csv.columns + csv.rows + csv.group_levels:
            db.session.add(sub)

        imported.append(csv)

        v: ValidatedMetaboliteTable = ValidatedMetaboliteTable.query \
            .filter_by(csv_file_id=ori_id).first()
        if not v:
            continue

        db.session.expunge(v)
        make_transient(v)

        v.id = uuid4()
        v.csv_file_id = csv.id
        v.created = csv.created
        v.meta = csv.meta.copy()
        db.session.add(v)

    return imported
Beispiel #44
0
    def test_import_dashboard_2_slices(self):
        schema = get_example_default_schema()
        e_slc = self.create_slice("e_slc",
                                  id=10007,
                                  table_name="energy_usage",
                                  schema=schema)
        b_slc = self.create_slice("b_slc",
                                  id=10008,
                                  table_name="birth_names",
                                  schema=schema)
        dash_with_2_slices = self.create_dashboard("dash_with_2_slices",
                                                   slcs=[e_slc, b_slc],
                                                   id=10003)
        dash_with_2_slices.json_metadata = json.dumps({
            "remote_id": 10003,
            "expanded_slices": {
                "{}".format(e_slc.id): True,
                "{}".format(b_slc.id): False,
            },
            # mocked filter_scope metadata
            "filter_scopes": {
                str(e_slc.id): {
                    "region": {
                        "scope": ["ROOT_ID"],
                        "immune": [b_slc.id]
                    }
                }
            },
        })

        imported_dash_id = import_dashboard(dash_with_2_slices,
                                            import_time=1991)
        imported_dash = self.get_dash(imported_dash_id)

        expected_dash = self.create_dashboard("dash_with_2_slices",
                                              slcs=[e_slc, b_slc],
                                              id=10003)
        make_transient(expected_dash)
        self.assert_dash_equals(imported_dash,
                                expected_dash,
                                check_position=False,
                                check_slugs=False)
        i_e_slc = self.get_slice_by_name("e_slc")
        i_b_slc = self.get_slice_by_name("b_slc")
        expected_json_metadata = {
            "remote_id": 10003,
            "import_time": 1991,
            "filter_scopes": {
                str(i_e_slc.id): {
                    "region": {
                        "scope": ["ROOT_ID"],
                        "immune": [i_b_slc.id]
                    }
                }
            },
            "expanded_slices": {
                "{}".format(i_e_slc.id): True,
                "{}".format(i_b_slc.id): False,
            },
        }
        self.assertEqual(expected_json_metadata,
                         json.loads(imported_dash.json_metadata))
Beispiel #45
0
def test_predictor_retrieve(predict_setup_args):
    """Test the predictions retrieved from the database match the output from predict_proba"""
    (project_storage, db_engine, model_id) = predict_setup_args
    predictor = Predictor(project_storage.model_storage_engine(),
                          db_engine,
                          'worst',
                          replace=False)

    # create prediction set
    matrix_store = get_matrix_store(project_storage)

    predict_proba = predictor.predict(
        model_id,
        matrix_store,
        misc_db_parameters=dict(),
        train_matrix_columns=matrix_store.columns())

    # When run again, the predictions retrieved from the database
    # should match.
    #
    # Some trickiness here. Let's explain:
    #
    # If we are not careful, retrieving predictions from the database and
    # presenting them as a numpy array can result in a bad ordering,
    # since the given matrix may not be 'ordered' by some criteria
    # that can be easily represented by an ORDER BY clause.
    #
    # It will sometimes work, because without ORDER BY you will get
    # it back in the table's physical order, which unless something has
    # happened to the table will be the order you inserted it,
    # which could very well be the order in the matrix.
    # So it's not a bug that would necessarily immediately show itself,
    # but when it does go wrong your scores will be garbage.
    #
    # So we simulate a table order mutation that can happen over time:
    # Remove the first row and put it at the end.
    # If the Predictor doesn't explicitly reorder the results, this will fail
    # Only running on TestPrediction because TrainPrediction behaves the exact same way
    try:
        reorder_session = sessionmaker(bind=db_engine)()
        obj = reorder_session.query(TestPrediction).first()
        reorder_session.delete(obj)
        reorder_session.commit()
    finally:
        reorder_session.close()

    make_transient(obj)
    try:
        reorder_session = sessionmaker(bind=db_engine)()
        reorder_session.add(obj)
        reorder_session.commit()
    finally:
        reorder_session.close()

    predictor.load_model = Mock()
    new_predict_proba = predictor.predict(
        model_id,
        matrix_store,
        misc_db_parameters=dict(),
        train_matrix_columns=matrix_store.columns())
    assert_array_almost_equal(new_predict_proba, predict_proba, decimal=5)
    assert not predictor.load_model.called
Beispiel #46
0
    def _do_update(self, session, obj, existing_obj):
        """Updates the associated ParticipantSummary, and extracts HPO ID from the provider link
      or set pairing at another level (site/organization/awardee) with parent/child enforcement."""
        obj.lastModified = clock.CLOCK.now()
        obj.signUpTime = existing_obj.signUpTime
        obj.biobankId = existing_obj.biobankId
        obj.withdrawalTime = existing_obj.withdrawalTime
        obj.suspensionTime = existing_obj.suspensionTime

        need_new_summary = False
        if obj.withdrawalStatus != existing_obj.withdrawalStatus:
            obj.withdrawalTime = (obj.lastModified if obj.withdrawalStatus
                                  == WithdrawalStatus.NO_USE else None)

            need_new_summary = True

        if obj.suspensionStatus != existing_obj.suspensionStatus:
            obj.suspensionTime = (obj.lastModified if obj.suspensionStatus
                                  == SuspensionStatus.NO_CONTACT else None)
            need_new_summary = True
        update_pairing = True

        if obj.siteId is None and obj.organizationId is None and obj.hpoId is None and \
          obj.providerLink == 'null':
            # Prevent unpairing if /PUT is sent with no pairing levels.
            update_pairing = False

        if update_pairing is True:
            has_id = False
            if obj.organizationId or obj.siteId or (obj.hpoId >= 0):
                has_id = True

            provider_link_unchanged = True
            if obj.providerLink is not None:
                if existing_obj.providerLink:
                    provider_link_unchanged = json.loads(obj.providerLink) == \
                                              json.loads(existing_obj.providerLink)
                else:
                    provider_link_unchanged = False

            null_provider_link = obj.providerLink == 'null'
            # site, org, or awardee is sent in the request: get relationships and try to set the provider link.
            if has_id and (provider_link_unchanged or null_provider_link):
                site, organization, awardee = self.get_pairing_level(obj)
                obj.organizationId = organization
                obj.siteId = site
                obj.hpoId = awardee
                if awardee is not None and (obj.hpoId != existing_obj.hpoId):
                    # get provider link for hpo_id (awardee)
                    obj.providerLink = make_primary_provider_link_for_id(
                        awardee)

                need_new_summary = True
            else:  # providerLink has changed
                # If the provider link changes, update the HPO ID on the participant and its summary.
                if obj.hpoId is None:
                    obj.hpoId = existing_obj.hpoId
                new_hpo_id = self._get_hpo_id(obj)
                if new_hpo_id != existing_obj.hpoId:
                    obj.hpoId = new_hpo_id
                    obj.siteId = None
                    obj.organizationId = None
                    need_new_summary = True

        # No pairing updates sent, keep existing values.
        if not update_pairing:
            obj.siteId = existing_obj.siteId
            obj.organizationId = existing_obj.organizationId
            obj.hpoId = existing_obj.hpoId
            obj.providerLink = existing_obj.providerLink

        if need_new_summary and existing_obj.participantSummary:
            # Copy the existing participant summary, and mutate the fields that
            # come from participant.
            summary = existing_obj.participantSummary
            summary.hpoId = obj.hpoId
            summary.organizationId = obj.organizationId
            summary.siteId = obj.siteId
            summary.withdrawalStatus = obj.withdrawalStatus
            summary.withdrawalReason = obj.withdrawalReason
            summary.withdrawalReasonJustification = obj.withdrawalReasonJustification
            summary.withdrawalTime = obj.withdrawalTime
            summary.suspensionStatus = obj.suspensionStatus
            summary.suspensionTime = obj.suspensionTime
            summary.lastModified = clock.CLOCK.now()
            make_transient(summary)
            make_transient(obj)
            obj.participantSummary = summary
        self._update_history(session, obj, existing_obj)
        super(ParticipantDao, self)._do_update(session, obj, existing_obj)
Beispiel #47
0
def import_datasource(  # pylint: disable=too-many-arguments
    session: Session,
    i_datasource: Model,
    lookup_database: Callable[[Model], Optional[Model]],
    lookup_datasource: Callable[[Model], Optional[Model]],
    import_time: Optional[int] = None,
    database_id: Optional[int] = None,
) -> int:
    """Imports the datasource from the object to the database.

    Metrics, columns and the datasource will be overridden if they exist.
    This function can be used to import/export datasources between multiple
    superset instances. Audit metadata isn't copied over.
    """
    make_transient(i_datasource)
    logger.info("Started import of the datasource: %s", i_datasource.to_json())

    i_datasource.id = None
    i_datasource.database_id = (database_id if database_id else getattr(
        lookup_database(i_datasource), "id", None))
    i_datasource.alter_params(import_time=import_time)

    # override the datasource
    datasource = lookup_datasource(i_datasource)

    if datasource:
        datasource.override(i_datasource)
        session.flush()
    else:
        datasource = i_datasource.copy()
        session.add(datasource)
        session.flush()

    for metric in i_datasource.metrics:
        new_m = metric.copy()
        new_m.table_id = datasource.id
        logger.info(
            "Importing metric %s from the datasource: %s",
            new_m.to_json(),
            i_datasource.full_name,
        )
        imported_m = import_metric(session, new_m)
        if imported_m.metric_name not in [
                m.metric_name for m in datasource.metrics
        ]:
            datasource.metrics.append(imported_m)

    for column in i_datasource.columns:
        new_c = column.copy()
        new_c.table_id = datasource.id
        logger.info(
            "Importing column %s from the datasource: %s",
            new_c.to_json(),
            i_datasource.full_name,
        )
        imported_c = import_column(session, new_c)
        if imported_c.column_name not in [
                c.column_name for c in datasource.columns
        ]:
            datasource.columns.append(imported_c)
    session.flush()
    return datasource.id
Beispiel #48
0
def clone_model(db: Session, model, **kwargs):
    db.expunge(model)
    make_transient(model)
    for (field, value) in kwargs.items():
        setattr(model, field, value)
    return model
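
Passing overrides as keyword arguments lets the caller reset the primary key and adjust unique columns in the same call. A hypothetical usage, with `User` and its columns standing in for any mapped class:

user = db.query(User).get(1)
copy = clone_model(db, user, id=None, email="copy@example.com")
db.add(copy)
db.commit()   # a new row is inserted because the primary key was cleared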
Beispiel #49
0
    def get_plain(cls, name):
        with new_session() as session:
            project = session.query(Project).filter_by(name=name).first()

            make_transient(project)
            return project
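
Because the object is made transient before the session closes, callers can keep reading the column attributes that were already loaded; relationships that were never loaded will not be lazy-loaded afterwards. A tiny illustrative snippet (the project name is an assumption):

project = Project.get_plain("archive-2019")
print(project.name)   # loaded columns remain readable outside any session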
Beispiel #50
0
def test_dup_db_entry_refresh(setup_temp_appliance_provider,
                              temp_appliance_preconfig, provider):
    """
    Polarion:
        assignee: juwatts
        caseimportance: critical
        casecomponent: Containers
        initialEstimate: 1/6h

    Bugzilla:
        1732442
        1749060
    """
    appliance = temp_appliance_preconfig

    image_table = appliance.db.client['container_groups']

    image_query = appliance.db.client.session.query(image_table)

    all_db_entries = image_query.all()

    if not all_db_entries:
        pytest.fail("No Entries in the containter_groups DB table")

    # Grab the first entry in the table
    db_entry = all_db_entries[0]

    copied_db_entry = deepcopy(db_entry)

    # Remove the object from the session
    appliance.db.client.session.expunge(db_entry)

    make_transient(db_entry)

    # ID is the primary key, set it to something high
    db_entry_last = all_db_entries[-1]
    copied_db_entry.id = db_entry_last.id + 500

    try:
        with appliance.db.client.transaction:
            appliance.db.client.session.add(copied_db_entry)
    except IntegrityError as ex:
        pytest.fail('Exception while adding DB entry. {}'.format(ex))

    new_db_entry = image_query.filter(
        image_table.id == copied_db_entry.id).all()

    # Should only be one entry
    assert len(new_db_entry) == 1

    for column, value in vars(new_db_entry[0]).items():
        # _sa_instance_state is a SQLAlchemy InstanceState object
        if column == "_sa_instance_state":
            continue
        elif column == "id":
            assert value != getattr(db_entry, column)
        else:
            # Verify the entries in the DB are the same
            assert value == getattr(db_entry, column)

    with LogValidator(
            '/var/www/miq/vmdb/log/evm.log',
            failure_patterns=['.*nil:NilClass.*'],
    ).waiting(timeout=600):
        provider.refresh_provider_relationships()
        wait_for(provider.is_refreshed,
                 func_kwargs=dict(refresh_delta=10),
                 timeout=600)
Beispiel #51
0
    def test_get_reusable_module_use_latest_build(self, cfg, allow_ocbm):
        """
        Test that the `get_reusable_module` tries to reuse the latest module in case when
        multiple modules can be reused allow_only_compatible_base_modules is True.
        """
        cfg.return_value = allow_ocbm
        # Set "fedora" virtual stream to platform:f28.
        platform_f28 = db_session.query(models.ModuleBuild).filter_by(name="platform").one()
        mmd = platform_f28.mmd()
        xmd = mmd.get_xmd()
        xmd["mbs"]["virtual_streams"] = ["fedora"]
        mmd.set_xmd(xmd)
        platform_f28.modulemd = mmd_to_str(mmd)
        platform_f28.update_virtual_streams(db_session, ["fedora"])

        # Create platform:f29 with "fedora" virtual stream.
        mmd = load_mmd(read_staged_data("platform"))
        mmd = mmd.copy("platform", "f29")
        xmd = mmd.get_xmd()
        xmd["mbs"]["virtual_streams"] = ["fedora"]
        mmd.set_xmd(xmd)
        platform_f29 = import_mmd(db_session, mmd)[0]

        # Create another copy of `testmodule:master` which should be reused, because its
        # stream version will be higher than the previous one. Also set its buildrequires
        # to platform:f29.
        latest_module = db_session.query(models.ModuleBuild).filter_by(
            name="testmodule", state=models.BUILD_STATES["ready"]).one()
        # This is used to clone the ModuleBuild SQLAlchemy object without recreating it from
        # scratch.
        db_session.expunge(latest_module)
        make_transient(latest_module)

        # Change the platform:f28 buildrequirement to platform:f29 and recompute the build_context.
        mmd = latest_module.mmd()
        xmd = mmd.get_xmd()
        xmd["mbs"]["buildrequires"]["platform"]["stream"] = "f29"
        mmd.set_xmd(xmd)
        latest_module.modulemd = mmd_to_str(mmd)
        contexts = models.ModuleBuild.contexts_from_mmd(
            latest_module.modulemd
        )
        latest_module.build_context = contexts.build_context
        latest_module.context = contexts.context
        latest_module.buildrequires = [platform_f29]

        # Set the `id` to None, so new one is generated by SQLAlchemy.
        latest_module.id = None
        db_session.add(latest_module)
        db_session.commit()

        module = db_session.query(models.ModuleBuild)\
                           .filter_by(name="testmodule")\
                           .filter_by(state=models.BUILD_STATES["build"])\
                           .one()
        db_session.commit()

        reusable_module = get_reusable_module(module)

        if allow_ocbm:
            assert reusable_module.id == latest_module.id
        else:
            # There are two testmodules in ready state, the first one with
            # lower id is what we want.
            first_module = db_session.query(models.ModuleBuild).filter_by(
                name="testmodule", state=models.BUILD_STATES["ready"]
            ).order_by(models.ModuleBuild.id).first()

            assert reusable_module.id == first_module.id