def run(cls, env):
        return  # disabled by request of Sergey; everything below is intentionally unreachable

        db_session = DBSession()
        transaction.manager.begin()

        LogEntry.info('InternalUpdateReactor started!', component=COMP_ID, group=InternalUpdateReactor.identity, append_dt=datetime.now())

        fs_resources = db_session.query(FoclStruct).all()

        for fs in fs_resources:

            # get const obj
            try:
                const_obj = db_session.query(ConstructObject).filter(ConstructObject.resource_id == fs.id).one()
            except Exception:
                LogEntry.info('Error on update const obj with id = ' + str(fs.id),
                              component=COMP_ID, group=InternalUpdateReactor.identity, append_dt=datetime.now())
                continue

            # update from project vector layers
            const_obj.fosc_plan = cls.get_feat_count_for_layer(fs, 'fosc')
            const_obj.cross_plan = cls.get_feat_count_for_layer(fs, 'optical_cross')
            const_obj.spec_trans_plan = cls.get_feat_count_for_layer(fs, 'special_transition')


        db_session.flush()
        LogEntry.info('InternalUpdateReactor finished!', component=COMP_ID, group=InternalUpdateReactor.identity, append_dt=datetime.now())

        transaction.manager.commit()
    def load_domain_dicts(cls, force=False, federal=True, region=True, district=True):
        print 'Loading domain dicts...'
        from ..compulink_admin.model import Region, District
        from csv import DictReader


        if (region and not federal) or (district and not region):
            print 'Inconsistent params: district requires region, region requires federal!'
            return

        db_session = DBSession()
        db_session.autoflush = False
        with transaction.manager:

            if ((db_session.query(FederalDistrict).count() > 0 and federal) or
               (db_session.query(Region).count() > 0 and region) or
               (db_session.query(District).count() > 0 and district)) and not force:
                print '     Domain dictionaries already exist! Returning...'
                return

            with open(path.join(BASE_PATH, 'federal_districts.csv')) as fed_csv, \
                 open(path.join(BASE_PATH, 'regions.csv')) as reg_csv, \
                 open(path.join(BASE_PATH, 'districts.csv')) as dist_csv:

                fed_reader = DictReader(fed_csv)
                reg_reader = DictReader(reg_csv)
                dist_reader = DictReader(dist_csv)


                feds = {}
                if federal:
                    for fed_row in fed_reader:
                        federal_dist = FederalDistrict()
                        federal_dist.name = fed_row['name']
                        federal_dist.short_name = fed_row['short_name']
                        federal_dist.persist()
                        feds[fed_row['id']] = federal_dist


                regs = {}
                if region:
                    for reg_row in reg_reader:
                        reg = Region()
                        reg.name = reg_row['name']
                        reg.short_name = reg_row['short_name']
                        reg.region_code = reg_row['region_code']
                        reg.persist()
                        regs[reg_row['id']] = reg

                if district:
                    for dist_row in dist_reader:
                        dist = District()
                        dist.name = dist_row['name']
                        dist.short_name = dist_row['short_name']
                        dist.region = regs[dist_row['region_id']]
                        dist.persist()


            db_session.flush()
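
    # Usage sketch (illustrative, not from the source): load_domain_dicts is a
    # classmethod, typically called from a database-initialization command on
    # its owner class; the owner name below is a placeholder.
    #
    #     SomeDictLoader.load_domain_dicts(force=True)                    # reload everything
    #     SomeDictLoader.load_domain_dicts(region=False, district=False)  # federal districts only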
    def run(cls, env):

        ngw_session = NgwSession()
        ms_session = MsSqlSession()

        transaction.manager.begin()

        LogEntry.info('ExternalUpdateReactor started!', component=COMP_ID, group=ExternalUpdateReactor.identity, append_dt=datetime.now())

        # get mssql conn info
        enabled_sett = env.compulink_mssql_bridge.settings.get('enable', 'false').lower()
        mssql_enable = enabled_sett in ('true', 'yes', '1')

        if not mssql_enable:
            LogEntry.info('MSSQL disabled in config! Returning...', component=COMP_ID, group=ExternalUpdateReactor.identity, append_dt=datetime.now())
            return

        # get all external ids
        fs_external_ids = ngw_session.query(ConstructObject.external_id).filter(ConstructObject.external_id != None, ConstructObject.external_id != '').all()
        fs_external_ids = [r for (r,) in fs_external_ids]

        # get info from mssql
        CompulinkMssqlBridgeComponent.configure_db_conn(env.compulink_mssql_bridge.settings.get('conn_str', 'no'))
        ms_rows = ms_session.query(MssqConstObject)\
            .filter(MssqConstObject.ObjectID.in_(fs_external_ids))\
            .options(joinedload_all(MssqConstObject.Work3), joinedload_all(MssqConstObject.Work4)).all()

        for ms_row in ms_rows:
            # get const obj
            try:
                const_obj = ngw_session.query(ConstructObject).filter(ConstructObject.external_id == str(ms_row.ObjectID)).one()
            except Exception:
                LogEntry.info('Error on update const obj with ext id = ' + str(ms_row.ObjectID),
                              component=COMP_ID, group=ExternalUpdateReactor.identity, append_dt=datetime.now())
                continue

            # update from mssql
            const_obj.cabling_plan = ms_row.PreliminaryLineLength  # new requirement; TODO: check!
            const_obj.access_point_plan = ms_row.AccessPointAmount  # new requirement
            const_obj.subcontr_name = ms_row.Work3.SubContractor.ContractorName if ms_row.Work3 and ms_row.Work3.SubContractor else None
            const_obj.start_build_date = ms_row.Work3.AgreementStartDateWork if ms_row.Work3 else None
            const_obj.end_build_date = ms_row.Work3.AgreementFinishDateWork if ms_row.Work3 else None
            const_obj.start_deliver_date = ms_row.Work4.AgreementStartDateWork if ms_row.Work4 else None
            const_obj.end_deliver_date = ms_row.Work4.AgreementFinishDateWork if ms_row.Work4 else None

        ngw_session.flush()
        LogEntry.info('ExternalUpdateReactor finished!', component=COMP_ID, group=ExternalUpdateReactor.identity, append_dt=datetime.now())

        transaction.manager.commit()
def copy_existing_real_features(args):
    db_session = DBSession()

    transaction.manager.begin()

    fs_resources = db_session.query(FoclStruct).all()

    for focl_struct in fs_resources:

        layers = focl_struct.children
        for real_layer_name in FOCL_REAL_LAYER_STRUCT:
            # reset per layer name, otherwise a match from the previous
            # iteration would mask a genuinely missing layer
            real_layer = None
            actual_layer = None

            # get real layer and actual layer
            for lyr in layers:
                if lyr.keyname:
                    lyr_name = '_'.join(lyr.keyname.rsplit('_')[0:-1])
                else:
                    continue

                if real_layer_name == lyr_name:
                    real_layer = lyr

                if 'actual_' + real_layer_name == lyr_name:
                    actual_layer = lyr

            if not real_layer or not actual_layer:
                print 'Oops! Needed layers not found!'
                return

            try:
                # clean actual layer
                actual_layer.feature_delete_all()

                # copy data from real layer
                query = real_layer.feature_query()
                query.geom()

                for feat in query():
                    feat.fields['change_author'] = u'Мобильное приложение'
                    feat.fields['change_date'] = feat.fields['built_date']
                    actual_layer.feature_create(feat)

                print "Layers %s was updated!" % actual_layer.keyname

            except Exception as ex:
                print "Error on update %s: %s" % (actual_layer.keyname, ex.message)
            db_session.flush()
    def run(cls, env):

        db_session = DBSession()
        transaction.manager.begin()

        cls.log_info('ConstructSpecTransitionLineReactor started!')

        fs_resources = db_session.query(FoclStruct).all()
        for fs in fs_resources:
            cls.smart_construct_line(fs)
            db_session.flush()

        cls.log_info('ConstructSpecTransitionLineReactor finished!')

        transaction.manager.commit()
    def drop_vector_layer_table(table_uid, make_transaction=False):
        db_session = DBSession()

        #start transaction
        if make_transaction:
            transaction.manager.begin()

        engine = db_session.get_bind()
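        # table_uid is assumed to be the layer's uuid hex from the resource
        # registry; it is interpolated into the SQL identifier directly, so it
        # must never come from untrusted input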
        engine.execute('DROP TABLE "vector_layer"."layer_%s"' % table_uid)

        #close transaction
        if make_transaction:
            transaction.manager.commit()
        else:
            db_session.flush()
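
    # Usage sketch (illustrative): drop the backing table of a removed vector
    # layer in its own transaction. VectorLayerUpdater as the owner class is
    # inferred from the related snippets in this collection.
    #
    #     VectorLayerUpdater.drop_vector_layer_table(layer.tbl_uuid, make_transaction=True)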
    def load_rt_domain_dicts(cls, force=False):
        print 'Loading RT domain dicts...'
        from ..compulink_admin.model import Region
        from ..compulink_reporting.model import RtMacroDivision, RtBranch, RtBranchRegion
        from csv import DictReader

        db_session = DBSession()
        db_session.autoflush = False

        if (db_session.query(RtMacroDivision).count() > 0 or
            db_session.query(RtBranch).count() > 0 or
            db_session.query(RtBranchRegion).count() > 0
            ) and not force:
            print '     RT domain dictionaries already exist! Returning...'
            return

        with open(path.join(BASE_PATH, 'rt_macro_division.csv')) as macro_csv, \
            open(path.join(BASE_PATH, 'rt_branch.csv')) as branch_csv, \
            open(path.join(BASE_PATH, 'rt_branch_region.csv')) as branch_region_csv:

            macro_reader = DictReader(macro_csv)
            branch_reader = DictReader(branch_csv)
            branch_region_reader = DictReader(branch_region_csv)


            macros = {}
            branches = {}
            for macro_row in macro_reader:
                macro = RtMacroDivision()
                macro.name = macro_row['name']
                macro.persist()
                macros[macro_row['id']] = macro

            for branch_row in branch_reader:
                branch = RtBranch()
                branch.name = branch_row['name']
                branch.rt_macro_division = macros[branch_row['macro_division_id']]
                branch.persist()
                branches[branch_row['id']] = branch

            for br_reg_row in branch_region_reader:
                branch_reg = RtBranchRegion()
                branch_reg.rt_branch = branches[br_reg_row['rt_branch_id']]
                branch_reg.region = db_session.query(Region).filter(Region.region_code == br_reg_row['region_code']).one()
                branch_reg.persist()

        db_session.flush()
    def run(cls, env):

        db_session = DBSession()
        transaction.manager.begin()

        cls.log_info('ConstructFoclLineReactor started! (%s)' % CONNECTOR_NAME)

        fs_resources = db_session.query(FoclStruct).all()
        for fs in fs_resources:
            try:
                cls.smart_construct_line(fs)
                db_session.flush()
            except Exception as ex:
                print 'Error on construct line %s: %s' % (fs.id, ex.message)
                cls.log_error('Error on construct line %s: %s' % (fs.id, ex.message))

        cls.log_info('ConstructFoclLineReactor finished!')
        transaction.manager.commit()
def remove_video_entry(video_id):
    db_session = DBSession()
    transaction.manager.begin()

    task = VideoProduceTask.filter(VideoProduceTask.id == video_id).first()
    if task:
        fileobjs = FileObj.filter(FileObj.id == task.fileobj_id, FileObj.component == COMP_ID)
        for fileobj in fileobjs:
            fn = env.file_storage.filename(fileobj)
            db_session.delete(fileobj)
            try:
                remove(fn)
            except OSError:
                pass  # file is already gone; nothing to clean up

        db_session.delete(task)
    db_session.flush()
    transaction.manager.commit()

    return success_response()
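
# Usage sketch (illustrative): remove_video_entry manages its own transaction
# and returns success_response(), so it only needs the task id. The id value
# below is a placeholder.
#
#     remove_video_entry(42)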
def fill_guid_field(args):
    db_session = DBSession()

    transaction.manager.begin()

    resources = db_session.query(VectorLayer).options(joinedload_all('fields')).filter(VectorLayer.keyname.like('real_%')).all()

    for vec_layer in resources:
        try:
            query = vec_layer.feature_query()
            query.geom()

            for feat in query():
                if not feat.fields['feat_guid']:
                    feat.fields['feat_guid'] = str(uuid.uuid4().hex)
                    vec_layer.feature_put(feat)

            print "GUIDs of %s was updated!" % vec_layer.keyname

        except Exception as ex:
            print "Error on update GUIDs %s: %s" % (vec_layer.keyname, ex.message)
        db_session.flush()
    def change_field_display_name(resource, field_keyname, new_displayname, make_transaction=False):

        if not isinstance(resource, VectorLayer):
            raise Exception('Unsupported resource type!')

        if field_keyname not in [field.keyname for field in resource.fields]:
            raise Exception('Field does not exist in the table!')

        #start transaction
        if make_transaction:
            transaction.manager.begin()

        for field in resource.fields:
            if field.keyname == field_keyname:
                field.display_name = new_displayname

        #close transaction
        if make_transaction:
            transaction.manager.commit()
        else:
            db_session = DBSession()
            db_session.flush()
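
    # Usage sketch (illustrative): relabel an existing field. The layer lookup
    # and keyname are placeholders; 'type_fosc' appears as a field keyname
    # elsewhere in this collection.
    #
    #     layer = DBSession().query(VectorLayer).filter(VectorLayer.keyname == 'real_fosc_123').one()
    #     VectorLayerUpdater.change_field_display_name(layer, 'type_fosc', u'FOSC type', make_transaction=True)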
    def change_field_datatype(resource, field_keyname, new_field_type, make_transaction=False):

        if not isinstance(resource, VectorLayer):
            raise Exception('Unsupported resource type!')

        if new_field_type not in FIELD_TYPE.enum:
            raise Exception('Unsupported field type!')

        target_field = filter(lambda field: field.keyname == field_keyname, resource.fields)

        if not target_field:
            raise Exception('Field not found in the table!')
        else:
            target_field = target_field[0]

        if new_field_type == target_field.datatype:
            print 'Field already has this type!'
            return

        #start transaction
        if make_transaction:
            transaction.manager.begin()

        # change data type physic
        VectorLayerUpdater.__change_column_datatype(resource.tbl_uuid, target_field.fld_uuid, _FIELD_TYPE_2_DB[new_field_type])

        # set new type in field registry
        target_field.datatype = new_field_type
        target_field.persist()


        #close transaction
        if make_transaction:
            transaction.manager.commit()
        else:
            db_session = DBSession()
            db_session.flush()
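
    # Usage sketch (illustrative): switch a field's datatype both in the
    # physical table and in the field registry. FIELD_TYPE.STRING is assumed
    # to be a member of the FIELD_TYPE enum validated above.
    #
    #     VectorLayerUpdater.change_field_datatype(layer, 'region_code', FIELD_TYPE.STRING, make_transaction=True)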
    def append_field(resource, field_keyname, field_type, field_display_name, field_grid_vis=True, make_transaction=False):

        if not isinstance(resource, VectorLayer):
            raise Exception('Unsupported resource type!')

        if field_type not in FIELD_TYPE.enum:
            raise Exception('Unsupported field type!')

        if field_keyname in [field.keyname for field in resource.fields]:
            raise Exception('Field already exists in the table!')

        #start transaction
        if make_transaction:
            transaction.manager.begin()

        #create uuid for field
        uid = str(uuid.uuid4().hex)

        #create column
        VectorLayerUpdater.__create_column(resource.tbl_uuid, uid, _FIELD_TYPE_2_DB[field_type])

        #add field to register
        vfl = VectorLayerField()
        vfl.keyname = field_keyname
        vfl.datatype = field_type
        vfl.display_name = field_display_name
        vfl.grid_visibility = field_grid_vis
        vfl.fld_uuid = uid

        resource.fields.append(vfl)

        #close transaction
        if make_transaction:
            transaction.manager.commit()
        else:
            db_session = DBSession()
            db_session.flush()
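
    # Usage sketch (illustrative): add a string field inside a caller-managed
    # transaction (make_transaction=False only flushes, so an outer transaction
    # must commit). Field names mirror ones used elsewhere in this collection.
    #
    #     VectorLayerUpdater.append_field(layer, 'change_author', FIELD_TYPE.STRING,
    #                                     u'Change author', field_grid_vis=True)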
def init_calendar():
    print 'Filling calendar...'

    db_session = DBSession()
    db_session.autoflush = False

    count = db_session.query(Calendar).count()
    if count != 0:
        print '     Calendar is not empty! Returning...'
        return

    start_date = date(2014, 1, 1)
    max_date = date(2025, 1, 1)

    active_date = start_date

    quarter_names = {
        1: u'1 кв.',
        2: u'2 кв.',
        3: u'3 кв.',
        4: u'4 кв.',
    }

    month_names = {
        1: u'Январь',
        2: u'Февраль',
        3: u'Март',
        4: u'Апрель',
        5: u'Май',
        6: u'Июнь',
        7: u'Июль',
        8: u'Август',
        9: u'Сентябрь',
        10: u'Октябрь',
        11: u'Ноябрь',
        12: u'Декабрь',
    }

    week_day_names = {
        1: u'Понедельник',
        2: u'Вторник',
        3: u'Среда',
        4: u'Четверг',
        5: u'Пятница',
        6: u'Суббота',
        7: u'Воскресенье',
    }

    week_day_short_names = {
        1: u'Пн',
        2: u'Вт',
        3: u'Ср',
        4: u'Чт',
        5: u'Пт',
        6: u'Сб',
        7: u'Вс',
    }

    relat_day = relativedelta.relativedelta(days=+1)

    while active_date < max_date:
        cal = Calendar()
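        # surrogate key packs the date as YYYYMMDD, e.g. 2014-01-01 -> 20140101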
        cal.id = active_date.year * 10000 + active_date.month * 100 + active_date.day
        cal.full_date = active_date
        cal.year_number = active_date.year
        cal.semester_number = 1 if active_date.month < 7 else 2
        cal.semester_name = u'1 полугодие' if active_date.month < 7 else u'2 полугодие'
        cal.quarter_number = (active_date.month-1)//3 + 1
        cal.quarter_name = quarter_names[cal.quarter_number]
        cal.month_number = active_date.month
        cal.month_name = month_names[active_date.month]
        cal.year_week_number = active_date.isocalendar()[1]
        cal.month_week_number = get_week_of_month(active_date)
        cal.month_decade_number = (active_date.day < 11) * 1 + \
                                  (11 <= active_date.day <= 20) * 2 + \
                                  (active_date.day > 20) * 3
        cal.year_day_number = active_date.timetuple().tm_yday
        cal.month_day_number = active_date.day
        cal.week_day_number = active_date.weekday() + 1
        cal.week_day_name = week_day_names[cal.week_day_number]
        cal.week_day_short_name = week_day_short_names[cal.week_day_number]
        cal.weekend = cal.week_day_number > 5

        cal.persist()
        active_date = active_date + relat_day

    db_session.flush()
    def run(cls):
        db_session = NgwSession()
        transaction.manager.begin()

        # clear all data from tables
        for table in [BuiltFosc, BuiltCable, BuiltOpticalCross, BuiltAccessPoint, BuiltSpecTransition]:
            db_session.query(table).delete()
        db_session.flush()

        # get dicts
        fosc_types = {x.type: x for x in db_session.query(FoscType).all()}
        cable_laying_methods = {x.method: x for x in db_session.query(CableLayingMethod).all()}
        optical_cross_types = {x.type: x for x in db_session.query(OpticalCrossType).all()}
        ap_types = {x.type: x for x in db_session.query(AccessPointType).all()}
        spec_laying_methods = {x.method: x for x in db_session.query(SpecLayingMethod).all()}


        # get all focls
        fs_resources = db_session.query(FoclStruct).all()

        for fs in fs_resources:

            # fosc handler
            fact_lyr = cls.get_layer_by_type(fs, 'actual_real_fosc')
            if fact_lyr:
                # get all rows and aggregate
                fosc_values = []
                query = fact_lyr.feature_query()
                for feat in query():
                    if feat.fields['built_date']:
                        fosc_values.append((feat.fields['built_date'].date(), feat.fields['type_fosc']))

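                # sorted() first: itertools.groupby only merges adjacent equal
                # keys, so the (built_date, type_fosc) pairs must be pre-sorted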
                fosc_aggregation = {k: len(list(g)) for k, g in itertools.groupby(sorted(fosc_values))}

                # save to table
                for (build_dt, fs_type), count in fosc_aggregation.iteritems():
                    row = BuiltFosc()
                    row.resource_id = fs.id
                    row.fosc_count = count

                    if fs_type:
                        if fs_type not in fosc_types:
                            ot = FoscType()
                            ot.type = fs_type
                            ot.persist()
                            fosc_types[fs_type] = ot
                        row.fosc_type = fosc_types[fs_type]

                    row.build_date = db_session.query(Calendar).filter(Calendar.full_date==build_dt).one()
                    row.persist()

                db_session.flush()


            # cross handler
            fact_lyr = cls.get_layer_by_type(fs, 'actual_real_optical_cross')
            if fact_lyr:
                # get all rows and aggregate
                cross_values = []
                query = fact_lyr.feature_query()
                for feat in query():
                    if feat.fields['built_date']:
                        cross_values.append((feat.fields['built_date'].date(), feat.fields['type_optical_cross']))

                cross_aggregation = {k: len(list(g)) for k, g in itertools.groupby(sorted(cross_values))}

                # save to table
                for (build_dt, cross_type), count in cross_aggregation.iteritems():
                    row = BuiltOpticalCross()
                    row.resource_id = fs.id
                    row.optical_cross_count = count

                    if cross_type:
                        if cross_type not in optical_cross_types:
                            ot = OpticalCrossType()
                            ot.type = cross_type
                            ot.persist()
                            optical_cross_types[cross_type] = ot
                        row.optical_cross_type = optical_cross_types[cross_type]

                    row.build_date = db_session.query(Calendar).filter(Calendar.full_date==build_dt).one()
                    row.persist()

                db_session.flush()

            # ap handler
            fact_lyr = cls.get_layer_by_type(fs, 'actual_real_access_point')
            if fact_lyr:
                # get all rows and aggregate
                ap_values = []
                query = fact_lyr.feature_query()
                for feat in query():
                    if feat.fields['built_date']:
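                        # access points carry no type attribute in this layer;
                        # None keeps the tuple shape used by the other handlers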
                        ap_values.append((feat.fields['built_date'].date(), None))

                ap_aggregation = {k: len(list(g)) for k, g in itertools.groupby(sorted(ap_values))}

                # save to table
                for (build_dt, ap_type), count in ap_aggregation.iteritems():
                    row = BuiltAccessPoint()
                    row.resource_id = fs.id
                    row.access_point_count = count

                    if ap_type:
                        if ap_type not in ap_types:
                            apt = AccessPointType()
                            apt.type = ap_type
                            apt.persist()
                            ap_types[ap_type] = apt
                        row.access_point_type = ap_types[ap_type]

                    row.build_date = db_session.query(Calendar).filter(Calendar.full_date==build_dt).one()
                    row.persist()

                db_session.flush()

            # cabling handler
            fact_lyr = cls.get_layer_by_type(fs, 'actual_real_optical_cable')
            if fact_lyr:
                # get all rows and aggregate
                cable_values = []
                query = fact_lyr.feature_query()
                query.geom_length()
                for feat in query():
                    if feat.fields['built_date']:
                        cable_values.append((feat.fields['built_date'].date(), feat.fields['laying_method'], feat.calculations['geom_len']))

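                # sum geometry lengths per (built_date, laying_method) pair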
                cable_aggregation = {k: sum(x[2] for x in list(g)) for k, g in itertools.groupby(sorted(cable_values), key=lambda x: (x[0], x[1]))}

                # save to table
                for (build_dt, lay_meth), cable_len in cable_aggregation.iteritems():
                    row = BuiltCable()
                    row.resource_id = fs.id
                    row.cable_length = round(cable_len/1000.0, 3) if cable_len else 0

                    if lay_meth:
                        if lay_meth not in cable_laying_methods:
                            lm = CableLayingMethod()
                            lm.method = lay_meth
                            lm.persist()
                            cable_laying_methods[lay_meth] = lm
                        row.laying_method = cable_laying_methods[lay_meth]

                    row.build_date = db_session.query(Calendar).filter(Calendar.full_date==build_dt).one()
                    row.persist()

                db_session.flush()

            # spec trans handler
            fact_lyr = cls.get_layer_by_type(fs, 'actual_real_special_transition')
            if fact_lyr:
                # get all rows and aggregate
                spec_trans_values = []
                query = fact_lyr.feature_query()
                query.geom_length()
                for feat in query():
                    if feat.fields['built_date']:
                        spec_trans_values.append((feat.fields['built_date'].date(), feat.fields['special_laying_method'], feat.calculations['geom_len']))

                spec_trans_aggregation = {k: list(g) for k, g in itertools.groupby(sorted(spec_trans_values), key=lambda x: (x[0], x[1]))}

                # save to table
                for (build_dt, spec_lay_meth), specs in spec_trans_aggregation.iteritems():
                    row = BuiltSpecTransition()
                    row.resource_id = fs.id
                    length = sum(x[2] for x in specs)
                    row.spec_trans_length = round(length/1000.0, 3) if length else 0
                    row.spec_trans_count = len(specs)

                    if spec_lay_meth:
                        if spec_lay_meth not in spec_laying_methods:
                            slm = SpecLayingMethod()
                            slm.method = spec_lay_meth
                            slm.persist()
                            spec_laying_methods[spec_lay_meth] = slm
                        row.spec_laying_method = spec_laying_methods[spec_lay_meth]

                    row.build_date = db_session.query(Calendar).filter(Calendar.full_date==build_dt).one()
                    row.persist()

                db_session.flush()

        transaction.manager.commit()