def joinAll(validInputs, params, context, feedback):
    outp = QgsProcessing.TEMPORARY_OUTPUT
    totalValidInputs = len(validInputs)
    step = 0
    steps = totalValidInputs
    feedback = QgsProcessingMultiStepFeedback(steps, feedback)
    i = 0
    for k, v in validInputs.items():
        i = i + 1
        # if i == totalValidInputs:
        #     outp = params['OUTPUT']
        feedback.pushConsoleInfo(str(i))
        val = validInputs[k]
        layer = str(val[0])
        attr = str(val[1])
        if i == 1:
            result = layer
        else:
            step = step + 1
            feedback.setCurrentStep(step)
            result = joinAttrByLocation(result, layer, attr, [IGUALA],
                                        UNDISCARD_NONMATCHING, context, feedback)
            result = result['OUTPUT']
    return result
def lineal(params, context, feedback):
    steps = 0
    totalSteps = 1
    pointA = params['A']
    pointB = params['B']
    pointC = params['C']
    pointD = params['D']
    value = params['VALUE']
    feedback = QgsProcessingMultiStepFeedback(totalSteps, feedback)
    # feedback.pushConsoleInfo(str(typeFunction))
    steps = steps + 1
    feedback.setCurrentStep(steps)
    formulaLineal = calculateLineal(value, pointA, pointB, pointC, pointD)
    feedback.pushConsoleInfo(str(formulaLineal))
    fieldOutputName = "n_" + value
    proximity2OpenSpace = calculateField(params['GRID'], fieldOutputName,
                                         formulaLineal, context, feedback,
                                         params['OUTPUT'])
    return proximity2OpenSpace
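# --- Illustrative sketch (not part of the plugin) ---------------------------
# `calculateLineal` is defined elsewhere and not shown in this section. A
# minimal sketch of what such a helper could look like, assuming it returns a
# QGIS field-calculator expression for the straight line through
# (pointA, pointB) and (pointC, pointD) applied to the attribute named by
# `value`; the name `calculateLinealSketch` and this behaviour are assumptions
# made only for illustration.
def calculateLinealSketch(value, pointA, pointB, pointC, pointD):
    slope = (pointD - pointB) / (pointC - pointA)
    intercept = pointB - slope * pointA
    # Only the expression string is handed to the field calculator.
    return '({m} * "{field}") + {b}'.format(m=slope, field=value, b=intercept)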
def processAlgorithm(self, params, context, feedback): steps = 0 totalStpes = 17 fieldDpaMan = params['DPA_MAN'] # fieldHab = params['NUMBER_HABITANTS'] feedback = QgsProcessingMultiStepFeedback(totalStpes, feedback) if not OPTIONAL_GRID_INPUT: params['CELL_SIZE'] = P_CELL_SIZE grid, isStudyArea = buildStudyArea(params['CELL_SIZE'], params['BLOCKS'], params['STUDY_AREA_GRID'], context, feedback) gridNeto = grid steps = steps + 1 feedback.setCurrentStep(steps) pathCsvVivienda = params['CENSO_VIVIENDA'] file = pathCsvVivienda cols = [ 'I01', 'I02', 'I03', 'I04', 'I05', 'I06', 'I09', 'I10', 'V02', 'V04', 'V06' ] df = pd.read_csv(file, usecols=cols) # fix codes df['I01'] = df['I01'].astype(str) df['I02'] = df['I02'].astype(str) df['I03'] = df['I03'].astype(str) df['I04'] = df['I04'].astype(str) df['I05'] = df['I05'].astype(str) df['I06'] = df['I06'].astype(str) df['I09'] = df['I09'].astype(str) df['I10'] = df['I10'].astype(str) df.loc[df['I01'].str.len() < 2, 'I01'] = "0" + df['I01'] df.loc[df['I02'].str.len() < 2, 'I02'] = "0" + df['I02'] df.loc[df['I03'].str.len() < 2, 'I03'] = "0" + df['I03'] df.loc[df['I04'].str.len() == 1, 'I04'] = "00" + df['I04'] df.loc[df['I04'].str.len() == 2, 'I04'] = "0" + df['I04'] df.loc[df['I05'].str.len() == 1, 'I05'] = "00" + df['I05'] df.loc[df['I05'].str.len() == 2, 'I05'] = "0" + df['I05'] df.loc[df['I06'].str.len() < 2, 'I06'] = "0" + df['I06'] df.loc[df['I09'].str.len() == 1, 'I09'] = "00" + df['I09'] df.loc[df['I09'].str.len() == 2, 'I09'] = "0" + df['I09'] df.loc[df['I10'].str.len() < 2, 'I10'] = "0" + df['I10'] df['codman'] = df['I01'].astype(str) + df['I02'].astype(str) + df['I03'].astype(str) \ + df['I04'].astype(str) + df['I05'].astype(str) + df['I06'].astype(str) #Para el cálculo se utilizan los datos de las cubiertas, # pareds y pisos en estado MALO según el Censo de pobalción y vivienda 2010. # V02: categoría 3 (techo). # V04: categoría 3 (paredes). # V06: categoría 3 (piso). 
df['pt'] = 1.0 df['vivcarencias'] = 0.0 df.loc[(df['V02'] == '3') & (df['V04'] == '3') & (df['V06'] == '3'), 'vivcarencias'] = 1.0 df['pt'] = df['pt'].astype(float) df['vivcarencias'] = df['vivcarencias'].astype(float) aggOptions = { 'codman': 'first', 'pt': 'count', 'vivcarencias': 'sum', } resManzanas = df.groupby('codman').agg(aggOptions) resManzanas['pobconcaren'] = None resManzanas['pobconcaren'] = (resManzanas['vivcarencias'] / resManzanas['pt']) * 100 df = resManzanas steps = steps + 1 feedback.setCurrentStep(steps) outputCsv = self.CURRENT_PATH + '/pobconcaren.csv' feedback.pushConsoleInfo(str(('pobconcaren en ' + outputCsv))) df.to_csv(outputCsv, index=False) steps = steps + 1 feedback.setCurrentStep(steps) exitCsv = os.path.exists(outputCsv) if (exitCsv): print("El archivo CSV existe") else: print("No se encuentra CSV") CSV = QgsVectorLayer(outputCsv, "csv", "ogr") featuresCSV = CSV.getFeatures() # fields = layer.dataProvider().fields() field_names = [field.name() for field in CSV.fields()] print(field_names) steps = steps + 1 feedback.setCurrentStep(steps) steps = steps + 1 feedback.setCurrentStep(steps) result = joinByAttr2(params['BLOCKS'], fieldDpaMan, outputCsv, 'codman', [], UNDISCARD_NONMATCHING, '', 1, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) expressionNotNull = "pobconcaren IS NOT '' AND pobconcaren is NOT NULL" notNull = filterByExpression(result['OUTPUT'], expressionNotNull, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = 'pobconcaren * 1.0' result = calculateField(notNull['OUTPUT'], 'pobconcaren_n', formulaDummy, context, feedback) # ----------------------CONVERTIR A NUMERICOS -------------------- steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = 'vivcarencias * 1.0' result = calculateField(result['OUTPUT'], 'vivcarencias_n', formulaDummy, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = 'pt * 1.0' result = calculateField(result['OUTPUT'], 'pt_n', formulaDummy, context, feedback) # ----------------------PROPORCIONES AREA-------------------------- steps = steps + 1 feedback.setCurrentStep(steps) blocks = calculateArea(result['OUTPUT'], 'area_bloc', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) segments = intersection(blocks['OUTPUT'], gridNeto['OUTPUT'], ['vivcarencias_n', 'pt_n', 'area_bloc'], ['id_grid', 'area_grid'], context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) segmentsArea = calculateArea(segments['OUTPUT'], 'area_seg', context, feedback) # -------------------------PROPORCIONES VALORES------------------------- steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = '(area_seg/area_bloc) * vivcarencias_n' result = calculateField(segmentsArea['OUTPUT'], 'vivcarencias_n_seg', formulaDummy, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = '(area_seg/area_bloc) * pt_n' result = calculateField(result['OUTPUT'], 'pt_n_seg', formulaDummy, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) result = makeSureInside(result['OUTPUT'], context, feedback) #---------------------------------------------------------------------- steps = steps + 1 feedback.setCurrentStep(steps) result = joinByLocation(gridNeto['OUTPUT'], result['OUTPUT'], ['vivcarencias_n_seg', 'pt_n_seg'], [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = '(vivcarencias_n_seg_sum/pt_n_seg_sum) * 100' result = 
calculateField(result['OUTPUT'], NAMES_INDEX['ID02'][0], formulaDummy, context, feedback, params['OUTPUT']) # steps = steps+1 # feedback.setCurrentStep(steps) # gridNeto = joinByLocation(gridNeto['OUTPUT'], # result['OUTPUT'], # ['pobconcaren_n'], # [INTERSECTA], [MEDIA], # UNDISCARD_NONMATCHING, # context, # feedback) # fieldsMapping = [ # {'expression': '"id_grid"', 'length': 10, 'name': 'id_grid', 'precision': 0, 'type': 4}, # {'expression': '"area_grid"', 'length': 16, 'name': 'area_grid', 'precision': 3, 'type': 6}, # {'expression': '"acceso_inter_n_mean"', 'length': 20, 'name': NAMES_INDEX['ID02'][0], 'precision': 2, 'type': 6} # ] # steps = steps+1 # feedback.setCurrentStep(steps) # result = refactorFields(fieldsMapping, gridNeto['OUTPUT'], # context, # feedback, params['OUTPUT']) return result
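# --- Illustrative sketch (not part of the plugin) ---------------------------
# The routine above apportions block-level counts to grid cells by the share of
# block area falling inside each cell, sums the weighted values per cell, and
# takes a percentage. A standalone sketch of that arithmetic with made-up
# numbers; the field names mirror the ones used above but the data is
# hypothetical.
segments = [
    # (id_grid, area_bloc, area_seg, vivcarencias_n, pt_n)
    ('g1', 100.0, 60.0, 10.0, 40.0),
    ('g1', 80.0, 20.0, 4.0, 20.0),
    ('g2', 100.0, 40.0, 10.0, 40.0),
]
per_grid = {}
for id_grid, area_bloc, area_seg, vivcarencias_n, pt_n in segments:
    w = area_seg / area_bloc                       # share of the block inside this cell
    num, den = per_grid.get(id_grid, (0.0, 0.0))
    per_grid[id_grid] = (num + w * vivcarencias_n, den + w * pt_n)
pobconcaren = {g: num / den * 100 for g, (num, den) in per_grid.items()}
print(pobconcaren)  # {'g1': 24.1..., 'g2': 25.0}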
def processAlgorithm(self, params, context, feedback): steps = 0 totalStpes = 17 fieldDpa = params['DPA_SECTOR'] # fieldHab = params['NUMBER_HABITANTS'] feedback = QgsProcessingMultiStepFeedback(totalStpes, feedback) if not OPTIONAL_GRID_INPUT: params['CELL_SIZE'] = P_CELL_SIZE grid, isStudyArea = buildStudyArea(params['CELL_SIZE'], params['BLOCKS'], params['STUDY_AREA_GRID'], context, feedback) gridNeto = grid steps = steps+1 feedback.setCurrentStep(steps) path = params['ENCUESTA'] file = path cols = ['CIUDAD', 'ZONA', 'SECTOR', 'VIVIENDA', 'HOGAR', 'I52'] df = pd.read_csv(file, usecols=cols) # fix codes df['CIUDAD'] = df['CIUDAD'].astype(str) df['ZONA'] = df['ZONA'].astype(str) df['SECTOR'] = df['SECTOR'].astype(str) df['VIVIENDA'] = df['VIVIENDA'].astype(str) df['HOGAR'] = df['HOGAR'].astype(str) df.loc[df['CIUDAD'].str.len() == 5, 'CIUDAD'] = "0" + df['CIUDAD'] df.loc[df['ZONA'].str.len() == 1, 'ZONA'] = "00" + df['ZONA'] df.loc[df['ZONA'].str.len() == 2, 'ZONA'] = "0" + df['ZONA'] df.loc[df['SECTOR'].str.len() == 1, 'SECTOR'] = "00" + df['SECTOR'] df.loc[df['SECTOR'].str.len() == 2, 'SECTOR'] = "0" + df['SECTOR'] df.loc[df['VIVIENDA'].str.len() == 1, 'VIVIENDA'] = "0" + df['VIVIENDA'] # I52, categorías 1 y 2 (muy inseguro e inseguro) df['pobinse'] = 0.0 df.loc[(df['I52'] == 'Inseguro') | (df['I52'] == 'Muy inseguro'), 'pobinse'] = 1.0 # codigo sector df['codsec'] = df['CIUDAD'].astype(str) + df['ZONA'].astype(str) + df['SECTOR'].astype(str) df['codzon'] = df['CIUDAD'].astype(str) + df['ZONA'].astype(str) df.rename(columns={'CIUDAD':'pbt'}, inplace=True) aggOptions = { 'codzon' : 'first', 'pbt' : 'count', 'pobinse' : 'sum', } resManzanas = df.groupby('codzon').agg(aggOptions) resManzanas['percepcionins'] = None resManzanas['percepcionins'] = (resManzanas['pobinse'] / resManzanas['pbt']) * 100 df = resManzanas steps = steps+1 feedback.setCurrentStep(steps) outputCsv = self.CURRENT_PATH+'/percepcionins.csv' feedback.pushConsoleInfo(str(('percepcionins en ' + outputCsv))) df.to_csv(outputCsv, index=False) steps = steps+1 feedback.setCurrentStep(steps) exitCsv = os.path.exists(outputCsv) if(exitCsv): print("El archivo CSV existe") else: print("No se encuentra CSV") CSV = QgsVectorLayer(outputCsv, "csv", "ogr") featuresCSV = CSV.getFeatures() # fields = layer.dataProvider().fields() field_names = [field.name() for field in CSV.fields()] print(field_names) steps = steps+1 feedback.setCurrentStep(steps) result = joinByAttr2(params['BLOCKS'], fieldDpa, outputCsv, 'codzon', [], UNDISCARD_NONMATCHING, '', 1, context, feedback) # steps = steps+1 # feedback.setCurrentStep(steps) # expressionNotNull = "percepcionins IS NOT '' AND percepcionins is NOT NULL" # result = filterByExpression(result['OUTPUT'], expressionNotNull, context, feedback) # ----------------------CONVERTIR A NUMERICOS -------------------- steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = 'pobinse * 1.0' result = calculateField(result['OUTPUT'], 'pobinse_n', formulaDummy, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = 'pbt * 1.0' result = calculateField(result['OUTPUT'], 'pbt_n', formulaDummy, context, feedback) # ----------------------PROPORCIONES AREA-------------------------- steps = steps+1 feedback.setCurrentStep(steps) blocks = calculateArea(result['OUTPUT'], 'area_bloc', context, feedback) steps = steps+1 feedback.setCurrentStep(steps) segments = intersection(blocks['OUTPUT'], gridNeto['OUTPUT'], ['pobinse_n','pbt_n','area_bloc'], ['id_grid','area_grid'], context, feedback) steps 
= steps+1 feedback.setCurrentStep(steps) segmentsArea = calculateArea(segments['OUTPUT'], 'area_seg', context, feedback) # -------------------------PROPORCIONES VALORES------------------------- steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = '(area_seg/area_bloc) * pobinse_n' result = calculateField(segmentsArea['OUTPUT'], 'pobinse_n_seg', formulaDummy, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = '(area_seg/area_bloc) * pbt_n' result = calculateField(result['OUTPUT'], 'pbt_n_seg', formulaDummy, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) result = makeSureInside(result['OUTPUT'], context, feedback) #---------------------------------------------------------------------- steps = steps+1 feedback.setCurrentStep(steps) result = joinByLocation(gridNeto['OUTPUT'], result['OUTPUT'], ['pobinse_n_seg','pbt_n_seg'], [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = '(pobinse_n_seg_sum/pbt_n_seg_sum) * 100' result = calculateField(result['OUTPUT'], NAMES_INDEX['ID15'][0], formulaDummy, context, feedback, params['OUTPUT']) # steps = steps+1 # feedback.setCurrentStep(steps) # gridNeto = joinByLocation(gridNeto['OUTPUT'], # result['OUTPUT'], # ['pobinse_viv_n'], # [INTERSECTA], [MEDIA], # UNDISCARD_NONMATCHING, # context, # feedback) # fieldsMapping = [ # {'expression': '"id_grid"', 'length': 10, 'name': 'id_grid', 'precision': 0, 'type': 4}, # {'expression': '"area_grid"', 'length': 16, 'name': 'area_grid', 'precision': 3, 'type': 6}, # {'expression': '"tenencia_viv_n_mean"', 'length': 20, 'name': NAMES_INDEX['ID15'][0], 'precision': 2, 'type': 6} # ] # steps = steps+1 # feedback.setCurrentStep(steps) # result = refactorFields(fieldsMapping, gridNeto['OUTPUT'], # context, # feedback, params['OUTPUT']) return result
def processAlgorithm(self, params, context, feedback): steps = 0 totalStpes = 31 # fieldPopulation = params['FIELD_POPULATION'] fieldHousing = params['FIELD_HOUSING'] DISTANCE_EDUCATION = 500 DISTANCE_HEALTH = 1200 DISTANCE_APPROVAL = 500 DISTANCE_SPORTS = 1000 DISTANCE_ADMIN_PUBLIC = 1000 MIN_FACILITIES = 5 OPERATOR_GE = 3 feedback = QgsProcessingMultiStepFeedback(totalStpes, feedback) """ ----------------------------------------------------------------- Calcular las facilidades ----------------------------------------------------------------- """ steps = steps + 1 feedback.setCurrentStep(steps) if not OPTIONAL_GRID_INPUT: params['CELL_SIZE'] = P_CELL_SIZE grid, isStudyArea = buildStudyArea(params['CELL_SIZE'], params['BLOCKS'], params['STUDY_AREA_GRID'], context, feedback) gridNeto = grid steps = steps + 1 feedback.setCurrentStep(steps) blocks = calculateArea(params['BLOCKS'], 'area_bloc', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) segments = intersection(blocks['OUTPUT'], gridNeto['OUTPUT'], 'area_bloc;' + fieldHousing, 'id_grid', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) segmentsArea = calculateArea(segments['OUTPUT'], 'area_seg', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaHousingSegments = '(area_seg/area_bloc) * ' + fieldHousing housingForSegments = calculateField(segmentsArea['OUTPUT'], 'hou_seg', formulaHousingSegments, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksWithId = calculateField(housingForSegments['OUTPUT'], 'id_block', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) centroidsBlocks = createCentroids(blocksWithId['OUTPUT'], context, feedback) result = [] idxs = ['idxedu', 'idxhea', 'idxapp', 'idkspor', 'idxadmin'] if (params['DISTANCE_OPTIONS'] == 0): steps = steps + 1 feedback.setCurrentStep(steps) feedback.pushConsoleInfo(str(('Cálculo de áreas de servicio'))) layers = [ [params['EDUCATION'], STRATEGY_DISTANCE, DISTANCE_EDUCATION], [params['HEALTH'], STRATEGY_DISTANCE, DISTANCE_HEALTH], [params['APPROVAL'], STRATEGY_DISTANCE, DISTANCE_APPROVAL], [params['SPORTS'], STRATEGY_DISTANCE, DISTANCE_SPORTS], [ params['ADMIN_PUBLIC'], STRATEGY_DISTANCE, DISTANCE_ADMIN_PUBLIC ], ] serviceAreas = multiBufferIsocrono(params['ROADS'], layers, context, feedback) iidx = -1 for serviceArea in serviceAreas: iidx = iidx + 1 idx = idxs[iidx] steps = steps + 1 feedback.setCurrentStep(steps) serviceArea = calculateField(serviceArea, idx, '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) centroidsBlocks = joinByLocation(centroidsBlocks['OUTPUT'], serviceArea['OUTPUT'], [idx], [INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = 'coalesce(idxedu_count, 0) + coalesce(idxhea_count, 0) + coalesce(idxapp_count,0) + coalesce(idkspor_count, 0) + coalesce(idxadmin_count, 0)' facilitiesCover = calculateField(centroidsBlocks['OUTPUT'], 'facilities', formulaDummy, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) facilitiesFullCover = filter(facilitiesCover['OUTPUT'], 'facilities', OPERATOR_GE, MIN_FACILITIES, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoFacilitiesCover = joinByLocation(gridNeto['OUTPUT'], facilitiesCover['OUTPUT'], ['hou_seg', 'facilities'], [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) fieldsMapping = [{ 'expression': '"id_grid"', 'length': 10, 'name': 'id_grid', 
'precision': 0, 'type': 4 }, { 'expression': '"area_grid"', 'length': 16, 'name': 'area_grid', 'precision': 3, 'type': 6 }, { 'expression': '"hou_seg_sum"', 'length': 20, 'name': 'ptotal', 'precision': 2, 'type': 6 }, { 'expression': '"facilities_sum"', 'length': 20, 'name': 'facilities', 'precision': 2, 'type': 6 }] steps = steps + 1 feedback.setCurrentStep(steps) gridNetoFacilitiesCover = refactorFields( fieldsMapping, gridNetoFacilitiesCover['OUTPUT'], context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoFacilities = joinByLocation( gridNetoFacilitiesCover['OUTPUT'], facilitiesFullCover['OUTPUT'], ['hou_seg'], [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaProximity = 'coalesce((coalesce(hou_seg_sum,0) / coalesce(ptotal,""))*100,"")' proximity2BasicU = calculateField(gridNetoFacilities['OUTPUT'], NAMES_INDEX['IA07'][0], formulaProximity, context, feedback, params['OUTPUT']) result = proximity2BasicU else: feedback.pushConsoleInfo(str(('Cálculo de buffer radial'))) steps = steps + 1 feedback.setCurrentStep(steps) blockBuffer4Education = createBuffer(centroidsBlocks['OUTPUT'], DISTANCE_EDUCATION, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blockBuffer4Health = createBuffer(centroidsBlocks['OUTPUT'], DISTANCE_HEALTH, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blockBuffer4Approval = createBuffer(centroidsBlocks['OUTPUT'], DISTANCE_APPROVAL, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) BlockBuffer4Sports = createBuffer(centroidsBlocks['OUTPUT'], DISTANCE_SPORTS, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) BlockBuffer4Admin = createBuffer(centroidsBlocks['OUTPUT'], DISTANCE_ADMIN_PUBLIC, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) layerEducation = calculateField(params['EDUCATION'], 'idx', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) layerHealth = calculateField(params['HEALTH'], 'idx', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) layerApproval = calculateField(params['APPROVAL'], 'idx', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) layerSports = calculateField(params['SPORTS'], 'idx', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) layerAdmin = calculateField(params['ADMIN_PUBLIC'], 'idx', '$id', context, feedback, type=1) layerEducation = layerEducation['OUTPUT'] layerHealth = layerHealth['OUTPUT'] layerApproval = layerApproval['OUTPUT'] layerSports = layerSports['OUTPUT'] layerAdmin = layerAdmin['OUTPUT'] steps = steps + 1 feedback.setCurrentStep(steps) counterEducation = joinByLocation(blockBuffer4Education['OUTPUT'], layerEducation, 'idx', [CONTIENE, INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) counterHealth = joinByLocation(blockBuffer4Health['OUTPUT'], layerHealth, 'idx', [CONTIENE, INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) counterApproval = joinByLocation(blockBuffer4Approval['OUTPUT'], layerApproval, 'idx', [CONTIENE, INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) counterSport = joinByLocation(BlockBuffer4Sports['OUTPUT'], layerSports, 'idx', [CONTIENE, INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = 
steps + 1 feedback.setCurrentStep(steps) counterAdmin = joinByLocation(BlockBuffer4Admin['OUTPUT'], layerAdmin, 'idx', [CONTIENE, INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksWithId['OUTPUT'], 'id_block', counterEducation['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'edu_', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksJoined['OUTPUT'], 'id_block', counterHealth['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'hea_', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksJoined['OUTPUT'], 'id_block', counterApproval['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'app_', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksJoined['OUTPUT'], 'id_block', counterSport['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'spo_', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksJoined['OUTPUT'], 'id_block', counterAdmin['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'adm_', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaFacilities = 'edu_idx_count * hea_idx_count * app_idx_count * spo_idx_count * adm_idx_count' blocksFacilities = calculateField(blocksJoined['OUTPUT'], 'facilities', formulaFacilities, context, feedback) """ ----------------------------------------------------------------- Calcular numero de viviendas por hexagano ----------------------------------------------------------------- """ # Haciendo el buffer inverso aseguramos que los segmentos # quden dentro de la malla steps = steps + 1 feedback.setCurrentStep(steps) facilitiesForSegmentsFixed = makeSureInside( blocksFacilities['OUTPUT'], context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoAndSegments = joinByLocation( gridNeto['OUTPUT'], facilitiesForSegmentsFixed['OUTPUT'], 'edu_idx_count;hea_idx_count;app_idx_count;spo_idx_count;adm_idx_count;facilities;hou_seg', [CONTIENE], [MAX, SUM], UNDISCARD_NONMATCHING, context, feedback) # tomar solo los que tienen cercania simultanea (descartar NULL) steps = steps + 1 feedback.setCurrentStep(steps) facilitiesNotNullForSegmentsFixed = filter( facilitiesForSegmentsFixed['OUTPUT'], 'facilities', NOT_NULL, '', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoAndSegmentsNotNull = joinByLocation( gridNeto['OUTPUT'], facilitiesNotNullForSegmentsFixed['OUTPUT'], 'hou_seg', [CONTIENE], [MAX, SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) totalHousing = joinByAttr(gridNetoAndSegments['OUTPUT'], 'id_grid', gridNetoAndSegmentsNotNull['OUTPUT'], 'id_grid', 'hou_seg_sum', UNDISCARD_NONMATCHING, 'net_', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaProximity = 'coalesce((coalesce(net_hou_seg_sum,0) / coalesce(hou_seg_sum,0))*100, "")' proximity2BasicU = calculateField(totalHousing['OUTPUT'], NAMES_INDEX['IA07'][0], formulaProximity, context, feedback, params['OUTPUT']) result = proximity2BasicU return proximity2BasicU
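# --- Illustrative sketch (not part of the plugin) ---------------------------
# Two coverage tests appear above. The service-area branch sums the per-type
# counts and keeps blocks with facilities >= MIN_FACILITIES (5); the radial
# buffer branch multiplies the five per-type counts, so a facility type with
# zero hits makes the product zero. Hypothetical counts to show the difference:
counts = {'edu': 3, 'hea': 2, 'app': 1, 'spo': 0, 'adm': 1}
total = sum(counts.values())
product = 1
for c in counts.values():
    product *= c
print(total >= 5)      # True: 7 service areas reached in total
print(product)         # 0: no sports facility within reach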
def processAlgorithm(self, parameters, context, model_feedback): """ Here is where the processing itself takes place. """ results = {} feedback = QgsProcessingMultiStepFeedback(5, model_feedback) addressLayer = self.parameterAsVectorLayer(parameters, "addresslayer", context) addressfields = self.parameterAsFields(parameters, 'addressfield', context) popmeshLayer = self.parameterAsVectorLayer(parameters, "popmeshlayer", context) if popmeshLayer is None: raise QgsProcessingException(self.tr('popmesh layer missed')) popmeshidfields = self.parameterAsFields(parameters, 'popmeshid', context) popmeshpopfields = self.parameterAsFields(parameters, 'popmeshpop', context) dpop_fieldname = self.parameterAsString(parameters, "POPCOLUMN", context) feedback.setCurrentStep(1) if feedback.isCanceled(): return {} meshid = popmeshidfields[0] # 行政界の面積計算 # # 面積出力フィールド名 area_column = 'mesh_area' params3 = { 'INPUT': popmeshLayer, 'FIELD_NAME': area_column, 'FIELD_TYPE': 0, 'FIELD_LENGTH': 12, 'FIELD_PRECISION': 5, 'NEW_FIELD': 1, 'FORMULA': '$area', 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } res3 = processing.run('qgis:fieldcalculator', params3, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("caluculate area OK ") # ここから関数化がいいかも # メッシュと行政界のIntesect feedback.setCurrentStep(2) params2 = { 'INPUT': res3["OUTPUT"], 'INPUT_FIELDS': [], 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT, 'OVERLAY': addressLayer, 'OVERLAY_FIELDS': [] } # 'OUTPUT' : parameters["OUTPUT"], 'OVERLAY' : res3["OUTPUT"], 'OVERLAY_FIELDS' : [] } res2 = processing.run('qgis:union', params2, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("union OK ") # union ポリゴンの面積計算 feedback.setCurrentStep(3) params_del = { 'INPUT': res2["OUTPUT"], 'COLUMN': ['fid'], 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } #'OUTPUT' : parameters["OUTPUT"] } res_del = processing.run('qgis:deletecolumn', params_del, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("delete column OK ") feedback.setCurrentStep(4) alg_paramsg_n = { 'LAYERS': res_del["OUTPUT"], 'OVERWRITE': False, 'SAVE_STYLES': False, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } res_n = processing.run('native:package', alg_paramsg_n, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("convert to geopackage OK ") feedback.setCurrentStep(5) area_column2 = 'div_area' params4 = { 'INPUT': res_n["OUTPUT"], 'FIELD_NAME': area_column2, 'FIELD_TYPE': 0, 'FIELD_LENGTH': 12, 'FIELD_PRECISION': 5, 'NEW_FIELD': 1, 'FORMULA': '$area', 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } # 'NEW_FIELD':1,'FORMULA':'$area','OUTPUT' : parameters["OUTPUT"] } res4 = processing.run('qgis:fieldcalculator', params4, context=context, feedback=feedback, is_child_algorithm=True) # 分割ポリゴンの面積と元ポリゴンの面積の比率にメッシュ人口をかけて分割ポリゴンの想定人口を算出する ppopfield = popmeshpopfields[0] new_column = dpop_fieldname exp_str = area_column2 + "/" + area_column + "*" + ppopfield feedback.pushConsoleInfo("expression " + exp_str) params5 = { 'INPUT': res4["OUTPUT"], 'FIELD_NAME': new_column, 'FIELD_TYPE': 0, 'FIELD_LENGTH': 12, 'FIELD_PRECISION': 5, # 'NEW_FIELD':1,'FORMULA':exp_str ,'OUTPUT' :QgsProcessing.TEMPORARY_OUTPUT } 'NEW_FIELD': 1, 'FORMULA': exp_str, 'OUTPUT': parameters["OUTPUT"] } res5 = processing.run('qgis:fieldcalculator', params5, context=context, feedback=feedback, 
                           is_child_algorithm=True)
    feedback.pushConsoleInfo("snum calc end ")
    results["OUTPUT"] = res5["OUTPUT"]
    return results
    # NOTE: the unconditional return above makes the rest of this method
    # unreachable; it is kept here only for reference.
    params_del2 = {
        'INPUT': res5["OUTPUT"],
        'COLUMN': ['fid'],
        'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT
    }  # 'OUTPUT' : parameters["OUTPUT"] }
    res_del2 = processing.run('qgis:deletecolumn', params_del2, context=context,
                              feedback=feedback, is_child_algorithm=True)
    if feedback.isCanceled():
        return {}
    feedback.pushConsoleInfo("delete column 2 OK ")
    feedback.setCurrentStep(6)
    alg_paramsg_n2 = {
        'LAYERS': res_del2["OUTPUT"],
        'OVERWRITE': False,
        'SAVE_STYLES': False,
        'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT
    }
    res_n2 = processing.run('native:package', alg_paramsg_n2, context=context,
                            feedback=feedback, is_child_algorithm=True)
    tgLayer = res5["OUTPUT"]
    if type(tgLayer) is str:
        feedback.pushConsoleInfo("tglayer is string")
        tgLayer = QgsVectorLayer(tgLayer, "union", "memory")
    agar = []
    # Specify the aggregation method for each field
    for field in tgLayer.fields():
        agreg = {}
        feedback.pushConsoleInfo("name " + field.name())
        if field.name() != "fid":
            agreg['input'] = '"' + field.name() + '"'
            agreg['name'] = field.name()
            agreg['aggregate'] = 'first_value'
            agreg['length'] = field.length()
            agreg['precision'] = field.precision()
            agreg['type'] = field.type()
            if field.name() == new_column:
                agreg['aggregate'] = 'sum'
            agar.append(agreg)
    addressf = addressfields[0]
    # Aggregation
    # params6 = { 'INPUT' : res5["OUTPUT"], 'GROUP_BY' : addressf, 'AGGREGATES': agar, 'OUTPUT' :QgsProcessing.TEMPORARY_OUTPUT }
    params6 = {
        'INPUT': res5["OUTPUT"],
        'GROUP_BY': addressf,
        'AGGREGATES': agar,
        'OUTPUT': parameters["OUTPUT"]
    }
    feedback.pushConsoleInfo("aggregate ")
    res8 = processing.run('qgis:aggregate', params6, context=context,
                          feedback=feedback, is_child_algorithm=True)
    if feedback.isCanceled():
        return {}
    feedback.pushConsoleInfo("aggregate OK ")
    # Join layers (qgis:joinattributestable)
    # QgsProject.instance().addMapLayer(res7["OUTPUT"])
    param7 = {
        'DISCARD_NONMATCHING': False,
        'FIELD': addressf,
        'FIELDS_TO_COPY': [new_column],
        'FIELD_2': addressf,
        'INPUT': addressLayer,
        'INPUT_2': res8['OUTPUT'],
        'METHOD': 1,
        'OUTPUT': parameters["OUTPUT"],
        'PREFIX': ''
    }
    res9 = processing.run('qgis:joinattributestable', param7, context=context,
                          feedback=feedback, is_child_algorithm=True)
    if feedback.isCanceled():
        return {}
    feedback.pushConsoleInfo("joinattributetable OK")
    # Return the results of the algorithm. In this case our only result is
    # the feature sink which contains the processed features, but some
    # algorithms may return multiple feature sinks, calculated numeric
    # statistics, etc. These should all be included in the returned
    # dictionary, with keys matching the feature corresponding parameter
    # or output names.
    # Format conversion (gdal_translate)
    alg_params = {
        'INPUT': res2["OUTPUT"],
        'OPTIONS': '',
        'OUTPUT': parameters['OUTPUT']
    }
    # ocv = processing.run('gdal:convertformat', alg_params, context=context, feedback=feedback, is_child_algorithm=True)
    results["OUTPUT"] = res9["OUTPUT"]
    return results
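# --- Illustrative sketch (not part of the plugin) ---------------------------
# The field-calculator expression built above (div_area / mesh_area * pop) is a
# simple areal interpolation: each union piece receives the mesh population in
# proportion to the share of the mesh area it covers. With made-up numbers:
mesh_area, mesh_pop = 2.0, 1000.0     # whole mesh cell: 2.0 km^2, 1000 people
div_area = 0.5                        # piece of the cell produced by the union
estimated_pop = div_area / mesh_area * mesh_pop
print(estimated_pop)                  # 250.0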
def processAlgorithm(self, params, context, feedback): steps = 0 totalStpes = 17 fieldDpaMan = params['DPA_MAN'] # fieldHab = params['NUMBER_HABITANTS'] feedback = QgsProcessingMultiStepFeedback(totalStpes, feedback) if not OPTIONAL_GRID_INPUT: params['CELL_SIZE'] = P_CELL_SIZE grid, isStudyArea = buildStudyArea(params['CELL_SIZE'], params['BLOCKS'], params['STUDY_AREA_GRID'], context, feedback) gridNeto = grid steps = steps+1 feedback.setCurrentStep(steps) # pathCsvPoblacion = params['CENSO_POBLACION'] pathCsvHogar = params['CENSO_HOGAR'] pathCsvVivienda = params['CENSO_VIVIENDA'] fileH = pathCsvHogar colsH = ['I01', 'I02', 'I03', 'I04', 'I05', 'I06', 'I09','H15'] df = pd.read_csv(fileH, usecols=colsH) # fix codes df['I01'] = df['I01'].astype(str) df['I02'] = df['I02'].astype(str) df['I03'] = df['I03'].astype(str) df['I04'] = df['I04'].astype(str) df['I05'] = df['I05'].astype(str) df['I06'] = df['I06'].astype(str) df['I09'] = df['I09'].astype(str) df.loc[df['I01'].str.len() < 2, 'I01'] = "0" + df['I01'] df.loc[df['I02'].str.len() < 2, 'I02'] = "0" + df['I02'] df.loc[df['I03'].str.len() < 2, 'I03'] = "0" + df['I03'] df.loc[df['I04'].str.len() == 1, 'I04'] = "00" + df['I04'] df.loc[df['I04'].str.len() == 2, 'I04'] = "0" + df['I04'] df.loc[df['I05'].str.len() == 1, 'I05'] = "00" + df['I05'] df.loc[df['I05'].str.len() == 2, 'I05'] = "0" + df['I05'] df.loc[df['I06'].str.len() < 2, 'I06'] = "0" + df['I06'] df.loc[df['I09'].str.len() == 1, 'I09'] = "00" + df['I09'] df.loc[df['I09'].str.len() == 2, 'I09'] = "0" + df['I09'] df['codv'] = df['I01'].astype(str) + df['I02'].astype(str) + df['I03'].astype(str) \ + df['I04'].astype(str) + df['I05'].astype(str) + df['I06'].astype(str) \ + df['I09'].astype(str) # Calcular tenencia de la vivienda # 1 Propia y totalmente pagada # 2 Propia y la está pagando # 3 Propia? 
(regalada, donada, heredada o por posesión # 4 Prestada o cedida (no paga) # 5 Por servicios # 6 Arrendada # 7 Anticresis df['tenencia'] = None df['H15'] = df['H15'].astype(str) df.loc[(df['H15'] >= '1') & (df['H15'] <= '3'), 'tenencia'] = 1 df.loc[(df['H15'] >= '4') & (df['H15'] < '6'), 'tenencia'] = 0 df.loc[df['H15'] == '6', 'tenencia'] = 1 df.loc[df['H15'] > '6', 'tenencia'] = 0 df['tenencia'] = df['tenencia'].astype(float) group = df.groupby('codv')['tenencia'].sum() df = group fileV = pathCsvVivienda colsV = ['I01', 'I02', 'I03', 'I04', 'I05', 'I06', 'I09', 'I10', 'V16', 'TOTPER' ] dfV = pd.read_csv(fileV, usecols=colsV) # fix codes dfV['I01'] = dfV['I01'].astype(str) dfV['I02'] = dfV['I02'].astype(str) dfV['I03'] = dfV['I03'].astype(str) dfV['I04'] = dfV['I04'].astype(str) dfV['I05'] = dfV['I05'].astype(str) dfV['I06'] = dfV['I06'].astype(str) dfV['I09'] = dfV['I09'].astype(str) dfV['I10'] = dfV['I10'].astype(str) dfV.loc[dfV['I01'].str.len() < 2, 'I01'] = "0" + dfV['I01'] dfV.loc[dfV['I02'].str.len() < 2, 'I02'] = "0" + dfV['I02'] dfV.loc[dfV['I03'].str.len() < 2, 'I03'] = "0" + dfV['I03'] dfV.loc[dfV['I04'].str.len() == 1, 'I04'] = "00" + dfV['I04'] dfV.loc[dfV['I04'].str.len() == 2, 'I04'] = "0" + dfV['I04'] dfV.loc[dfV['I05'].str.len() == 1, 'I05'] = "00" + dfV['I05'] dfV.loc[dfV['I05'].str.len() == 2, 'I05'] = "0" + dfV['I05'] dfV.loc[dfV['I06'].str.len() < 2, 'I06'] = "0" + dfV['I06'] dfV.loc[dfV['I09'].str.len() == 1, 'I09'] = "00" + dfV['I09'] dfV.loc[dfV['I09'].str.len() == 2, 'I09'] = "0" + dfV['I09'] dfV.loc[dfV['I10'].str.len() < 2, 'I10'] = "0" + dfV['I10'] dfV['codv'] = dfV['I01'].astype(str) + dfV['I02'].astype(str) + dfV['I03'].astype(str) \ + dfV['I04'].astype(str) + dfV['I05'].astype(str) + dfV['I06'].astype(str) \ + dfV['I09'].astype(str) merge = None merge = pd.merge(dfV, df, how='left', on='codv') merge.loc[merge['V16'] == ' ', 'V16'] = None df = merge df['codman'] = df['I01'].astype(str) + df['I02'].astype(str) + df['I03'].astype(str) \ + df['I04'].astype(str) + df['I05'].astype(str) + df['I06'].astype(str) df['V16'] = df['V16'].astype(float) aggOptions = {'codv' : 'count', 'tenencia':'sum', 'V16' : 'sum', 'codman' : 'first' } resManzanas = df.groupby('codman').agg(aggOptions) df = resManzanas df['tenencia_viv'] = (df['tenencia'] / df['V16']) * 100 steps = steps+1 feedback.setCurrentStep(steps) outputCsv = self.CURRENT_PATH+'/tenencia_viv.csv' feedback.pushConsoleInfo(str(('tenencia_viv en ' + outputCsv))) df.to_csv(outputCsv, index=False) steps = steps+1 feedback.setCurrentStep(steps) exitCsv = os.path.exists(outputCsv) if(exitCsv): print("El archivo CSV existe") else: print("No se encuentra CSV") CSV = QgsVectorLayer(outputCsv, "csv", "ogr") featuresCSV = CSV.getFeatures() # fields = layer.dataProvider().fields() field_names = [field.name() for field in CSV.fields()] print(field_names) steps = steps+1 feedback.setCurrentStep(steps) steps = steps+1 feedback.setCurrentStep(steps) result = joinByAttr2(params['BLOCKS'], fieldDpaMan, outputCsv, 'codman', [], UNDISCARD_NONMATCHING, '', 1, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) expressionNotNull = "tenencia_viv IS NOT '' AND tenencia_viv is NOT NULL" notNull = filterByExpression(result['OUTPUT'], expressionNotNull, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = 'tenencia_viv * 1.0' result = calculateField(notNull['OUTPUT'], 'tenencia_viv_n', formulaDummy, context, feedback) # ----------------------CONVERTIR A NUMERICOS -------------------- steps = steps+1 
feedback.setCurrentStep(steps) formulaDummy = 'tenencia * 1.0' result = calculateField(result['OUTPUT'], 'tenencia_n', formulaDummy, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = 'V16 * 1.0' result = calculateField(result['OUTPUT'], 'V16_n', formulaDummy, context, feedback) # ----------------------PROPORCIONES AREA-------------------------- steps = steps+1 feedback.setCurrentStep(steps) blocks = calculateArea(result['OUTPUT'], 'area_bloc', context, feedback) steps = steps+1 feedback.setCurrentStep(steps) segments = intersection(blocks['OUTPUT'], gridNeto['OUTPUT'], ['tenencia_n','V16_n','area_bloc'], ['id_grid','area_grid'], context, feedback) steps = steps+1 feedback.setCurrentStep(steps) segmentsArea = calculateArea(segments['OUTPUT'], 'area_seg', context, feedback) # -------------------------PROPORCIONES VALORES------------------------- steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = '(area_seg/area_bloc) * tenencia_n' result = calculateField(segmentsArea['OUTPUT'], 'tenencia_n_seg', formulaDummy, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = '(area_seg/area_bloc) * V16_n' result = calculateField(result['OUTPUT'], 'V16_n_seg', formulaDummy, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) result = makeSureInside(result['OUTPUT'], context, feedback) #---------------------------------------------------------------------- steps = steps+1 feedback.setCurrentStep(steps) result = joinByLocation(gridNeto['OUTPUT'], result['OUTPUT'], ['tenencia_n_seg','V16_n_seg'], [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = '(tenencia_n_seg_sum/V16_n_seg_sum) * 100' result = calculateField(result['OUTPUT'], NAMES_INDEX['ID10'][0], formulaDummy, context, feedback, params['OUTPUT']) # steps = steps+1 # feedback.setCurrentStep(steps) # gridNeto = joinByLocation(gridNeto['OUTPUT'], # result['OUTPUT'], # ['tenencia_viv_n'], # [INTERSECTA], [MEDIA], # UNDISCARD_NONMATCHING, # context, # feedback) # fieldsMapping = [ # {'expression': '"id_grid"', 'length': 10, 'name': 'id_grid', 'precision': 0, 'type': 4}, # {'expression': '"area_grid"', 'length': 16, 'name': 'area_grid', 'precision': 3, 'type': 6}, # {'expression': '"tenencia_viv_n_mean"', 'length': 20, 'name': NAMES_INDEX['ID10'][0], 'precision': 2, 'type': 6} # ] # steps = steps+1 # feedback.setCurrentStep(steps) # result = refactorFields(fieldsMapping, gridNeto['OUTPUT'], # context, # feedback, params['OUTPUT']) return result
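# --- Illustrative sketch (not part of the plugin) ---------------------------
# The repeated .loc assignments above left-pad the census key columns to fixed
# widths before concatenating them into codv/codman. For the code lengths seen
# in these files the same normalization can be written with Series.str.zfill;
# the widths and sample values here are illustrative.
import pandas as pd

widths = {'I01': 2, 'I02': 2, 'I03': 2, 'I04': 3, 'I05': 3, 'I06': 2, 'I09': 3}
sample = pd.DataFrame({'I01': [1], 'I02': [7], 'I03': [5], 'I04': [12],
                       'I05': [3], 'I06': [9], 'I09': [4]})
for col, width in widths.items():
    sample[col] = sample[col].astype(str).str.zfill(width)
sample['codv'] = sample[['I01', 'I02', 'I03', 'I04', 'I05', 'I06', 'I09']].agg(''.join, axis=1)
print(sample['codv'].iloc[0])  # '01070501200309004'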
def processAlgorithm(self, params, context, feedback): steps = 0 totalStpes = 17 fieldDpa = params['DPA_SECTOR'] # fieldHab = params['NUMBER_HABITANTS'] feedback = QgsProcessingMultiStepFeedback(totalStpes, feedback) if not OPTIONAL_GRID_INPUT: params['CELL_SIZE'] = P_CELL_SIZE grid, isStudyArea = buildStudyArea(params['CELL_SIZE'], params['BLOCKS'], params['STUDY_AREA_GRID'], context, feedback) gridNeto = grid steps = steps + 1 feedback.setCurrentStep(steps) path = params['ENCUESTA'] file = path #p03 edad cols = [ 'id_hogar', 'P03', 'UT98A', 'UT98B', 'UT99A', 'UT99B', 'UT100A', 'UT100B', 'UT101A', 'UT101B', 'UT102A', 'UT102B', 'UT103A', 'UT103B', 'UT104A', 'UT104B', 'UT105A', 'UT105B', 'UT106A', 'UT106B', 'UT107A', 'UT107B', 'UT108A', 'UT108B', 'UT109A', 'UT109B', 'UT110A', 'UT110B', 'UT111A', 'UT111B', 'UT112A', 'UT112B', 'UT113A', 'UT113B', 'UT114A', 'UT114B', 'UT116A', 'UT116B', 'UT117A', 'UT117B', 'UT118A', 'UT118B', 'UT119A', 'UT119B', 'UT120A', 'UT120B', 'UT121A', 'UT121B', 'UT122A', 'UT122B' ] df = pd.read_csv(file, usecols=cols) df['id_hogar'] = df['id_hogar'].astype(str) df['P03'] = df['P03'].astype(str) df.loc[df['id_hogar'].str.len() == 14, 'id_hogar'] = "0" + df['id_hogar'] df['codsec'] = df['id_hogar'].str[0:12] df['codzon'] = df['id_hogar'].str[0:9] df = df[(df['P03'] >= '12')] fieldTimes = cols[2:] fildTimesRename = [] # print(fieldTimes[:]) for fieldTime in fieldTimes: df.loc[(df[fieldTime] == ' '), fieldTime] = "00" df[fieldTime] = df[fieldTime].astype(int) timerSplit = fieldTime.split('A') newName = timerSplit[0] isA = len(timerSplit) == 2 indexElement = fieldTimes.index(fieldTime) if isA: nameB = newName + "B" df.loc[(df[nameB] == ' '), nameB] = "00" df[newName] = df[fieldTime].astype( str) + ":" + df[nameB].astype(str) + ":00" fildTimesRename.append(newName) df['sumTime'] = datetime.timedelta() for field in fildTimesRename: df[field] = pd.to_timedelta(df[field]) df['sumTime'] = df['sumTime'] + df[field] df['hours'] = df['sumTime'].dt.total_seconds() / 3600 df['hours'] = df['hours'].astype(float) aggOptions = { 'codzon': 'first', 'hours': 'mean', } resSectores = df.groupby('codzon').agg(aggOptions) df = resSectores steps = steps + 1 feedback.setCurrentStep(steps) outputCsv = self.CURRENT_PATH + '/usoTiempo.csv' feedback.pushConsoleInfo(str(('usoTiempo en ' + outputCsv))) df.to_csv(outputCsv, index=False) steps = steps + 1 feedback.setCurrentStep(steps) exitCsv = os.path.exists(outputCsv) if (exitCsv): print("El archivo CSV existe") else: print("No se encuentra CSV") CSV = QgsVectorLayer(outputCsv, "csv", "ogr") featuresCSV = CSV.getFeatures() # fields = layer.dataProvider().fields() field_names = [field.name() for field in CSV.fields()] print(field_names) steps = steps + 1 feedback.setCurrentStep(steps) result = joinByAttr2(params['BLOCKS'], fieldDpa, outputCsv, 'codzon', [], UNDISCARD_NONMATCHING, '', 1, context, feedback) # steps = steps+1 # feedback.setCurrentStep(steps) # expressionNotNull = "des IS NOT '' AND des is NOT NULL" # result = filterByExpression(result['OUTPUT'], expressionNotNull, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = 'hours * 1.0' result = calculateField(result['OUTPUT'], 'hours_n', formulaDummy, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNeto = joinByLocation(gridNeto['OUTPUT'], result['OUTPUT'], ['hours_n'], [INTERSECTA], [MEDIA], UNDISCARD_NONMATCHING, context, feedback) fieldsMapping = [{ 'expression': '"id_grid"', 'length': 10, 'name': 'id_grid', 'precision': 0, 'type': 4 }, 
                     {
                         'expression': '"area_grid"',
                         'length': 16,
                         'name': 'area_grid',
                         'precision': 3,
                         'type': 6
                     }, {
                         'expression': '"hours_n_mean"',
                         'length': 20,
                         'name': NAMES_INDEX['ID06'][0],
                         'precision': 2,
                         'type': 6
                     }]
    steps = steps + 1
    feedback.setCurrentStep(steps)
    result = refactorFields(fieldsMapping, gridNeto['OUTPUT'], context,
                            feedback, params['OUTPUT'])
    return result
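# --- Illustrative sketch (not part of the plugin) ---------------------------
# The time-use block above pairs each UT..A column (hours) with its UT..B
# column (minutes), builds "HH:MM:00" strings, converts them with
# pd.to_timedelta and sums them into total hours per respondent. A minimal
# reconstruction with two made-up activity pairs:
import pandas as pd

df = pd.DataFrame({'UT98A': ['02', ' '], 'UT98B': ['30', ' '],
                   'UT99A': ['01', '03'], 'UT99B': ['15', '00']})
total = pd.to_timedelta(0)
for a, b in [('UT98A', 'UT98B'), ('UT99A', 'UT99B')]:
    df[a] = df[a].replace(' ', '00')
    df[b] = df[b].replace(' ', '00')
    total = total + pd.to_timedelta(df[a] + ':' + df[b] + ':00')
df['hours'] = total.dt.total_seconds() / 3600
print(df['hours'].tolist())  # [3.75, 3.0]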
def processAlgorithm(self, parameters, context, model_feedback): """ Here is where the processing itself takes place. """ results = {} feedback = QgsProcessingMultiStepFeedback(1, model_feedback) csvfile = self.parameterAsFile(parameters, self.INPUT, context) if csvfile is None: raise QgsProcessingException(self.tr('csv file error')) #df = QgsVirtualLayerDefinition() enc = self.parameterAsInt(parameters, 'ENCODING', context) #enc = self.parameterAsFile( # parameters, # self.ENCODING, # context #) meshLayer = self.parameterAsVectorLayer(parameters, "meshlayer", context) if meshLayer is None: raise QgsProcessingException(self.tr('mesh layer missed')) meshidfields = self.parameterAsFields(parameters, 'meshid', context) limit_sample = self.parameterAsInt(parameters, 'limit_sample', context) maxdivide = self.parameterAsInt(parameters, 'maxdivide', context) uneven_div = self.parameterAsInt(parameters, 'uneven_div', context) #out_crs = self.parameterAsCrs( parameters, 'CRS', context ) out_crs = parameters['CRS'] feedback.setCurrentStep(1) if feedback.isCanceled(): return {} # 住所別集計 alg_params = { 'addresslayer': parameters['addresslayer'], 'addressfield': parameters['addressfield'], 'INPUT': csvfile, 'ENCODING': enc, 'CRS': None, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } #Stat_CSVAddressPolygon outputs_statv = processing.run('QGIS_stat:Stat_CSVAddressPolygon', alg_params, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} statv = outputs_statv["OUTPUT"] meshid = meshidfields[0] param1 = { 'INPUT': statv, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT, 'aggrefield': 'snum', 'meshid': meshid, 'meshlayer': meshLayer } #parameters['OUTPUT'] # メッシュ集計 res1 = processing.run('QGIS_stat:AggregateAdmbyMeshAlgorithm', param1, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} numberof_under_limit = 0 #numberof_under_limit = res1["LIMITPOL"] # レイヤをGeoPackage化 alg_paramsg = { 'LAYERS': res1["OUTPUT"], 'OVERWRITE': True, 'SAVE_STYLES': False, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } retg1 = processing.run('native:package', alg_paramsg, context=context, feedback=feedback, is_child_algorithm=True) last_output = retg1["OUTPUT"] new_mesh = retg1["OUTPUT"] mesh_layb = retg1["OUTPUT"] if type(mesh_layb) is str: mesh_layb = QgsVectorLayer(mesh_layb, "mesh", "ogr") numberof_under_limit = 0 # 作業用レイヤの作成 crs_str = mesh_layb.crs() layerURI = "Polygon?crs=" + crs_str.authid() #feedback.pushConsoleInfo( "work layer " + layerURI ) resLayer = QgsVectorLayer(layerURI, "mesh_result", "memory") appended = {} adfields = [] for field in mesh_layb.fields(): #print(field.name(), field.typeName()) adfields.append(field) #resLayer.addField(field) resLayer.dataProvider().addAttributes(adfields) resLayer.updateFields() lower_ids = [] value_column = "snum" # limit 値より小さい値のポリゴン数算出 for f in mesh_layb.getFeatures(): # feedback.pushConsoleInfo( "value " +str( f["value"]) ) if not f[value_column] is None: if f[value_column] > 0 and f[value_column] < limit_sample: numberof_under_limit += 1 lower_ids.append(f[meshid]) next_output = None # 集計結果が最小サンプルより小さいものがある場合 if numberof_under_limit > 0: # 最初のポリゴン集計の場合終了 feedback.pushConsoleInfo("最初の集計で指定値以下の集計値がありましたので集計を中止しました") results["OUTPUT"] = None return results if uneven_div: rmid = [] for tgid in (lower_ids): feedback.pushConsoleInfo("lower id " + str(tgid)) # next_output code の下3桁 削除 C27210-02 -> C27210 が last_output の code 番号 # next_output では last_output が同じ番号の最大4メッシュを削除する # リミットより小さいレコードは旧レコードを退避 # 
リミットにひっかかるレコードを再処理用リストから削除(同一親メッシュのものも削除) # 不均等分割でリミット以下のデータがある場合は last_output -> 分割不能抽出 next_output 分割不能削除 next_output -> last_output 代入 parent_code = tgid[0:-3] rmid.append(parent_code) addfeatures = [] alg_paramsg_n = { 'LAYERS': last_output, 'OVERWRITE': False, 'SAVE_STYLES': False, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } lmesh = processing.run('native:package', alg_paramsg_n, context=context, feedback=feedback, is_child_algorithm=True) last_output = lmesh["OUTPUT"] if type(last_output) is str: last_output = QgsVectorLayer(last_output, "mesh", "ogr") last_output.selectAll() for lf in last_output.getFeatures(): for pcode in (rmid): # feedback.pushConsoleInfo( "pcode " + pcode+ " meshid =" + lf[meshid] ) if lf[meshid] == pcode: lf["fid"] = None if not lf[value_column]: lf[value_column] = 0.0 if lf[meshid] not in appended: addfeatures.append(lf) appended[lf[meshid]] = lf # feedback.pushConsoleInfo( "add feature " + pcode ) resLayer.dataProvider().addFeatures(addfeatures) deleteFeatures = [] if type(next_output) is str: next_output = QgsVectorLayer(next_output, "mesh", "ogr") for nf in next_output.getFeatures(): for pcode in (rmid): if nf[meshid][0:-3] == pcode: deleteFeatures.append(nf.id()) feedback.pushConsoleInfo("delete id " + str(pcode)) next_output.dataProvider().deleteFeatures(deleteFeatures) last_output = next_output # 分割回数ループ for divide_c in range(1, maxdivide): if numberof_under_limit > 0: # 均等分割の場合は終了 if not uneven_div: break #------------------------------------------------------------------------------------------------------------------------ # 最小サンプルより小さいものが無い場合はメッシュ分割 #else: if type(last_output) is str: feedback.pushConsoleInfo("last output " + last_output) else: feedback.pushConsoleInfo("last output " + last_output.name()) alg_paramsg_m = { 'LAYERS': last_output, 'OVERWRITE': True, 'SAVE_STYLES': False, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } spmesh = processing.run('native:package', alg_paramsg_m, context=context, feedback=feedback, is_child_algorithm=True) new_mesh = agtools.SplitMeshLayer(spmesh["OUTPUT"], meshid) # statv 行政界別集計データ # 再度メッシュ集計 param2 = { 'INPUT': statv, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT, 'aggrefield': 'snum', 'meshid': meshid, 'meshlayer': new_mesh } res2 = processing.run('QGIS_stat:AggregateAdmbyMeshAlgorithm', param2, context=context, feedback=feedback, is_child_algorithm=True) #numberof_under_limit = res2["LIMITPOL"] numberof_under_limit = 0 # レイヤをGeoPackage化 alg_paramsg2 = { 'LAYERS': res2["OUTPUT"], 'OVERWRITE': True, 'SAVE_STYLES': False, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } retg2 = processing.run('native:package', alg_paramsg2, context=context, feedback=feedback, is_child_algorithm=True) mesh_layb = retg2["OUTPUT"] if type(mesh_layb) is str: mesh_layb = QgsVectorLayer(mesh_layb, "mesh", "ogr") #features = mesh_layb.selectedFeatures() #feedback.pushConsoleInfo( "feature count " +str( len(features)) ) lower_ids = [] for f in mesh_layb.getFeatures(): # feedback.pushConsoleInfo( "value " +str( f["value"]) ) if not f[value_column] is None: if f[value_column] > 0 and f[value_column] < limit_sample: numberof_under_limit += 1 lower_ids.append(f[meshid]) if numberof_under_limit == 0: last_output = res2["OUTPUT"] next_output = retg2["OUTPUT"] else: # 不均等分割でリミット以下のデータがある場合は last_output -> 分割不能抽出 next_output 分割不能削除 next_output -> last_output 代入 # last_output = res2["OUTPUT"] next_output = retg2["OUTPUT"] # 集計結果が最小サンプルより小さいものがある場合 if numberof_under_limit > 0: # 均等分割の場合は終了 if not uneven_div: break # 不均等分割の場合は終了データを保全 それ以外のメッシュの分割 else: rmid 
= [] for tgid in (lower_ids): feedback.pushConsoleInfo("lower id " + str(tgid)) # next_output code の下3桁 削除 C27210-02 -> C27210 が last_output の code 番号 # next_output では last_output が同じ番号の最大4メッシュを削除する # リミットより小さいレコードは旧レコードを退避 # リミットにひっかかるレコードを再処理用リストから削除(同一親メッシュのものも削除) # 不均等分割でリミット以下のデータがある場合は last_output -> 分割不能抽出 next_output 分割不能削除 next_output -> last_output 代入 parent_code = tgid[0:-3] rmid.append(parent_code) addfeatures = [] #if type(last_output) is str: # last_output = QgsVectorLayer(last_output, "mesh", "ogr") alg_paramsg_n = { 'LAYERS': last_output, 'OVERWRITE': False, 'SAVE_STYLES': False, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } lmesh = processing.run('native:package', alg_paramsg_n, context=context, feedback=feedback, is_child_algorithm=True) #last_output.removeSelection() last_output = lmesh["OUTPUT"] if type(last_output) is str: last_output = QgsVectorLayer(last_output, "mesh", "ogr") last_output.selectAll() for lf in last_output.getFeatures(): for pcode in (rmid): # feedback.pushConsoleInfo( "pcode " + pcode+ " meshid =" + lf[meshid] ) if lf[meshid] == pcode: lf["fid"] = None if not lf[value_column]: lf[value_column] = 0.0 if lf[meshid] not in appended: addfeatures.append(lf) appended[lf[meshid]] = lf #addfeatures.append(lf) feedback.pushConsoleInfo("add feature " + pcode) resLayer.dataProvider().addFeatures(addfeatures) deleteFeatures = [] if type(next_output) is str: next_output = QgsVectorLayer(next_output, "mesh", "ogr") for nf in next_output.getFeatures(): for pcode in (rmid): if nf[meshid][0:-3] == pcode: deleteFeatures.append(nf.id()) feedback.pushConsoleInfo("delete id " + str(pcode)) next_output.dataProvider().deleteFeatures(deleteFeatures) last_output = next_output # Return the results of the algorithm. In this case our only result is # the feature sink which contains the processed features, but some # algorithms may return multiple feature sinks, calculated numeric # statistics, etc. These should all be included in the returned # dictionary, with keys matching the feature corresponding parameter # or output names. 
    # Uneven split: the features of the final working layer may not have been
    # added to the result layer yet
    if uneven_div:
        alg_paramsg_n = {
            'LAYERS': next_output,
            'OVERWRITE': False,
            'SAVE_STYLES': False,
            'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT
        }
        lmesh = processing.run('native:package', alg_paramsg_n, context=context,
                               feedback=feedback, is_child_algorithm=True)
        # last_output.removeSelection()
        last_output = lmesh["OUTPUT"]
        if type(last_output) is str:
            last_output = QgsVectorLayer(last_output, "mesh", "ogr")
        last_output.selectAll()
        addfeatures = []
        for lf in last_output.getFeatures():
            feedback.pushConsoleInfo("add features meshid =" + lf[meshid])
            lf["fid"] = None
            if not lf[value_column]:
                lf[value_column] = 0.0
            if lf[meshid] not in appended:
                addfeatures.append(lf)
                appended[lf[meshid]] = lf
            # addfeatures.append(lf)
        resLayer.dataProvider().addFeatures(addfeatures)
        option_str = ''
        if out_crs is None:
            feedback.pushConsoleInfo("output crs is not specified")
        else:
            feedback.pushConsoleInfo("output crs " + out_crs.authid())
            option_str = "-t_srs " + out_crs.authid()
        # Format conversion (gdal_translate)
        alg_params = {
            'INPUT': resLayer,
            'OPTIONS': option_str,
            'OUTPUT': parameters['OUTPUT']
        }
        ocv = processing.run('gdal:convertformat', alg_params, context=context,
                             feedback=feedback, is_child_algorithm=True)
        results["OUTPUT"] = ocv["OUTPUT"]
        return results
    # Even split
    else:
        option_str = ''
        if out_crs is None:
            feedback.pushConsoleInfo("output crs is not specified")
        else:
            feedback.pushConsoleInfo("output crs " + out_crs.authid())
            option_str = "-t_srs " + out_crs.authid()
        # Format conversion (gdal_translate)
        alg_params = {
            'INPUT': last_output,
            'OPTIONS': option_str,
            'OUTPUT': parameters['OUTPUT']
        }
        ocv = processing.run('gdal:convertformat', alg_params, context=context,
                             feedback=feedback, is_child_algorithm=True)
        results["OUTPUT"] = ocv["OUTPUT"]
        return results
def processAlgorithm(self, parameters, context, model_feedback):
    # Use a multi-step feedback, so that individual child algorithm progress
    # reports are adjusted for the overall progress through the model
    feedback = QgsProcessingMultiStepFeedback(1, model_feedback)
    results = {}
    outputs = {}
    # reference = parameters['reference']
    # classification = parameters['Newfieldname']
    # output_folder = parameters['Outputfolder']

    # Sample raster values
    alg_params = {
        'COLUMN_PREFIX': parameters['Newfieldname'],
        'INPUT': parameters['vectorwithclassificationandreference'],
        'RASTERCOPY': parameters['raster'],
        'OUTPUT': parameters['Sampled']
    }
    SampleRasterValues = processing.run('qgis:rastersampling', alg_params,
                                        context=context, feedback=feedback,
                                        is_child_algorithm=True)
    vlayer = QgsVectorLayer(SampleRasterValues['OUTPUT'])
    idx_1 = vlayer.fields().indexFromName(parameters['reference'])
    idx_2 = vlayer.fields().indexFromName(parameters['Newfieldname'])
    list_class = []
    list_ref = []
    features = vlayer.getFeatures()
    for ft in features:
        if ft.attributes()[idx_2] != None and ft.attributes()[idx_1] != None:
            list_class.append(ft.attributes()[idx_2])
            list_ref.append(ft.attributes()[idx_1])
    feedback.pushInfo(str(idx_2))
    error_matrix1 = pd.crosstab(pd.Series(list_class, name=parameters['Newfieldname']),
                                pd.Series(list_ref, name=parameters['reference']),
                                dropna=False)
    # Extract all index values (classes of the classification) and all column
    # values (classes of the reference dataset)
    cls_cat = error_matrix1.index.values
    ref_cat = error_matrix1.columns.values
    # Make the union of index and column values
    cats = list(set(ref_cat) | set(cls_cat))
    # Reindex the error matrix so that it has the missing rows/columns and fill
    # the empty cells with 0.00000001
    error_matrix = error_matrix1.reindex(index=cats, columns=cats, fill_value=0.00000001)
    error_matrix.index.name = error_matrix.index.name + "/" + error_matrix.columns.name
    # OUTPUT
    diag_elem = np.diagonal(np.matrix(error_matrix))
    UA = (diag_elem / error_matrix.sum(axis=1)) * (diag_elem > 0.01)   # user's accuracy
    PA = diag_elem / error_matrix.sum(axis=0) * (diag_elem > 0.01)     # producer's accuracy
    OA = sum(diag_elem) / error_matrix.sum(axis=1).sum()               # overall accuracy
    error_matrix['UA'] = UA.round(2)
    error_matrix['PA'] = PA.round(2)
    error_matrix['OA'] = np.nan
    error_matrix.loc[error_matrix.index[0], 'OA'] = OA
    error_matrix.to_csv(parameters['Outputfolder'])
    feedback.pushConsoleInfo('Error matrix saved in ' + parameters['Outputfolder'])
    return results
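# --- Illustrative usage (not part of the plugin) -----------------------------
# Small worked example of the accuracy figures computed above: the error matrix
# comes from pd.crosstab (classified rows vs. reference columns) and the
# overall accuracy is the diagonal sum over the total count. Labels are made up.
import numpy as np
import pandas as pd

classified = ['forest', 'forest', 'water', 'urban', 'water']
reference  = ['forest', 'urban',  'water', 'urban', 'forest']
m = pd.crosstab(pd.Series(classified, name='classified'),
                pd.Series(reference, name='reference'), dropna=False)
diag = np.diag(m.values)            # classes coincide here; the plugin reindexes first
OA = diag.sum() / m.values.sum()    # 3 of 5 samples lie on the diagonal
print(OA)                           # 0.6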
def processAlgorithm(self, params, context, feedback): steps = 0 totalStpes = 17 fieldDpa = params['DPA_SECTOR'] # fieldHab = params['NUMBER_HABITANTS'] feedback = QgsProcessingMultiStepFeedback(totalStpes, feedback) if not OPTIONAL_GRID_INPUT: params['CELL_SIZE'] = P_CELL_SIZE grid, isStudyArea = buildStudyArea(params['CELL_SIZE'], params['BLOCKS'], params['STUDY_AREA_GRID'], context, feedback) gridNeto = grid steps = steps+1 feedback.setCurrentStep(steps) path = params['ENCUESTA'] file = path #p03 edad cols = ['id_vivienda','id_hogar', 'p03', 'empleo', 'desempleo'] df = pd.read_csv(file, usecols=cols, sep=";") df['id_vivienda'] = df['id_vivienda'].astype(str) df.loc[df['id_vivienda'].str.len() == 18, 'id_vivienda'] = "0" + df['id_vivienda'] df['codsec'] = df['id_vivienda'].str[0:12] df['codzon'] = df['id_vivienda'].str[0:9] df['pbt'] = df['codsec'].astype(str) # CAMBIAR A TODA LA POBLACION MAYOR DE 15 # df = df[(df['p03'] >= 15) & ((df['empleo'].astype(str) != ' ') | (df['desempleo'].astype(str) != ' '))] df = df[(df['p03'] >= 15)] df.loc[df['empleo'] == ' ', 'empleo'] = 0 df.loc[df['desempleo'] == ' ', 'desempleo'] = 0 df['desempleo'] = df['desempleo'].astype(float) aggOptions = { 'codzon' : 'first', 'pbt' : 'count', 'desempleo' : 'sum', } resManzanas = df.groupby('codzon').agg(aggOptions) resManzanas['des'] = None resManzanas['des'] = (resManzanas['desempleo'] / resManzanas['pbt'] * 100) df = resManzanas steps = steps+1 feedback.setCurrentStep(steps) outputCsv = self.CURRENT_PATH+'/des.csv' feedback.pushConsoleInfo(str(('des en ' + outputCsv))) df.to_csv(outputCsv, index=False) steps = steps+1 feedback.setCurrentStep(steps) exitCsv = os.path.exists(outputCsv) if(exitCsv): print("El archivo CSV existe") else: print("No se encuentra CSV") CSV = QgsVectorLayer(outputCsv, "csv", "ogr") featuresCSV = CSV.getFeatures() # fields = layer.dataProvider().fields() field_names = [field.name() for field in CSV.fields()] print(field_names) steps = steps+1 feedback.setCurrentStep(steps) result = joinByAttr2(params['BLOCKS'], fieldDpa, outputCsv, 'codzon', [], UNDISCARD_NONMATCHING, '', 1, context, feedback) # steps = steps+1 # feedback.setCurrentStep(steps) # expressionNotNull = "des IS NOT '' AND des is NOT NULL" # result = filterByExpression(result['OUTPUT'], expressionNotNull, context, feedback) # ----------------------CONVERTIR A NUMERICOS -------------------- steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = 'desempleo * 1.0' result = calculateField(result['OUTPUT'], 'desempleo_n', formulaDummy, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = 'pbt * 1.0' result = calculateField(result['OUTPUT'], 'pbt_n', formulaDummy, context, feedback) # ----------------------PROPORCIONES AREA-------------------------- steps = steps+1 feedback.setCurrentStep(steps) blocks = calculateArea(result['OUTPUT'], 'area_bloc', context, feedback) steps = steps+1 feedback.setCurrentStep(steps) segments = intersection(blocks['OUTPUT'], gridNeto['OUTPUT'], ['desempleo_n','pbt_n','area_bloc'], ['id_grid','area_grid'], context, feedback) steps = steps+1 feedback.setCurrentStep(steps) segmentsArea = calculateArea(segments['OUTPUT'], 'area_seg', context, feedback) # -------------------------PROPORCIONES VALORES------------------------- steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = '(area_seg/area_bloc) * desempleo_n' result = calculateField(segmentsArea['OUTPUT'], 'desempleo_n_seg', formulaDummy, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) 
formulaDummy = '(area_seg/area_bloc) * pbt_n' result = calculateField(result['OUTPUT'], 'pbt_n_seg', formulaDummy, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) result = makeSureInside(result['OUTPUT'], context, feedback) #---------------------------------------------------------------------- steps = steps+1 feedback.setCurrentStep(steps) result = joinByLocation(gridNeto['OUTPUT'], result['OUTPUT'], ['desempleo_n_seg','pbt_n_seg'], [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps+1 feedback.setCurrentStep(steps) formulaDummy = '(desempleo_n_seg_sum/pbt_n_seg_sum) * 100' result = calculateField(result['OUTPUT'], NAMES_INDEX['ID11'][0], formulaDummy, context, feedback, params['OUTPUT']) # steps = steps+1 # feedback.setCurrentStep(steps) # gridNeto = joinByLocation(gridNeto['OUTPUT'], # result['OUTPUT'], # ['desempleo_viv_n'], # [INTERSECTA], [MEDIA], # UNDISCARD_NONMATCHING, # context, # feedback) # fieldsMapping = [ # {'expression': '"id_grid"', 'length': 10, 'name': 'id_grid', 'precision': 0, 'type': 4}, # {'expression': '"area_grid"', 'length': 16, 'name': 'area_grid', 'precision': 3, 'type': 6}, # {'expression': '"tenencia_viv_n_mean"', 'length': 20, 'name': NAMES_INDEX['ID11'][0], 'precision': 2, 'type': 6} # ] # steps = steps+1 # feedback.setCurrentStep(steps) # result = refactorFields(fieldsMapping, gridNeto['OUTPUT'], # context, # feedback, params['OUTPUT']) return result
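# The unemployment indicator above repeats one core pattern: a block-level value is split
# among grid cells in proportion to the area of each block/grid intersection segment, and
# the segments are then summed per cell. A minimal pandas sketch of that proportional
# split, with illustrative numbers and column names that mirror the fields used here:
import pandas as pd

# One row per block/grid intersection segment.
segments = pd.DataFrame({
    'id_grid':   [1, 1, 2],
    'area_bloc': [100.0, 80.0, 80.0],   # full block area
    'area_seg':  [40.0, 80.0, 40.0],    # area of the block piece inside the cell
    'desempleo': [10.0, 4.0, 4.0],      # block-level value being apportioned
    'pbt':       [50.0, 20.0, 20.0],
})

share = segments['area_seg'] / segments['area_bloc']
segments['desempleo_n_seg'] = share * segments['desempleo']
segments['pbt_n_seg'] = share * segments['pbt']

# Aggregate per grid cell and compute the indicator, as the joinByLocation + SUM step does.
per_cell = segments.groupby('id_grid')[['desempleo_n_seg', 'pbt_n_seg']].sum()
per_cell['unemployment_pct'] = per_cell['desempleo_n_seg'] / per_cell['pbt_n_seg'] * 100
print(per_cell)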
def processAlgorithm(self, parameters, context, model_feedback):
    # Use a multi-step feedback, so that individual child algorithm progress reports
    # are adjusted for the overall progress through the model.
    feedback = QgsProcessingMultiStepFeedback(5, model_feedback)
    results = {}
    outputs = {}
    model_feedback.pushConsoleInfo("start")

    # CSVtoStatProcessing
    alg_params = {
        'ENCODING': parameters['ENCODING'],
        'INPUT': parameters['INPUT'],
        'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT
    }
    out_crs = parameters['CRS']
    outputs['Csvtostatprocessing'] = processing.run('QGIS_stat:CSVtoStatProcessing', alg_params,
                                                    context=context, feedback=feedback,
                                                    is_child_algorithm=True)
    model_feedback.pushConsoleInfo("end csv")
    feedback.setCurrentStep(1)
    if feedback.isCanceled():
        return {}

    # Join on the attribute table (table join).
    alg_params = {
        'DISCARD_NONMATCHING': False,
        'FIELD': parameters['addressfield'],
        'FIELDS_TO_COPY': None,
        'FIELD_2': 'address',
        'INPUT': parameters['addresslayer'],
        'INPUT_2': outputs['Csvtostatprocessing']['OUTPUT'],
        'METHOD': 1,
        'PREFIX': '',
        'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT
    }
    outputs['TableJoin'] = processing.run('native:joinattributestable', alg_params,
                                          context=context, feedback=feedback,
                                          is_child_algorithm=True)
    model_feedback.pushConsoleInfo("end join")
    feedback.setCurrentStep(2)
    if feedback.isCanceled():
        return {}

    # Package the layer as a GeoPackage.
    alg_params = {
        'LAYERS': outputs['TableJoin']['OUTPUT'],
        'OVERWRITE': True,
        'SAVE_STYLES': False,
        'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT
    }
    outputs['Geopackage'] = processing.run('native:package', alg_params,
                                           context=context, feedback=feedback,
                                           is_child_algorithm=True)
    feedback.setCurrentStep(3)
    if feedback.isCanceled():
        return {}

    # Run SQL via SpatiaLite.
    alg_params = {
        'DATABASE': outputs['Geopackage']['OUTPUT'],
        'SQL': 'update "出力レイヤ" set snum=0 where snum is NULL'
    }
    outputs['Spatialitesql'] = processing.run('qgis:spatialiteexecutesql', alg_params,
                                              context=context, feedback=feedback,
                                              is_child_algorithm=True)
    feedback.setCurrentStep(4)
    if feedback.isCanceled():
        return {}

    # Only reproject if an output CRS was supplied.
    option_str = ''
    if out_crs is None:
        feedback.pushConsoleInfo("output crs is not specified")
    else:
        feedback.pushConsoleInfo("output crs " + out_crs.authid())
        option_str = "-t_srs " + out_crs.authid()

    # Format conversion (gdal_translate).
    alg_params = {
        'INPUT': outputs['Geopackage']['OUTPUT'],
        'OPTIONS': option_str,
        'OUTPUT': parameters['OUTPUT']
    }
    outputs['Gdal_translate'] = processing.run('gdal:convertformat', alg_params,
                                               context=context, feedback=feedback,
                                               is_child_algorithm=True)
    feedback.setCurrentStep(5)
    if feedback.isCanceled():
        return {}

    results['OUTPUT'] = outputs['Gdal_translate']['OUTPUT']
    return results
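# The CRS handling above only adds a "-t_srs" option when an output CRS was supplied.
# A small sketch of that guard, assuming the parameter yields a QgsCoordinateReferenceSystem
# (the helper name is hypothetical):
from qgis.core import QgsCoordinateReferenceSystem

def translate_options(out_crs):
    """Return the gdal:convertformat OPTIONS string, reprojecting only if a CRS is given."""
    if out_crs is None or not out_crs.isValid():
        return ''
    return '-t_srs ' + out_crs.authid()   # e.g. '-t_srs EPSG:4326'

print(translate_options(None))                                        # ''
print(translate_options(QgsCoordinateReferenceSystem('EPSG:4326')))   # '-t_srs EPSG:4326'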
def processAlgorithm(self, params, context, feedback): steps = 0 totalStpes = 37 fieldPopulateOrHousing = params['FIELD_POPULATE_HOUSING'] DISTANCE_SHOP = 300 #MINIMARKET SON DEPOSITOS DE CILINDRO DE GAS DISTANCE_MINIMARKET = 300 DISTANCE_PHARMACY = 300 DISTANCE_BAKERY = 300 DISTANCE_STATIONERY = 300 # tomar solo los que tienen cercania simultanea (descartar lo menores de 3) MIN_FACILITIES = 5 OPERATOR_GE = 3 feedback = QgsProcessingMultiStepFeedback(totalStpes, feedback) """ ----------------------------------------------------------------- Calcular las facilidades ----------------------------------------------------------------- """ steps = steps + 1 feedback.setCurrentStep(steps) if not OPTIONAL_GRID_INPUT: params['CELL_SIZE'] = P_CELL_SIZE grid, isStudyArea = buildStudyArea(params['CELL_SIZE'], params['BLOCKS'], params['STUDY_AREA_GRID'], context, feedback) gridNeto = grid steps = steps + 1 feedback.setCurrentStep(steps) gridNeto = calculateField(gridNeto['OUTPUT'], 'id_grid', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) blocks = calculateArea(params['BLOCKS'], 'area_bloc', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) segments = intersection( blocks['OUTPUT'], gridNeto['OUTPUT'], 'area_bloc;' + fieldPopulateOrHousing, 'id_grid', context, feedback, ) steps = steps + 1 feedback.setCurrentStep(steps) segmentsArea = calculateArea(segments['OUTPUT'], 'area_seg', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaHousingSegments = '(area_seg/area_bloc) * ' + fieldPopulateOrHousing housingForSegments = calculateField(segmentsArea['OUTPUT'], 'h_s', formulaHousingSegments, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksWithId = calculateField(housingForSegments['OUTPUT'], 'id_block', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) centroidsBlocks = createCentroids(blocksWithId['OUTPUT'], context, feedback) result = [] idxs = ['idxshop', 'idxmini', 'idxpha', 'idkbake', 'idxsta'] if (params['DISTANCE_OPTIONS'] == 0): steps = steps + 1 feedback.setCurrentStep(steps) feedback.pushConsoleInfo(str(('Cálculo de áreas de servicio'))) layers = [ [params['SHOP'], STRATEGY_DISTANCE, DISTANCE_SHOP], [params['GAS'], STRATEGY_DISTANCE, DISTANCE_MINIMARKET], [params['PHARMACY'], STRATEGY_DISTANCE, DISTANCE_PHARMACY], [params['BAKERY'], STRATEGY_DISTANCE, DISTANCE_BAKERY], [params['STATIONERY'], STRATEGY_DISTANCE, DISTANCE_STATIONERY], ] serviceAreas = multiBufferIsocrono(params['ROADS'], layers, context, feedback) iidx = -1 for serviceArea in serviceAreas: iidx = iidx + 1 idx = idxs[iidx] steps = steps + 1 feedback.setCurrentStep(steps) serviceArea = calculateField(serviceArea, idx, '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) centroidsBlocks = joinByLocation(centroidsBlocks['OUTPUT'], serviceArea['OUTPUT'], [idx], [INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = 'coalesce(idxshop_count, 0) + coalesce(idxmini_count, 0) + coalesce(idxpha_count,0) + coalesce(idkbake_count, 0) + coalesce(idxsta_count, 0)' facilitiesCover = calculateField(centroidsBlocks['OUTPUT'], 'facilities', formulaDummy, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) facilitiesFullCover = filter(facilitiesCover['OUTPUT'], 'facilities', OPERATOR_GE, MIN_FACILITIES, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoFacilitiesCover = 
joinByLocation(gridNeto['OUTPUT'], facilitiesCover['OUTPUT'], ['h_s', 'facilities'], [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) fieldsMapping = [{ 'expression': '"id_grid"', 'length': 10, 'name': 'id_grid', 'precision': 0, 'type': 4 }, { 'expression': '"area_grid"', 'length': 16, 'name': 'area_grid', 'precision': 3, 'type': 6 }, { 'expression': '"h_s_sum"', 'length': 20, 'name': 'ptotal', 'precision': 2, 'type': 6 }, { 'expression': '"facilities_sum"', 'length': 20, 'name': 'facilities', 'precision': 2, 'type': 6 }] steps = steps + 1 feedback.setCurrentStep(steps) gridNetoFacilitiesCover = refactorFields( fieldsMapping, gridNetoFacilitiesCover['OUTPUT'], context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoFacilities = joinByLocation( gridNetoFacilitiesCover['OUTPUT'], facilitiesFullCover['OUTPUT'], ['h_s'], [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaProximity = 'coalesce((coalesce(h_s_sum,0) / coalesce(ptotal,""))*100,"")' coverageDailyBusiness = calculateField( gridNetoFacilities['OUTPUT'], NAMES_INDEX['IA09'][0], formulaProximity, context, feedback, params['OUTPUT']) result = coverageDailyBusiness else: feedback.pushConsoleInfo(str(('Cálculo de buffer radial'))) steps = steps + 1 feedback.setCurrentStep(steps) blockBuffer4Shop = createBuffer(centroidsBlocks['OUTPUT'], DISTANCE_SHOP, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blockBuffer4Minimarket = createBuffer(centroidsBlocks['OUTPUT'], DISTANCE_MINIMARKET, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blockBuffer4Pharmacy = createBuffer(centroidsBlocks['OUTPUT'], DISTANCE_PHARMACY, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) BlockBuffer4Bakery = createBuffer(centroidsBlocks['OUTPUT'], DISTANCE_BAKERY, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) BlockBuffer4Stationery = createBuffer(centroidsBlocks['OUTPUT'], DISTANCE_STATIONERY, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) layerShop = calculateField(params['SHOP'], 'idx', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) layerMinimarket = calculateField(params['GAS'], 'idx', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) layerPharmacy = calculateField(params['PHARMACY'], 'idx', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) layerBakery = calculateField(params['BAKERY'], 'idx', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) layerStationery = calculateField(params['STATIONERY'], 'idx', '$id', context, feedback, type=1) layerShop = layerShop['OUTPUT'] layerMinimarket = layerMinimarket['OUTPUT'] layerPharmacy = layerPharmacy['OUTPUT'] layerBakery = layerBakery['OUTPUT'] layerStationery = layerStationery['OUTPUT'] steps = steps + 1 feedback.setCurrentStep(steps) counterShop = joinByLocation(blockBuffer4Shop['OUTPUT'], layerShop, 'idx', [INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) counterMinimarket = joinByLocation( blockBuffer4Minimarket['OUTPUT'], layerMinimarket, 'idx', [INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) countePharmacy = joinByLocation(blockBuffer4Pharmacy['OUTPUT'], layerPharmacy, 'idx', [INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 
feedback.setCurrentStep(steps) counterBakery = joinByLocation(BlockBuffer4Bakery['OUTPUT'], layerBakery, 'idx', [INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) counterStationery = joinByLocation( BlockBuffer4Stationery['OUTPUT'], layerStationery, 'idx', [INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksWithId['OUTPUT'], 'id_block', counterShop['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'sh_', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksJoined['OUTPUT'], 'id_block', counterMinimarket['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'mk_', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksJoined['OUTPUT'], 'id_block', countePharmacy['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'pha_', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksJoined['OUTPUT'], 'id_block', counterBakery['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'bk_', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksJoined['OUTPUT'], 'id_block', counterStationery['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'st_', context, feedback) #FIXME: CAMBIAR POR UN METODO BUCLE formulaParseBS = 'CASE WHEN coalesce(sh_idx_count, 0) > 0 THEN 1 ELSE 0 END' formulaParseTS = 'CASE WHEN coalesce(mk_idx_count, 0) > 0 THEN 1 ELSE 0 END' formulaParseBKS = 'CASE WHEN coalesce(pha_idx_count, 0) > 0 THEN 1 ELSE 0 END' formulaParseBW = 'CASE WHEN coalesce(bk_idx_count, 0) > 0 THEN 1 ELSE 0 END' formulaParseCW = 'CASE WHEN coalesce(st_idx_count, 0) > 0 THEN 1 ELSE 0 END' steps = steps + 1 feedback.setCurrentStep(steps) blocksFacilities = calculateField(blocksJoined['OUTPUT'], 'parse_bs', formulaParseBS, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksFacilities = calculateField(blocksFacilities['OUTPUT'], 'parse_ts', formulaParseTS, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksFacilities = calculateField(blocksFacilities['OUTPUT'], 'parse_bks', formulaParseBKS, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksFacilities = calculateField(blocksFacilities['OUTPUT'], 'parse_bw', formulaParseBW, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksFacilities = calculateField(blocksFacilities['OUTPUT'], 'parse_cw', formulaParseCW, context, feedback) formulaFacilities = 'parse_bs + parse_ts + parse_bks + parse_bw + parse_cw' steps = steps + 1 feedback.setCurrentStep(steps) blocksFacilities = calculateField(blocksFacilities['OUTPUT'], 'facilities', formulaFacilities, context, feedback) """ ----------------------------------------------------------------- Calcular numero de viviendas por hexagano ----------------------------------------------------------------- """ # steps = steps+1 # feedback.setCurrentStep(steps) # segments = intersection(blocksFacilities['OUTPUT'], gridNeto['OUTPUT'], # 'sh_idx_count;mk_idx_count;pha_idx_count;bk_idx_count;st_idx_count;facilities;h_s', # 'id_grid', # context, feedback) # Haciendo el buffer inverso aseguramos que los segmentos # quden dentro de la malla steps = steps + 1 feedback.setCurrentStep(steps) facilitiesForSegmentsFixed = makeSureInside( blocksFacilities['OUTPUT'], context, feedback) steps = 
steps + 1 feedback.setCurrentStep(steps) gridNetoAndSegments = joinByLocation( gridNeto['OUTPUT'], facilitiesForSegmentsFixed['OUTPUT'], 'sh_idx_count;mk_idx_count;pha_idx_count;bk_idx_count;st_idx_count;facilities;h_s', [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) facilitiesNotNullForSegmentsFixed = filter( facilitiesForSegmentsFixed['OUTPUT'], 'facilities', OPERATOR_GE, MIN_FACILITIES, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoAndSegmentsSimulta = joinByLocation( gridNeto['OUTPUT'], facilitiesNotNullForSegmentsFixed['OUTPUT'], 'h_s', [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) totalHousing = joinByAttr(gridNetoAndSegments['OUTPUT'], 'id_grid', gridNetoAndSegmentsSimulta['OUTPUT'], 'id_grid', 'h_s_sum', UNDISCARD_NONMATCHING, 'net_', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaProximity = 'coalesce((coalesce(net_h_s_sum,0)/coalesce(h_s_sum,""))*100,"")' coverageDailyBusiness = calculateField(totalHousing['OUTPUT'], NAMES_INDEX['IA09'][0], formulaProximity, context, feedback, params['OUTPUT']) result = coverageDailyBusiness return result
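# Both branches of the IA09 algorithm end with the same null-guarded percentage, e.g.
# coalesce((coalesce(net_h_s_sum,0) / coalesce(h_s_sum,""))*100,""): housing in blocks
# simultaneously covered by the required number of daily services, divided by the total
# housing in the cell, with missing sums treated as 0 and cells without a denominator
# left NULL instead of raising a division error. A plain-Python sketch of the same guard
# (coverage_pct is hypothetical):
def coverage_pct(served_sum, total_sum):
    """Share of housing covered; missing served counts as 0, missing/zero total gives None."""
    served = served_sum or 0.0
    if not total_sum:          # None or 0: no denominator, leave the cell empty
        return None
    return served / total_sum * 100.0

print(coverage_pct(None, 120.0))   # 0.0  -> cell with housing but no covered blocks
print(coverage_pct(80.0, 120.0))   # ~66.67
print(coverage_pct(80.0, None))    # None -> cell without housing data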
def processAlgorithm(self, parameters, context, model_feedback): """ Here is where the processing itself takes place. """ results = {} feedback = QgsProcessingMultiStepFeedback(7, model_feedback) inputLayer = self.parameterAsVectorLayer(parameters, self.INPUT, context) if inputLayer is None: raise QgsProcessingException(self.tr('input layer missed')) meshLayer = self.parameterAsVectorLayer(parameters, "meshlayer", context) if meshLayer is None: raise QgsProcessingException(self.tr('mesh layer missed')) meshidfields = self.parameterAsFields(parameters, 'meshid', context) pareafields = self.parameterAsFields(parameters, 'pareafield', context) psamplefields = self.parameterAsFields(parameters, 'polsmpl', context) feedback.setCurrentStep(1) if feedback.isCanceled(): return {} # union # 集計用メッシュと人口×行政界メッシュの UNIONを作成する params2 = { 'INPUT': meshLayer, 'INPUT_FIELDS': [], 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT, # 'OUTPUT' : parameters["OUTPUT"], 'OVERLAY': inputLayer, 'OVERLAY_FIELDS': [] } # 'OUTPUT' : parameters["OUTPUT"], 'OVERLAY' : res3["OUTPUT"], 'OVERLAY_FIELDS' : [] } # ここはIntersectではなくて union #res2 = processing.run('native:intersection', params2, context=context, feedback=feedback ,is_child_algorithm=True) res2 = processing.run('qgis:union', params2, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("union OK ") # 作成UNIONのポリゴン面積算出 # 面積出力フィールド名 feedback.setCurrentStep(2) darea_column = 'divu_area' params3 = { 'INPUT': res2["OUTPUT"], 'FIELD_NAME': darea_column, 'FIELD_TYPE': 0, 'FIELD_LENGTH': 12, 'FIELD_PRECISION': 5, 'NEW_FIELD': 1, 'FORMULA': '$area', 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } res3 = processing.run('qgis:fieldcalculator', params3, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("caluculate area of union polygon OK ") # 作成UNIONと元 UNIONの面積比率 feedback.setCurrentStep(3) newFlag = True ratio_column = 'divu_ratio' ratio_str = darea_column + "/" + pareafields[0] params4 = { 'INPUT': res3["OUTPUT"], 'FIELD_NAME': ratio_column, 'FIELD_TYPE': 0, 'FIELD_LENGTH': 12, # 'FIELD_PRECISION':5, 'NEW_FIELD':newFlag,'FORMULA':ratio_str,'OUTPUT' :parameters["OUTPUT"] } 'FIELD_PRECISION': 5, 'NEW_FIELD': newFlag, 'FORMULA': ratio_str, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } res4 = processing.run('qgis:fieldcalculator', params4, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("calc ratio ok ") # 元UNIONの想定集計値に面積比率をかけて作成UNIONの想定調査値を算出する feedback.setCurrentStep(4) sv_column = 'snum_nv' sv_str = psamplefields[0] + "*" + ratio_column feedback.pushConsoleInfo("expres " + sv_str) params5 = { 'INPUT': res4["OUTPUT"], 'FIELD_NAME': sv_column, 'FIELD_TYPE': 0, 'FIELD_LENGTH': 12, # 'FIELD_PRECISION':5, 'NEW_FIELD':newFlag,'FORMULA':sv_str,'OUTPUT' :parameters["OUTPUT"] } 'FIELD_PRECISION': 5, 'NEW_FIELD': newFlag, 'FORMULA': sv_str, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } res5 = processing.run('qgis:fieldcalculator', params5, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("calc ratio2 ok ") # 作成UNIONの想定調査値をメッシュ別に集計する # 按分数値をもとにメッシュ別集計 feedback.setCurrentStep(5) meshid_f = meshidfields[0] alg_paramsg_n = { 'LAYERS': res5["OUTPUT"], 'OVERWRITE': False, 'SAVE_STYLES': False, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } lout = processing.run('native:package', alg_paramsg_n, context=context, feedback=feedback, 
is_child_algorithm=True) agar = [] tgLayer = lout["OUTPUT"] if type(tgLayer) is str: tgLayer = QgsVectorLayer(tgLayer, "intesect", "ogr") for field in tgLayer.fields(): agreg = {} agreg['input'] = '"' + field.name() + '"' #feedback.pushConsoleInfo( "name " + field.name() ) agreg['name'] = field.name() agreg['aggregate'] = 'first_value' agreg['length'] = field.length() agreg['precision'] = field.precision() agreg['type'] = field.type() if field.name() == sv_column: agreg['aggregate'] = 'sum' agar.append(agreg) if field.name() == meshid_f: agar.append(agreg) params6 = { 'INPUT': res5["OUTPUT"], 'GROUP_BY': meshid_f, 'AGGREGATES': agar, # 'OUTPUT' :parameters["OUTPUT"] } 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } feedback.pushConsoleInfo("aggregate ") res6 = processing.run('qgis:aggregate', params6, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("aggregate OK ") # レイヤ結合 qgis:joinattributestable #QgsProject.instance().addMapLayer(res7["OUTPUT"]) feedback.setCurrentStep(6) param7 = { 'DISCARD_NONMATCHING': False, 'FIELD': meshid_f, 'FIELDS_TO_COPY': [sv_column], 'FIELD_2': meshid_f, 'INPUT': meshLayer, 'INPUT_2': res6['OUTPUT'], 'METHOD': 1, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT, 'PREFIX': '' } res7 = processing.run('qgis:joinattributestable', param7, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("joinattributetable OK") # 結果フィールド名変更 feedback.setCurrentStep(7) param8 = { 'FIELD': sv_column, 'INPUT': res7["OUTPUT"], 'NEW_NAME': 'snum', 'OUTPUT': parameters["OUTPUT"] } res8 = processing.run('qgis:renametablefield', param8, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("field rename OK") results["OUTPUT"] = res8["OUTPUT"] # results["LIMITPOL"] = matches return results
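# The loop above builds the AGGREGATES specification for qgis:aggregate from the layer's
# own fields: every field keeps its first value per mesh id except the apportioned value,
# which is summed. (The original loop also appends the mesh-id entry a second time when
# field.name() == meshid_f, which looks unintended; the sketch below adds each field once.)
# Field and parameter names mirror the code, but the helper itself is hypothetical.
def build_aggregates(layer, sum_field):
    """AGGREGATES entries for qgis:aggregate: sum one field, first_value for the rest."""
    specs = []
    for field in layer.fields():
        specs.append({
            'input': '"' + field.name() + '"',
            'name': field.name(),
            'aggregate': 'sum' if field.name() == sum_field else 'first_value',
            'type': field.type(),
            'length': field.length(),
            'precision': field.precision(),
        })
    return specs

# Usage sketch:
# params6 = {'INPUT': res5['OUTPUT'], 'GROUP_BY': meshid_f,
#            'AGGREGATES': build_aggregates(tgLayer, sv_column),
#            'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT}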
def processAlgorithm(self, parameters, context, model_feedback): constraint_lines = self.parameterAsLayerList( parameters, self.CONSTRAINT_LINES, context, ) feedback = QgsProcessingMultiStepFeedback(4 if constraint_lines else 2, model_feedback) sections_prepared = self.prepare_sections(parameters, context, feedback) feedback.setCurrentStep(1) if feedback.isCanceled(): return {} if constraint_lines: constraint_lines = self.prepare_constraint_lines( parameters, context, feedback) feedback.setCurrentStep(3) if feedback.isCanceled(): return {} axis_path = self.parameterAsCompatibleSourceLayerPath( parameters, self.AXIS, context, compatibleFormats=["shp"], ) long_step = self.parameterAsString(parameters, self.LONG_STEP, context) lat_step = self.parameterAsString(parameters, self.LAT_STEP, context) attr_cross_sections = self.parameterAsString(parameters, self.ATTR_CROSS_SECTION, context) output_path = self.parameterAsOutputLayer(parameters, self.OUTPUT, context) command = [ PYTHON_INTERPRETER, PYTHON_SCRIPT, "-v", ] if constraint_lines: command += [ "--infile_constraint_lines", constraint_lines, ] command += [ "--long_step", long_step, "--lat_step", lat_step, "--attr_cross_sections", attr_cross_sections, axis_path, sections_prepared, output_path, ] feedback.pushCommandInfo(" ".join(command)) env = {key: value for key, value in os.environ.items()} env["PYTHONPATH"] = os.pathsep.join( [env.get("PYTHONPATH", ""), PYTHONPATH]) with subprocess.Popen( command, stdout=subprocess.PIPE, stdin=subprocess.DEVNULL, stderr=subprocess.STDOUT, env=env, encoding=ENCODING, ) as proc: while proc.poll() is None: for line in iter(proc.stdout.readline, ""): feedback.pushConsoleInfo(line.strip()) for line in iter(proc.stdout.readline, ""): feedback.pushConsoleInfo(line.strip()) if proc.returncode != 0: raise QgsProcessingException( "Failed to execute command {}".format(" ".join(command))) sections = self.parameterAsSource(parameters, self.SECTIONS, context) processing.run( "qgis:definecurrentprojection", { "INPUT": output_path, "CRS": sections.sourceCrs() }, ) return {self.OUTPUT: output_path}
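# The cross-section algorithm above shells out to an external Python script and forwards
# its stdout, line by line, to the processing feedback. A self-contained sketch of that
# pattern with a placeholder command (run_and_stream is hypothetical):
import os
import subprocess
import sys

def run_and_stream(command, feedback, extra_pythonpath=''):
    """Run a command and push each stdout line to a QGIS feedback object."""
    env = dict(os.environ)
    if extra_pythonpath:
        env['PYTHONPATH'] = os.pathsep.join([env.get('PYTHONPATH', ''), extra_pythonpath])
    with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                          stdin=subprocess.DEVNULL, env=env, encoding='utf-8') as proc:
        for line in proc.stdout:
            feedback.pushConsoleInfo(line.rstrip())
    if proc.returncode != 0:
        raise RuntimeError('Failed to execute command {}'.format(' '.join(command)))

# Usage sketch: run_and_stream([sys.executable, 'some_script.py', '-v'], feedback)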
def processAlgorithm(self, params, context, feedback): steps = 0 totalStpes = 17 fieldDpaMan = params['DPA_MAN'] # fieldHab = params['NUMBER_HABITANTS'] feedback = QgsProcessingMultiStepFeedback(totalStpes, feedback) if not OPTIONAL_GRID_INPUT: params['CELL_SIZE'] = P_CELL_SIZE grid, isStudyArea = buildStudyArea(params['CELL_SIZE'], params['BLOCKS'], params['STUDY_AREA_GRID'], context, feedback) gridNeto = grid steps = steps + 1 feedback.setCurrentStep(steps) pathCsvPoblacion = params['CENSO_POBLACION'] file = pathCsvPoblacion cols = [ 'I01', 'I02', 'I03', 'I04', 'I05', 'I06', 'I09', 'I10', 'P01', 'P23', 'P03' ] df = pd.read_csv(file, usecols=cols) # fix codes df['I01'] = df['I01'].astype(str) df['I02'] = df['I02'].astype(str) df['I03'] = df['I03'].astype(str) df['I04'] = df['I04'].astype(str) df['I05'] = df['I05'].astype(str) df['I06'] = df['I06'].astype(str) df['I09'] = df['I09'].astype(str) df['I10'] = df['I10'].astype(str) df.loc[df['I01'].str.len() < 2, 'I01'] = "0" + df['I01'] df.loc[df['I02'].str.len() < 2, 'I02'] = "0" + df['I02'] df.loc[df['I03'].str.len() < 2, 'I03'] = "0" + df['I03'] df.loc[df['I04'].str.len() == 1, 'I04'] = "00" + df['I04'] df.loc[df['I04'].str.len() == 2, 'I04'] = "0" + df['I04'] df.loc[df['I05'].str.len() == 1, 'I05'] = "00" + df['I05'] df.loc[df['I05'].str.len() == 2, 'I05'] = "0" + df['I05'] df.loc[df['I06'].str.len() < 2, 'I06'] = "0" + df['I06'] df.loc[df['I09'].str.len() == 1, 'I09'] = "00" + df['I09'] df.loc[df['I09'].str.len() == 2, 'I09'] = "0" + df['I09'] df.loc[df['I10'].str.len() < 2, 'I10'] = "0" + df['I10'] df['GREDAD'] = None df.loc[(df['P03'] >= 15), 'GREDAD'] = 5 df.loc[(df['P03'] < 15), 'GREDAD'] = 4 df['pobactive'] = 0.0 df.loc[(df['GREDAD'] >= 5) & ((df['P23'] == '9') | (df['P23'] == '09')), 'pobactive'] = 1.0 df['poblacion'] = 0.0 df.loc[(df['GREDAD'] > 0), 'poblacion'] = 1.0 # df[0:50] df['codman'] = df['I01'].astype(str) + df['I02'].astype(str) + df['I03'].astype(str) \ + df['I04'].astype(str) + df['I05'].astype(str) + df['I06'].astype(str) df['pobactive'] = df['pobactive'].astype(float) df['poblacion'] = df['poblacion'].astype(float) aggOptions = { 'codman': 'first', 'poblacion': 'sum', 'pobactive': 'sum', } resManzanas = df.groupby('codman').agg(aggOptions) resManzanas['pobactuni'] = None resManzanas['pobactuni'] = (resManzanas['pobactive'] / resManzanas['poblacion']) * 100 resManzanas['pobt'] = resManzanas['poblacion'] df = resManzanas steps = steps + 1 feedback.setCurrentStep(steps) outputCsv = self.CURRENT_PATH + '/pobactuni.csv' feedback.pushConsoleInfo(str(('pobactuni en ' + outputCsv))) df.to_csv(outputCsv, index=False) steps = steps + 1 feedback.setCurrentStep(steps) exitCsv = os.path.exists(outputCsv) if (exitCsv): print("El archivo CSV existe") else: print("No se encuentra CSV") CSV = QgsVectorLayer(outputCsv, "csv", "ogr") featuresCSV = CSV.getFeatures() # fields = layer.dataProvider().fields() field_names = [field.name() for field in CSV.fields()] print(field_names) steps = steps + 1 feedback.setCurrentStep(steps) steps = steps + 1 feedback.setCurrentStep(steps) result = joinByAttr2(params['BLOCKS'], fieldDpaMan, outputCsv, 'codman', [], UNDISCARD_NONMATCHING, '', 1, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) expressionNotNull = "pobactuni IS NOT '' AND pobactuni is NOT NULL" notNull = filterByExpression(result['OUTPUT'], expressionNotNull, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = 'pobactuni * 1.0' result = calculateField(notNull['OUTPUT'], 'pobactuni_n', 
formulaDummy, context, feedback) # ----------------------CONVERTIR A NUMERICOS -------------------- steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = 'pobactive * 1.0' result = calculateField(result['OUTPUT'], 'pobactive_n', formulaDummy, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = 'pobt * 1.0' result = calculateField(result['OUTPUT'], 'pobt_n', formulaDummy, context, feedback) # ----------------------PROPORCIONES AREA-------------------------- steps = steps + 1 feedback.setCurrentStep(steps) blocks = calculateArea(result['OUTPUT'], 'area_bloc', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) segments = intersection(blocks['OUTPUT'], gridNeto['OUTPUT'], ['pobactive_n', 'pobt_n', 'area_bloc'], ['id_grid', 'area_grid'], context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) segmentsArea = calculateArea(segments['OUTPUT'], 'area_seg', context, feedback) # -------------------------PROPORCIONES VALORES------------------------- steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = '(area_seg/area_bloc) * pobactive_n' result = calculateField(segmentsArea['OUTPUT'], 'pobactive_n_seg', formulaDummy, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = '(area_seg/area_bloc) * pobt_n' result = calculateField(result['OUTPUT'], 'pobt_n_seg', formulaDummy, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) result = makeSureInside(result['OUTPUT'], context, feedback) #---------------------------------------------------------------------- steps = steps + 1 feedback.setCurrentStep(steps) result = joinByLocation(gridNeto['OUTPUT'], result['OUTPUT'], ['pobactive_n_seg', 'pobt_n_seg'], [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = '(pobactive_n_seg_sum/pobt_n_seg_sum) * 100' result = calculateField(result['OUTPUT'], NAMES_INDEX['ID13'][0], formulaDummy, context, feedback, params['OUTPUT']) # steps = steps+1 # feedback.setCurrentStep(steps) # gridNeto = joinByLocation(gridNeto['OUTPUT'], # result['OUTPUT'], # ['pobactuni_n'], # [INTERSECTA], [MEDIA], # UNDISCARD_NONMATCHING, # context, # feedback) # fieldsMapping = [ # {'expression': '"id_grid"', 'length': 10, 'name': 'id_grid', 'precision': 0, 'type': 4}, # {'expression': '"area_grid"', 'length': 16, 'name': 'area_grid', 'precision': 3, 'type': 6}, # {'expression': '"pobactuni_n_mean"', 'length': 20, 'name': NAMES_INDEX['ID13'][0], 'precision': 2, 'type': 6} # ] # steps = steps+1 # feedback.setCurrentStep(steps) # result = refactorFields(fieldsMapping, gridNeto['OUTPUT'], # context, # feedback, params['OUTPUT']) return result
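# The long run of df.loc[...str.len()...] assignments in ID13 (and in the other census
# algorithms above) left-pads the census key parts to fixed widths before concatenating
# them into 'codman'. pandas' str.zfill collapses each pair of those statements into one
# call; a sketch with the widths implied by the padding rules (build_codman is hypothetical):
import pandas as pd

# Fixed widths of the census key parts used to build the block code.
WIDTHS = {'I01': 2, 'I02': 2, 'I03': 2, 'I04': 3, 'I05': 3, 'I06': 2}

def build_codman(df):
    """Zero-pad each key part and concatenate them into the block code."""
    parts = [df[col].astype(str).str.zfill(width) for col, width in WIDTHS.items()]
    out = parts[0]
    for part in parts[1:]:
        out = out + part
    return out

df = pd.DataFrame({'I01': [1], 'I02': [1], 'I03': [50], 'I04': [7], 'I05': [12], 'I06': [3]})
print(build_codman(df))   # 0    01015000701203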
def processAlgorithm(self, params, context, feedback): steps = 0 totalStpes = 14 fieldPopulation = params['FIELD_POPULATION'] fieldHousing = fieldPopulation DISTANCE_WALKABILITY = 300 feedback = QgsProcessingMultiStepFeedback(totalStpes, feedback) """ ----------------------------------------------------------------- Calcular las facilidades a espacios pubicos abiertos ----------------------------------------------------------------- """ steps = steps + 1 feedback.setCurrentStep(steps) if not OPTIONAL_GRID_INPUT: params['CELL_SIZE'] = P_CELL_SIZE grid, isStudyArea = buildStudyArea(params['CELL_SIZE'], params['BLOCKS'], params['STUDY_AREA_GRID'], context, feedback) gridNeto = grid steps = steps + 1 feedback.setCurrentStep(steps) blocks = calculateArea(params['BLOCKS'], 'area_bloc', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) segments = intersection(blocks['OUTPUT'], gridNeto['OUTPUT'], 'area_bloc;' + fieldPopulation, 'id_grid', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) segmentsArea = calculateArea(segments['OUTPUT'], 'area_seg', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaPopulationSegments = '(area_seg/area_bloc) * ' + fieldPopulation populationForSegments = calculateField(segmentsArea['OUTPUT'], 'pop_seg', formulaPopulationSegments, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksWithId = calculateField(populationForSegments['OUTPUT'], 'id_block', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) equipmentWithId = calculateField(params['EQUIPMENT_GREEN'], 'idx', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) centroidsBlocks = createCentroids(blocksWithId['OUTPUT'], context, feedback) result = [] print(params['DISTANCE_OPTIONS']) if (params['DISTANCE_OPTIONS'] == 0): steps = steps + 1 feedback.setCurrentStep(steps) feedback.pushConsoleInfo(str(('Cálculo de áreas de servicio'))) serviceArea = bufferIsocrono(params['ROADS'], equipmentWithId['OUTPUT'], TIME_TRAVEL_COST, STRATEGY_TIME, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) housingServed = intersection(segmentsArea['OUTPUT'], serviceArea['OUTPUT'], [fieldHousing, 'area_bloc'], ['id_grid'], context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) areaHousingServed = calculateArea(housingServed['OUTPUT'], 'area_served', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaHousingSegmentsServed = '(area_served/area_bloc) * ' + fieldHousing housingSegmentsServed = calculateField( areaHousingServed['OUTPUT'], 'has', formulaHousingSegmentsServed, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) housingSegmentsServedFixed = makeSureInside( housingSegmentsServed['OUTPUT'], context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoAndSegmentsServed = joinByLocation( gridNeto['OUTPUT'], housingSegmentsServedFixed['OUTPUT'], 'has', [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) housingForSegmentsFixed = makeSureInside( populationForSegments['OUTPUT'], context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoAndSegmentsServed = joinByLocation( gridNetoAndSegmentsServed['OUTPUT'], housingForSegmentsFixed['OUTPUT'], 'pop_seg', [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaProximity = 'coalesce((coalesce(has_sum,0) / 
coalesce(pop_seg_sum,""))*100, "")' proximity2OpenSpace = calculateField( gridNetoAndSegmentsServed['OUTPUT'], NAMES_INDEX['IB04'][0], formulaProximity, context, feedback, params['OUTPUT']) result = proximity2OpenSpace else: feedback.pushConsoleInfo(str(('Cálculo de buffer radial'))) steps = steps + 1 feedback.setCurrentStep(steps) blockBuffer4GreenSapace = createBuffer(centroidsBlocks['OUTPUT'], DISTANCE_WALKABILITY, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) counterGreenSpace = joinByLocation( blockBuffer4GreenSapace['OUTPUT'], equipmentWithId['OUTPUT'], 'idx', [CONTIENE, INTERSECTA], [COUNT], False, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksWithId['OUTPUT'], 'id_block', counterGreenSpace['OUTPUT'], 'id_block', 'idx_count', False, 'gsp_', context, feedback) """ ----------------------------------------------------------------- Calcular numero de viviendas por hexagano ----------------------------------------------------------------- """ # Haciendo el buffer inverso aseguramos que los segmentos # quden dentro de la malla steps = steps + 1 feedback.setCurrentStep(steps) facilitiesForSegmentsFixed = makeSureInside( blocksJoined['OUTPUT'], context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoAndSegments = joinByLocation( gridNeto['OUTPUT'], facilitiesForSegmentsFixed['OUTPUT'], 'gsp_idx_count;pop_seg', [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) #descartar NULL para obtener el total de viviendas que cumple steps = steps + 1 feedback.setCurrentStep(steps) facilitiesNotNullForSegmentsFixed = filter( facilitiesForSegmentsFixed['OUTPUT'], 'gsp_idx_count', NOT_NULL, '', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoAndSegmentsNotNull = joinByLocation( gridNetoAndSegments['OUTPUT'], facilitiesNotNullForSegmentsFixed['OUTPUT'], 'pop_seg', [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaProximity = 'coalesce((coalesce(pop_seg_sum_2,0) / coalesce(pop_seg_sum,""))*100,"")' proximity2OpenSpace = calculateField( gridNetoAndSegmentsNotNull['OUTPUT'], NAMES_INDEX['IB04'][0], formulaProximity, context, feedback, params['OUTPUT']) result = proximity2OpenSpace return proximity2OpenSpace
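# The radial fallback above buffers block centroids by a walking distance and counts the
# green-space features each buffer reaches. A condensed sketch of that step calling the
# standard native algorithms directly (parameter values shown are the usual defaults;
# count_green_space_within is hypothetical and stands in for the plugin's helper wrappers):
import processing
from qgis.core import QgsProcessing

def count_green_space_within(centroids, green_points, distance, context, feedback):
    """Buffer each centroid and count the green-space points intersecting the buffer."""
    buffers = processing.run('native:buffer', {
        'INPUT': centroids,
        'DISTANCE': distance,      # e.g. 300 m walkability distance
        'SEGMENTS': 5,
        'END_CAP_STYLE': 0,
        'JOIN_STYLE': 0,
        'MITER_LIMIT': 2,
        'DISSOLVE': False,
        'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT,
    }, context=context, feedback=feedback, is_child_algorithm=True)

    return processing.run('native:joinbylocationsummary', {
        'INPUT': buffers['OUTPUT'],
        'JOIN': green_points,
        'JOIN_FIELDS': ['idx'],
        'PREDICATE': [0],          # intersects
        'SUMMARIES': [0],          # count
        'DISCARD_NONMATCHING': False,
        'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT,
    }, context=context, feedback=feedback, is_child_algorithm=True)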
def processAlgorithm(self, params, context, feedback): steps = 0 totalStpes = 3 # fieldPopulation = params['FIELD_POPULATION'] fieldActivities = str(params['FIELD_ACTIVITIES']) feedback = QgsProcessingMultiStepFeedback(totalStpes, feedback) steps = steps + 1 feedback.setCurrentStep(steps) if not OPTIONAL_GRID_INPUT: params['CELL_SIZE'] = P_CELL_SIZE grid, isStudyArea = buildStudyArea(params['CELL_SIZE'], params['BLOCKS'], params['STUDY_AREA_GRID'], context, feedback) gridNeto = grid tempOutput = self.CURRENT_PATH + '/zaux.shp' # print(QgsProcessing.TEMPORARY_OUTPUT) steps = steps + 1 feedback.setCurrentStep(steps) activitiesGrid = joinAttrByLocation(params['TERTIARYUSES_ACTIVITIES'], gridNeto['OUTPUT'], ['id_grid'], [INTERSECTA], UNDISCARD_NONMATCHING, context, feedback, tempOutput) # steps = steps+1 # feedback.setCurrentStep(steps) # rep = calculateField(gridNeto['OUTPUT'], 'id_ter', '$id', context, # feedback, type=1) activitiesLayer = QgsVectorLayer(tempOutput, "activitiesGrid", "ogr") # activitiesLayer = convertTempOuputToObject(activitiesGrid) # layer = self.parameterAsVectorLayer(params, activitiesLayer, context) layer = activitiesLayer # layer = activitiesGrid features = layer.getFeatures() # fields = layer.dataProvider().fields() field_names = [field.name() for field in layer.fields()] # print(field_names) # print(len(features)) df = pd.DataFrame(features, columns=field_names) # df["id_grid"]= df["id_grid"].astype(int) aggregation = {fieldActivities: {'amount_class': 'count'}} grouped = df.groupby(['id_grid', fieldActivities]).agg(aggregation) grouped.columns = grouped.columns.droplevel(level=0) aggregation = { fieldActivities: { 'total_class': 'count' # conteo de todos los puntos # 'total_class':'nunique' # conteo de los puntos no repetidos } } grouped2 = df.groupby(['id_grid']).agg(aggregation) grouped2.columns = grouped2.columns.droplevel(level=0) res = grouped.join(grouped2).reset_index() print(res['amount_class']) print(res) uniqueActivities = pd.unique(df[fieldActivities]) totalActivities = len(uniqueActivities) res["total_study"] = totalActivities # cross = pd.crosstab(df['id'], df[fieldActivities]) res["proporcion"] = ((res['amount_class'] / res['total_class']) * np.log(res['amount_class'] / res['total_class'])) aggregation = {'proporcion': {'shannon': 'sum'}} res = res.groupby(['id_grid']).agg(aggregation) res.columns = res.columns.droplevel(level=0) res['shannon'] = res['shannon'] * -1 outputCsv = self.CURRENT_PATH + '/sett_shannon.csv' feedback.pushConsoleInfo(str(('Settings shannon en ' + outputCsv))) # res.to_csv(outputCsv, sep = ";", encoding='utf-8') res.to_csv(outputCsv) print(res) exitCsv = os.path.exists(outputCsv) if (exitCsv): print("El archivo CSV existe") else: print("No se encuentra CSV") CSV = QgsVectorLayer(outputCsv, "csv", "ogr") featuresCSV = CSV.getFeatures() # fields = layer.dataProvider().fields() field_names = [field.name() for field in CSV.fields()] print(field_names) steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = 'to_string(id_grid)' gridDummy = calculateField(gridNeto['OUTPUT'], 'griid', formulaDummy, context, feedback, QgsProcessing.TEMPORARY_OUTPUT, 2) steps = steps + 1 feedback.setCurrentStep(steps) gridShannon = joinByAttr2(gridDummy['OUTPUT'], 'griid', outputCsv, 'id_grid', 'shannon', UNDISCARD_NONMATCHING, '', 1, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaDummy = 'coalesce(shannon * 1, "")' result = calculateField(gridShannon['OUTPUT'], NAMES_INDEX['IA11'][0], formulaDummy, context, feedback, 
params['OUTPUT']) # gridShannon = joinByAttr(r'/Users/terra/llactalab/data/SHAPES_PARA_INDICADORES/SIS-OUTPUTS/ia11.shp', 'id_grid', # '/Users/terra/llactalab/data/SHAPES_PARA_INDICADORES/SIS-OUTPUTS/sett_shannon.csv', 'id_grid', # ['shannon'], # UNDISCARD_NONMATCHING, # '', # 1, # context, # feedback, params['OUTPUT']) # res.iloc[1:, [4]] = res.iloc[1:, [2]] / res.iloc[1:, [3]] # print(totalActivities) # print(grouped) # print(grouped2) # print(un) # print(cross) # print(df[fieldActivities]) return result
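# The land-use-mix algorithm above groups tertiary activities by grid cell and computes a
# Shannon diversity index, H = -sum(p_i * ln p_i), over the activity classes present in
# each cell. A compact pandas sketch of that calculation with illustrative data:
import numpy as np
import pandas as pd

# One row per activity point already tagged with its grid cell.
df = pd.DataFrame({
    'id_grid':  [1, 1, 1, 1, 2, 2],
    'activity': ['shop', 'shop', 'school', 'bar', 'shop', 'shop'],
})

counts = df.groupby(['id_grid', 'activity']).size().rename('amount_class').reset_index()
totals = df.groupby('id_grid').size().rename('total_class')
counts = counts.join(totals, on='id_grid')

p = counts['amount_class'] / counts['total_class']
counts['p_ln_p'] = p * np.log(p)
shannon = -counts.groupby('id_grid')['p_ln_p'].sum()
print(shannon)   # cell 2 holds a single class, so its index is 0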
def processAlgorithm(self, parameters, context, model_feedback): # Use a multi-step feedback, so that individual child algorithm progress reports are adjusted for the # overall progress through the model feedback = QgsProcessingMultiStepFeedback(5, model_feedback) results = {} outputs = {} model_feedback.pushConsoleInfo("start") inputLayer = self.parameterAsVectorLayer(parameters, "inputlayer", context) agfields = self.parameterAsFields(parameters, 'agfield', context) cfields = self.parameterAsFields(parameters, 'cfield', context) # ret = inputLayer.aggregate(QgsAggregateCalculator.Sum, fieldOrExpression: str, parameters: QgsAggregateCalculator.AggregateParameters = QgsAggregateCalculator.AggregateParameters(), context: QgsExpressionContext = None, fids: object = None) # retar = sum(cfields[0], group_by:=agfields[0]) # results['OUTPUT'] = outputs['Gdal_translate']['OUTPUT'] #enc = self.parameterAsFile( # parameters, # self.ENCODING, # context # # #) # enc = self.parameterAsInt( parameters,self.ENCODING, context ) #encstring = self.encode[enc] #feedback.pushConsoleInfo( encstring) basename = "memorylayer.gpkg" tmp_path = QgsProcessingUtils.generateTempFilename(basename) conn = sqlite3.connect(tmp_path) # sqliteを操作するカーソルオブジェクトを作成 cur = conn.cursor() entbl = "sample_tbl" key_fieldname = agfields[0] value_fieldname = cfields[0] feedback.pushConsoleInfo("field name " + key_fieldname) feedback.pushConsoleInfo("value field name " + value_fieldname) # 調査結果格納テーブルの作成 crsql = 'CREATE TABLE \"' + entbl + '\"( \"' + key_fieldname + '\" STRING, \"' + value_fieldname + '\" NUMERIC);' cur.execute(crsql) # uri = csvfile #valueAsPythonString( # csv file read # read input layer isql = 'insert into \"' + entbl + '\" values (?,?);' for f in inputLayer.getFeatures(): # t = '(\'' + f[key_fieldname ] + '\',' + str(f[value_fieldname] ) + ',)' #feedback.pushConsoleInfo( "class " + f[key_fieldname].__class__.__name__ + ' ' + f[value_fieldname].__class__.__name__ ) sqv = [] if isinstance(f[value_fieldname], (int, float)): if (type(f[key_fieldname]) is str): sqv.append(f[key_fieldname]) sqv.append(f[value_fieldname]) cur.execute(isql, sqv) else: feedback.pushConsoleInfo("no value ") if (type(f[key_fieldname]) is str): sqv.append(f[key_fieldname]) sqv.append(0) cur.execute(isql, sqv) # データベースへコミット。これで変更が反映される。 conn.commit() sqlstr = 'create table temp_vlayer as select \"' + key_fieldname + '\", sum(\"' + value_fieldname + '\") vn from \"' + entbl + '\" group by \"' + key_fieldname + '\";' # 町名別集計 cur.execute(sqlstr) feedback.pushConsoleInfo("execute " + sqlstr) result_def = tmp_path + '|layername=temp_vlayer' tgttable = "temp_vlayer" #results["OUTPUT"] = result_def #return results fields = QgsFields() fields.append(QgsField(key_fieldname, QVariant.String)) fields.append(QgsField(value_fieldname, QVariant.Double)) (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context, fields) feedback.pushConsoleInfo("create sink ") # Compute the number of steps to display within the progress bar and # get features from source #total = 100.0 / resultlayer.featureCount() if resultlayer.featureCount() else 0 #features = resultlayer.getFeatures() sqlstr = 'select \"' + key_fieldname + '\",' + 'vn from temp_vlayer;' c = conn.cursor() for row in c.execute(sqlstr): #for current, feature in enumerate(list1): # Stop the algorithm if cancel button has been clicked if feedback.isCanceled(): break nfeature = QgsFeature(fields) nfeature[key_fieldname] = row[0] nfeature[value_fieldname] = row[1] sink.addFeature(nfeature, 
QgsFeatureSink.FastInsert) # Update the progress bar #feedback.setProgress(int(current * total)) conn.close() # Return the results of the algorithm. In this case our only result is # the feature sink which contains the processed features, but some # algorithms may return multiple feature sinks, calculated numeric # statistics, etc. These should all be included in the returned # dictionary, with keys matching the feature corresponding parameter # or output names. return {self.OUTPUT: dest_id}
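# The aggregation above copies features into a temporary SQLite table, runs a
# GROUP BY ... SUM, and streams the grouped rows into the output sink. A trimmed,
# self-contained sketch of the SQLite half (sum_by_key, the table and the column names
# are placeholders; the plugin uses a temporary GeoPackage path instead of :memory:):
import sqlite3

def sum_by_key(rows, key_name='town', value_name='population'):
    """Insert (key, value) rows into an in-memory table and sum the values per key."""
    conn = sqlite3.connect(':memory:')
    cur = conn.cursor()
    cur.execute('CREATE TABLE sample_tbl ("%s" TEXT, "%s" NUMERIC)' % (key_name, value_name))
    cur.executemany('INSERT INTO sample_tbl VALUES (?, ?)', rows)
    conn.commit()
    result = cur.execute('SELECT "%s", SUM("%s") FROM sample_tbl GROUP BY "%s"'
                         % (key_name, value_name, key_name)).fetchall()
    conn.close()
    return result

print(sum_by_key([('A', 10), ('A', 5), ('B', 7)]))   # [('A', 15), ('B', 7)]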
def processAlgorithm(self, params, context, feedback): isValid = lambda x: 0 if x is None else 1 isBusStop = isValid(params['BUSSTOP']) isTramStop = isValid(params['TRAMSTOP']) isBikeStop = isValid(params['BIKESTOP']) isBikeWay = isValid(params['BIKEWAY']) isCrossWalk = isValid(params['CROSSWALK']) isRoads = isValid(params['ROADS']) totalValides = isBusStop + isTramStop + isBikeStop + isBikeWay + isCrossWalk if (totalValides >= 3): if isRoads == 0 and params['DISTANCE_OPTIONS'] == 0: feedback.reportError( str(('Distancia isocrona requiere la red vial'))) return {} steps = 0 totalStpes = 37 fieldPopulateOrHousing = params['FIELD_POPULATE_HOUSING'] DISTANCE_BUSSTOP = 300 DISTANCE_TRAMSTOP = 500 DISTANCE_BKESTOP = 300 DISTANCE_BIKEWAY = 300 DISTANCE_CROSSWALK = 300 MIN_FACILITIES = 3 OPERATOR_GE = 3 feedback = QgsProcessingMultiStepFeedback(totalStpes, feedback) """ ----------------------------------------------------------------- Calcular las facilidades ----------------------------------------------------------------- """ steps = steps + 1 feedback.setCurrentStep(steps) if not OPTIONAL_GRID_INPUT: params['CELL_SIZE'] = P_CELL_SIZE grid, isStudyArea = buildStudyArea(params['CELL_SIZE'], params['BLOCKS'], params['STUDY_AREA_GRID'], context, feedback) gridNeto = grid steps = steps + 1 feedback.setCurrentStep(steps) blocks = calculateArea(params['BLOCKS'], 'area_bloc', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) segments = intersection(blocks['OUTPUT'], gridNeto['OUTPUT'], 'area_bloc;' + fieldPopulateOrHousing, 'id_grid', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) segmentsArea = calculateArea(segments['OUTPUT'], 'area_seg', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaPopulationSegments = '(area_seg/area_bloc) * ' + fieldPopulateOrHousing populationForSegments = calculateField(segmentsArea['OUTPUT'], 'pop_seg', formulaPopulationSegments, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksWithId = calculateField(populationForSegments['OUTPUT'], 'id_block', '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) centroidsBlocks = createCentroids(blocksWithId['OUTPUT'], context, feedback) result = [] idxs = [ 'idxbus', 'idxtram', 'idxbikestop', 'idkbikeway', 'idxwalk' ] layers = [] if (params['DISTANCE_OPTIONS'] == 0): steps = steps + 1 feedback.setCurrentStep(steps) feedback.pushConsoleInfo(str(('Cálculo de áreas de servicio'))) pointsBikeWay = pointsAlongLines(params['BIKEWAY'], 50, context, feedback) pointsCrossWalk = pointsAlongLines(params['CROSSWALK'], 50, context, feedback) if isBusStop == 1: layers.append([ params['BUSSTOP'], STRATEGY_DISTANCE, DISTANCE_BUSSTOP ]) if isTramStop == 1: layers.append([ params['TRAMSTOP'], STRATEGY_DISTANCE, DISTANCE_TRAMSTOP ]) if isBikeStop == 1: layers.append([ params['BIKESTOP'], STRATEGY_DISTANCE, DISTANCE_BKESTOP ]) if isBikeWay == 1: layers.append([ pointsBikeWay['OUTPUT'], STRATEGY_DISTANCE, DISTANCE_BIKEWAY ]) if isCrossWalk == 1: layers.append([ pointsCrossWalk['OUTPUT'], STRATEGY_DISTANCE, DISTANCE_CROSSWALK ]) serviceAreas = multiBufferIsocrono(params['ROADS'], layers, context, feedback) iidx = -1 for serviceArea in serviceAreas: iidx = iidx + 1 idx = idxs[iidx] steps = steps + 1 feedback.setCurrentStep(steps) serviceArea = calculateField(serviceArea, idx, '$id', context, feedback, type=1) steps = steps + 1 feedback.setCurrentStep(steps) centroidsBlocks = joinByLocation(centroidsBlocks['OUTPUT'], serviceArea['OUTPUT'], 
[idx], [INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) # formulaDummy = 'idxbus_count * 1' formulaDummy = 'coalesce(idxbus_count, 0) + coalesce(idxtram_count, 0) + coalesce(idxbikestop_count,0) + coalesce(idkbikeway_count, 0) + coalesce(idxwalk_count, 0)' facilitiesCover = calculateField(centroidsBlocks['OUTPUT'], 'facilities', formulaDummy, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) facilitiesFullCover = filter(facilitiesCover['OUTPUT'], 'facilities', OPERATOR_GE, MIN_FACILITIES, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoFacilitiesCover = joinByLocation( gridNeto['OUTPUT'], facilitiesCover['OUTPUT'], ['pop_seg', 'facilities'], [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) fieldsMapping = [{ 'expression': '"id_grid"', 'length': 10, 'name': 'id_grid', 'precision': 0, 'type': 4 }, { 'expression': '"area_grid"', 'length': 16, 'name': 'area_grid', 'precision': 3, 'type': 6 }, { 'expression': '"pop_seg_sum"', 'length': 20, 'name': 'ptotal', 'precision': 2, 'type': 6 }, { 'expression': '"facilities_sum"', 'length': 20, 'name': 'facilities', 'precision': 2, 'type': 6 }] steps = steps + 1 feedback.setCurrentStep(steps) gridNetoFacilitiesCover = refactorFields( fieldsMapping, gridNetoFacilitiesCover['OUTPUT'], context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoFacilities = joinByLocation( gridNetoFacilitiesCover['OUTPUT'], facilitiesFullCover['OUTPUT'], ['pop_seg'], [CONTIENE], [SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaProximity = 'coalesce((coalesce(pop_seg_sum,0) / coalesce(ptotal,""))*100,"")' proximity2AlternativeTransport = calculateField( gridNetoFacilities['OUTPUT'], NAMES_INDEX['IC04'][0], formulaProximity, context, feedback, params['OUTPUT']) result = proximity2AlternativeTransport else: feedback.pushConsoleInfo(str(('Cálculo de buffer radial'))) blocksJoined = blocksWithId steps = steps + 1 feedback.setCurrentStep(steps) blockBuffer4BusStop = createBuffer(centroidsBlocks['OUTPUT'], DISTANCE_BUSSTOP, context, feedback) # ------------------------------------ if isBusStop == 1: steps = steps + 1 feedback.setCurrentStep(steps) layerBusStop = calculateField(params['BUSSTOP'], 'idx', '$id', context, feedback, type=1) layerBusStop = layerBusStop['OUTPUT'] steps = steps + 1 feedback.setCurrentStep(steps) counterBusStop = joinByLocation( blockBuffer4BusStop['OUTPUT'], layerBusStop, 'idx', [INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksJoined['OUTPUT'], 'id_block', counterBusStop['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'bs_', context, feedback) # --------------------------------------------------- if isTramStop == 1: steps = steps + 1 feedback.setCurrentStep(steps) blockBuffer4TramStop = createBuffer( centroidsBlocks['OUTPUT'], DISTANCE_TRAMSTOP, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) layerTramStop = calculateField(params['TRAMSTOP'], 'idx', '$id', context, feedback, type=1) layerTramStop = layerTramStop['OUTPUT'] steps = steps + 1 feedback.setCurrentStep(steps) counterTramStop = joinByLocation( blockBuffer4TramStop['OUTPUT'], layerTramStop, 'idx', [INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksJoined['OUTPUT'], 'id_block', 
counterTramStop['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'ts_', context, feedback) # ----------------------------------------------- if isBikeStop == 1: steps = steps + 1 feedback.setCurrentStep(steps) blockBuffer4BikeStop = createBuffer( centroidsBlocks['OUTPUT'], DISTANCE_BKESTOP, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) layerBikeStop = calculateField(params['BIKESTOP'], 'idx', '$id', context, feedback, type=1) layerBikeStop = layerBikeStop['OUTPUT'] steps = steps + 1 feedback.setCurrentStep(steps) counteBikeStop = joinByLocation( blockBuffer4BikeStop['OUTPUT'], layerBikeStop, 'idx', [INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksJoined['OUTPUT'], 'id_block', counteBikeStop['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'bks_', context, feedback) # ----------------------------------------- if isBikeWay == 1: steps = steps + 1 feedback.setCurrentStep(steps) BlockBuffer4BikeWay = createBuffer( centroidsBlocks['OUTPUT'], DISTANCE_BIKEWAY, context, feedback) pointsBikeWay = pointsAlongLines(params['BIKEWAY'], 50, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) layerBikeWay = calculateField(pointsBikeWay['OUTPUT'], 'idx', '$id', context, feedback, type=1) layerBikeWay = layerBikeWay['OUTPUT'] steps = steps + 1 feedback.setCurrentStep(steps) counterBikeWay = joinByLocation( BlockBuffer4BikeWay['OUTPUT'], layerBikeWay, 'idx', [INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksJoined['OUTPUT'], 'id_block', counterBikeWay['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'bw_', context, feedback) # ------------------------------------------ if isCrossWalk == 1: steps = steps + 1 feedback.setCurrentStep(steps) BlockBuffer4CrossWalk = createBuffer( centroidsBlocks['OUTPUT'], DISTANCE_CROSSWALK, context, feedback) pointsCrossWalk = pointsAlongLines(params['CROSSWALK'], 50, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) layerCrossWalk = calculateField(pointsCrossWalk['OUTPUT'], 'idx', '$id', context, feedback, type=1) layerCrossWalk = layerCrossWalk['OUTPUT'] steps = steps + 1 feedback.setCurrentStep(steps) counterCrossWalk = joinByLocation( BlockBuffer4CrossWalk['OUTPUT'], layerCrossWalk, 'idx', [INTERSECTA], [COUNT], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) blocksJoined = joinByAttr(blocksJoined['OUTPUT'], 'id_block', counterCrossWalk['OUTPUT'], 'id_block', 'idx_count', UNDISCARD_NONMATCHING, 'cw_', context, feedback) # -------------------------------------------- #TODO: CAMBIAR POR UN METODO BUCLE formulaParseBS = 'CASE WHEN coalesce(bs_idx_count, 0) > 0 THEN 1 ELSE 0 END' steps = steps + 1 feedback.setCurrentStep(steps) blocksFacilities = calculateField(blocksJoined['OUTPUT'], 'parse_bs', formulaParseBS, context, feedback) formulaParseTS = 'CASE WHEN coalesce(ts_idx_count, 0) > 0 THEN 1 ELSE 0 END' steps = steps + 1 feedback.setCurrentStep(steps) blocksFacilities = calculateField(blocksFacilities['OUTPUT'], 'parse_ts', formulaParseTS, context, feedback) formulaParseBKS = 'CASE WHEN coalesce(bks_idx_count, 0) > 0 THEN 1 ELSE 0 END' steps = steps + 1 feedback.setCurrentStep(steps) blocksFacilities = calculateField(blocksFacilities['OUTPUT'], 'parse_bks', formulaParseBKS, context, feedback) formulaParseBW = 'CASE WHEN coalesce(bw_idx_count, 0) > 0 THEN 1 
ELSE 0 END' steps = steps + 1 feedback.setCurrentStep(steps) blocksFacilities = calculateField(blocksFacilities['OUTPUT'], 'parse_bw', formulaParseBW, context, feedback) formulaParseCW = 'CASE WHEN coalesce(cw_idx_count, 0) > 0 THEN 1 ELSE 0 END' steps = steps + 1 feedback.setCurrentStep(steps) blocksFacilities = calculateField(blocksFacilities['OUTPUT'], 'parse_cw', formulaParseCW, context, feedback) formulaFacilities = 'parse_bs + parse_ts + parse_bks + parse_bw + parse_cw' steps = steps + 1 feedback.setCurrentStep(steps) blocksFacilities = calculateField(blocksFacilities['OUTPUT'], 'facilities', formulaFacilities, context, feedback) # Haciendo el buffer inverso aseguramos que los segmentos # quden dentro de la malla steps = steps + 1 feedback.setCurrentStep(steps) facilitiesForSegmentsFixed = makeSureInside( blocksFacilities['OUTPUT'], context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoAndSegments = joinByLocation( gridNeto['OUTPUT'], facilitiesForSegmentsFixed['OUTPUT'], 'bs_idx_count;ts_idx_count;bks_idx_count;bw_idx_count;cw_idx_count;facilities;pop_seg', [CONTIENE], [MAX, SUM], UNDISCARD_NONMATCHING, context, feedback) # tomar solo los que tienen cercania simultanea (descartar lo menores de 3) steps = steps + 1 feedback.setCurrentStep(steps) facilitiesNotNullForSegmentsFixed = filter( facilitiesForSegmentsFixed['OUTPUT'], 'facilities', OPERATOR_GE, MIN_FACILITIES, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) gridNetoAndSegmentsSimulta = joinByLocation( gridNeto['OUTPUT'], facilitiesNotNullForSegmentsFixed['OUTPUT'], 'pop_seg', [CONTIENE], [MAX, SUM], UNDISCARD_NONMATCHING, context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) totalHousing = joinByAttr(gridNetoAndSegments['OUTPUT'], 'id_grid', gridNetoAndSegmentsSimulta['OUTPUT'], 'id_grid', 'pop_seg_sum', UNDISCARD_NONMATCHING, 'net_', context, feedback) steps = steps + 1 feedback.setCurrentStep(steps) formulaProximity = 'coalesce((coalesce(net_pop_seg_sum,0) / coalesce(pop_seg_sum,""))*100,"")' proximity2AlternativeTransport = calculateField( totalHousing['OUTPUT'], NAMES_INDEX['IC04'][0], formulaProximity, context, feedback, params['OUTPUT']) result = proximity2AlternativeTransport return result else: feedback.reportError( str(('Se necesita al menos tres redes de transporte'))) return {}
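# In the radial branch above, each transport network contributes a 0/1 flag (parse_bs,
# parse_ts, ...) and a block only counts as served when at least MIN_FACILITIES = 3 of
# the flags are set, i.e. it is simultaneously close to three or more networks. A small
# pandas sketch of that simultaneity test with illustrative per-block counts:
import pandas as pd

MIN_FACILITIES = 3

blocks = pd.DataFrame({
    'bs_idx_count':  [2, 0, 1],   # bus stops within reach
    'ts_idx_count':  [1, 0, 0],   # tram stops
    'bks_idx_count': [0, 1, 1],   # bike-share stations
    'bw_idx_count':  [3, 0, 1],   # bikeway points
    'cw_idx_count':  [0, 0, 2],   # crosswalk points
})

# Equivalent of CASE WHEN coalesce(count, 0) > 0 THEN 1 ELSE 0 END for every network.
flags = (blocks.fillna(0) > 0).astype(int)
blocks['facilities'] = flags.sum(axis=1)
blocks['served'] = blocks['facilities'] >= MIN_FACILITIES
print(blocks[['facilities', 'served']])   # blocks 0 and 2 pass the threshold, block 1 does not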
def processAlgorithm(self, parameters, context, model_feedback):
    """
    Here is where the processing itself takes place.
    """
    results = {}
    feedback = QgsProcessingMultiStepFeedback(1, model_feedback)
    inputLayer = self.parameterAsVectorLayer(parameters, self.INPUT, context)
    if inputLayer is None:
        raise QgsProcessingException(self.tr('input layer missed'))
    meshLayer = self.parameterAsVectorLayer(parameters, "meshlayer", context)
    if meshLayer is None:
        raise QgsProcessingException(self.tr('mesh layer missed'))
    meshidfields = self.parameterAsFields(parameters, 'meshid', context)
    aggrefields = self.parameterAsFields(parameters, 'aggrefield', context)
    #limit_sample = self.parameterAsInt ( parameters,
    #                                     'limit_sample',
    #                                     context)

    feedback.setCurrentStep(1)
    if feedback.isCanceled():
        return {}

    # InterSect
    # Stat_CSVAddressPolygon
    # area of the administrative-boundary polygons
    # name of the output area field
    area_column = 'adm_area'
    params3 = {
        'INPUT': inputLayer,
        'FIELD_NAME': area_column,
        'FIELD_TYPE': 0,
        'FIELD_LENGTH': 12,
        'FIELD_PRECISION': 5,
        'NEW_FIELD': 1,
        'FORMULA': '$area',
        'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT
    }
    res3 = processing.run('qgis:fieldcalculator', params3, context=context,
                          feedback=feedback, is_child_algorithm=True)
    if feedback.isCanceled():
        return {}
    feedback.pushConsoleInfo("calculate area OK ")

    # from here on this could probably be factored into a function
    # intersect of the mesh and the administrative boundaries
    params2 = {
        'INPUT': meshLayer,
        'INPUT_FIELDS': [],
        'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT,
        'OVERLAY': res3["OUTPUT"],
        'OVERLAY_FIELDS': []
    }
    # 'OUTPUT' : parameters["OUTPUT"], 'OVERLAY' : res3["OUTPUT"], 'OVERLAY_FIELDS' : [] }
    # use union here, not intersect
    #res2 = processing.run('native:intersection', params2, context=context, feedback=feedback ,is_child_algorithm=True)
    res2 = processing.run('qgis:union', params2, context=context,
                          feedback=feedback, is_child_algorithm=True)
    if feedback.isCanceled():
        return {}
    feedback.pushConsoleInfo("union OK ")

    # area of the intersect polygons
    tgLayer = res2["OUTPUT"]
    if type(tgLayer) is str:
        tgLayer = QgsVectorLayer(tgLayer, "intersect", "ogr")
    #tgLayer.beginEditCommand("Feature triangulation")
    ad_areacolumn = 'isect_area'
    ratio_column = 'area_ratio'
    anbun_col = 'anbun_colum'
    tgLayer.dataProvider().addAttributes([
        QgsField(ad_areacolumn, QVariant.Double),
        QgsField(ratio_column, QVariant.Double),
        QgsField(anbun_col, QVariant.Double)
    ])
    tgLayer.updateFields()
    newFlag = False
    params4 = {
        'INPUT': res2["OUTPUT"],
        'FIELD_NAME': ad_areacolumn,
        'FIELD_TYPE': 0,
        'FIELD_LENGTH': 12,
        # 'FIELD_PRECISION':5, 'NEW_FIELD':1,'FORMULA':'$area','OUTPUT' :parameters["OUTPUT"] }
        'FIELD_PRECISION': 5,
        'NEW_FIELD': newFlag,
        'FORMULA': '$area',
        'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT
    }
    #for feat in tgLayer.getFeatures():
    #    feat[ad_areacolumn] = feat.geometry().area()
    #    feedback.pushConsoleInfo( "feature "+ str(feat[0]) )
    #    feedback.pushConsoleInfo( "area "+ str(feat.geometry().area()) )
    #    tgLayer.updateFeature(feat)
    #tgLayer.endEditCommand()
    #results["OUTPUT"] = res2["OUTPUT"]
    #return results
    #params4 = { 'INPUT' : res2["OUTPUT"], 'FIELD_NAME' : ad_areacolumn , 'FIELD_TYPE': 0, 'FIELD_LENGTH':12,
    #            'FIELD_PRECISION':5, 'NEW_FIELD':1,'FORMULA':'$area','OUTPUT' :parameters["OUTPUT"] }
    #            'FIELD_PRECISION':5, 'NEW_FIELD':1,'FORMULA':'$area','OUTPUT' :QgsProcessing.TEMPORARY_OUTPUT }
    res5 = processing.run('qgis:fieldcalculator', params4, context=context,
                          feedback=feedback, is_child_algorithm=True)
    if feedback.isCanceled():
        return {}
    feedback.pushConsoleInfo("calc area ok ")

    ratio_str = ad_areacolumn + "/" + area_column
    params5 = {
        'INPUT': res5["OUTPUT"],
        'FIELD_NAME': ratio_column,
        'FIELD_TYPE': 0,
        'FIELD_LENGTH': 12,
        # 'FIELD_PRECISION':5, 'NEW_FIELD':False,'FORMULA':ratio_str,'OUTPUT' :parameters["OUTPUT"] }
        'FIELD_PRECISION': 5,
        'NEW_FIELD': newFlag,
        'FORMULA': ratio_str,
        'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT
    }
    res6 = processing.run('qgis:fieldcalculator', params5, context=context,
                          feedback=feedback, is_child_algorithm=True)
    if feedback.isCanceled():
        return {}
    feedback.pushConsoleInfo("calc ratio ok ")

    # Multiply the sample value by the area ratio between each intersect polygon
    # and its original administrative polygon to get an apportioned sample value
    # per intersect polygon.
    # def CalcDataUsingRatio( intersect_output, area_column, ratio_column, out_table, ad_areacolumn)
    #   intersect_output : intersect result
    #   area_column      : name of the output area column
    #   ratio_column     : name of the output column for the apportioned value
    #   out_table        : output table name
    #   ad_areacolumn    : column of the administrative polygon table holding the area value
    # ratio_column = 'area_ratio'
    # res5 = agtools.CalcDataUsingRatio( res4['OUTPUT'], area_column, ratio_column , ad_areacolumn , model_feedback)

    # compute the apportioned value
    anbun_col = 'snum'
    formula_str = aggrefields[0] + " * " + ratio_column
    params7 = {
        'INPUT': res6["OUTPUT"],
        'FIELD_NAME': anbun_col,
        'FIELD_TYPE': 0,
        'FIELD_LENGTH': 12,
        # 'FIELD_PRECISION':5, 'NEW_FIELD':newFlag,'FORMULA':formula_str ,'OUTPUT' :parameters["OUTPUT"] }
        'FIELD_PRECISION': 5,
        'NEW_FIELD': newFlag,
        'FORMULA': formula_str,
        'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT
    }
    res7 = processing.run('qgis:fieldcalculator', params7, context=context, feedback=feedback)
    # res7 = processing.run('qgis:fieldcalculator', params7, context=context, feedback=feedback ,is_child_algorithm=True)
    if feedback.isCanceled():
        return {}
    feedback.pushConsoleInfo("anbun ok ")
    # results["OUTPUT"] = res7["OUTPUT"]
    # return results

    # aggregate by mesh using the apportioned values
    meshid_f = meshidfields[0]
    #QgsProject.instance().addMapLayer(res6["OUTPUT"])
    #mesh_aggregate = 'aggregate(layer:=\'' + res6["OUTPUT"].id() + '\',aggregate:=\'sum\',expression:="'+ anbun_col + '", filter:="' + meshid_f +'"=attribute(@parent,\'' + meshid_f + '\'))'
    #feedback.pushConsoleInfo( "mesh_aggregate " + mesh_aggregate )
    #mesh_exr = QgsExpression( mesh_aggregate )
    #params6 = { 'INPUT' : meshLayer, 'FIELD_NAME' : anbun_col , 'FIELD_TYPE': 1,
    #            'NEW_FIELD':1,'FORMULA':mesh_exr ,'OUTPUT' :QgsProcessing.TEMPORARY_OUTPUT }
    agar = []
    tgLayer = res7["OUTPUT"]
    if type(tgLayer) is str:
        tgLayer = QgsVectorLayer(tgLayer, "intersect", "ogr")
    for field in tgLayer.fields():
        agreg = {}
        agreg['input'] = '"' + field.name() + '"'
        feedback.pushConsoleInfo("name " + field.name())
        agreg['name'] = field.name()
        agreg['aggregate'] = 'first_value'
        agreg['length'] = field.length()
        agreg['precision'] = field.precision()
        agreg['type'] = field.type()
        if field.name() == anbun_col:
            agreg['aggregate'] = 'sum'
        agar.append(agreg)
    params6 = {
        'INPUT': res7["OUTPUT"],
        'GROUP_BY': meshid_f,
        'AGGREGATES': agar,
        'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT
    }
    feedback.pushConsoleInfo("aggregate ")
    res8 = processing.run('qgis:aggregate', params6, context=context,
                          feedback=feedback, is_child_algorithm=True)
    if feedback.isCanceled():
        return {}
    feedback.pushConsoleInfo("aggregate OK ")

    # join layers with qgis:joinattributestable
    #QgsProject.instance().addMapLayer(res7["OUTPUT"])
    param7 = {
        'DISCARD_NONMATCHING': False,
        'FIELD': meshid_f,
        'FIELDS_TO_COPY': [anbun_col],
        'FIELD_2': meshid_f,
        'INPUT': meshLayer,
        'INPUT_2': res8['OUTPUT'],
        'METHOD': 1,
        'OUTPUT': parameters["OUTPUT"],
        'PREFIX': ''
    }
    res9 = processing.run('qgis:joinattributestable', param7, context=context,
                          feedback=feedback, is_child_algorithm=True)
    if feedback.isCanceled():
        return {}
    feedback.pushConsoleInfo("joinattributestable OK")

    # divide_f = "divide_f"
    # params8 = { 'INPUT' : res8["OUTPUT"],
    #             'FIELD_NAME' : divide_f , 'FIELD_TYPE': 1,
    #             'NEW_FIELD':1,'FORMULA':QgsExpression('0') ,'OUTPUT' :QgsProcessing.TEMPORARY_OUTPUT }
    # res9 = processing.run('qgis:fieldcalculator', params8, feedback=feedback)
    # if feedback.isCanceled():
    #     return {}
    # feedback.pushConsoleInfo( "add divide flag OK" )

    # minimum-value check
    # need to verify whether this should be < or <=
    #exp_str = '"' + anbun_col + '" <= ' + str(limit_sample ) + ' and "' + divide_f + '"=0'
    #exp_str = '"' + anbun_col + '" <= ' + str(limit_sample )
    #feedback.pushConsoleInfo( "exp_str " + exp_str )
    #nexpression =QgsExpression(exp_str)
    #request = QgsFeatureRequest().setFilterExpression(exp_str)
    #rsLayer = res9["OUTPUT"]
    #if type(rsLayer) is str:
    #    rsLayer = QgsVectorLayer(rsLayer, "mesh", "memory")
    #tgLayer.beginEditCommand("Feature triangulation")
    # matches = 0
    #for f in rsLayer.getFeatures():
    #    feedback.pushConsoleInfo( "value " +str( f[anbun_col]) )
    #    matches += 1
    #feedback.pushConsoleInfo( "make expression OK" )
    #params10 = { 'INPUT' : res8["OUTPUT"], 'EXPRESSION' : expression , 'METHOD': 0 }
    #outputs_statv = processing.run('QGIS_stat:Stat_CSVAddressPolygon', alg_params, context=context, feedback=feedback, is_child_algorithm=True)
    #res11 = processing.run('qgis:selectbyexpression', params10, feedback=feedback)
    # count the features that reached the minimum value
    #scount = res11["OUTPUT"].selectedFeatureCount()
    #feedback.pushConsoleInfo( "scount = "+str(matches) )
    #if scount > 0:
    #    # if some features reached the minimum value,
    #    # flag those meshes as finished

    # Return the results of the algorithm. In this case our only result is
    # the feature sink which contains the processed features, but some
    # algorithms may return multiple feature sinks, calculated numeric
    # statistics, etc. These should all be included in the returned
    # dictionary, with keys matching the feature corresponding parameter
    # or output names.
    results["OUTPUT"] = res9["OUTPUT"]
    # results["LIMITPOL"] = matches
    return results

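# --- Illustrative sketch (not part of the original module) -------------------
# The core of the algorithm above is areal weighting: each mesh/admin
# intersection piece receives value * (piece_area / admin_area), and the pieces
# are then summed per mesh id (the 'snum' field). A minimal pure-Python sketch
# of that arithmetic, with invented sample numbers:
from collections import defaultdict


def apportion_to_mesh(pieces):
    """pieces: iterable of (mesh_id, admin_value, admin_area, piece_area)."""
    totals = defaultdict(float)
    for mesh_id, admin_value, admin_area, piece_area in pieces:
        totals[mesh_id] += admin_value * (piece_area / admin_area)
    return dict(totals)


# Example: an admin unit with value 100 split 60/40 between two mesh cells.
# apportion_to_mesh([('M1', 100, 1000.0, 600.0), ('M2', 100, 1000.0, 400.0)])
# -> {'M1': 60.0, 'M2': 40.0}
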
def processAlgorithm(self, parameters, context, model_feedback): """ Here is where the processing itself takes place. """ results = {} csvfile = self.parameterAsFile(parameters, self.INPUT, context) if csvfile is None: raise QgsProcessingException(self.tr('csv file error')) #df = QgsVirtualLayerDefinition() enc = self.parameterAsInt(parameters, 'ENCODING', context) meshLayer = self.parameterAsVectorLayer(parameters, "meshlayer", context) if meshLayer is None: raise QgsProcessingException(self.tr('mesh layer missed')) meshidfields = self.parameterAsFields(parameters, 'meshid', context) limit_sample = self.parameterAsInt(parameters, 'limit_sample', context) maxdivide = self.parameterAsInt(parameters, 'maxdivide', context) uneven_div = self.parameterAsInt(parameters, 'uneven_div', context) popmeshLayer = self.parameterAsVectorLayer(parameters, "popmeshlayer", context) if popmeshLayer is None: raise QgsProcessingException(self.tr('popmes layer missed')) popmeshidfields = self.parameterAsFields(parameters, 'popmeshid', context) popmeshpopfields = self.parameterAsFields(parameters, 'popmeshpop', context) feedback = QgsProcessingMultiStepFeedback(9 + maxdivide, model_feedback) feedback.setCurrentStep(1) if feedback.isCanceled(): return {} # 住所別集計 alg_params = { 'addresslayer': parameters['addresslayer'], 'addressfield': parameters['addressfield'], 'INPUT': csvfile, 'ENCODING': enc, 'CRS': None, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } #Stat_CSVAddressPolygon outputs_statv = processing.run('QGIS_stat:Stat_CSVAddressPolygon', alg_params, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} statv = outputs_statv["OUTPUT"] meshid = meshidfields[0] # 人口メッシュと行政界メッシュのUnion作成する new_popfield = 'pv' param_uni = { 'addresslayer': statv, 'addressfield': parameters['addressfield'][0], 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT, 'popmeshlayer': popmeshLayer, 'popmeshid': popmeshidfields[0], 'popmeshpop': popmeshpopfields[0], 'POPCOLUMN': new_popfield } feedback.setCurrentStep(2) res_uni = processing.run('QGIS_stat:UnionAdmAndPopMeshAlgorithm', param_uni, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("csvs 1 union ok ") # union pop polygon res_unit["OUTPUT"] # population pv # address parameters['addressfield'][0] # 行政界別人口の算出 feedback.setCurrentStep(3) param_pop = { 'inputlayer': res_uni['OUTPUT'], 'agfield': parameters['addressfield'], 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT, # 'OUTPUT':parameters['OUTPUT'], 'cfield': new_popfield } res_adpop = processing.run('QGIS_stat:AggreagteValueAlgorithm', param_pop, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("csvs 1 caliculate pop adm ok ") # UNION mesh と 行政界別 人口の結合 feedback.setCurrentStep(4) param_join = { 'DISCARD_NONMATCHING': False, 'FIELD': parameters['addressfield'], 'FIELDS_TO_COPY': [], 'FIELD_2': parameters['addressfield'], 'INPUT': res_uni['OUTPUT'], 'INPUT_2': res_adpop['OUTPUT'], 'METHOD': 1, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT, #'OUTPUT':parameters['OUTPUT'], 'PREFIX': 'op' } res_join = processing.run('qgis:joinattributestable', param_join, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("csvs 1 join union mesh and popof adm ok ") # UNION MESH 人口 と行政界人口の比率算出 feedback.setCurrentStep(5) param_ratio = { 'FIELD_LENGTH': 12, 'FIELD_NAME': 'pvratio', 'FIELD_PRECISION': 6, 'FIELD_TYPE': 0, 
'FORMULA': ' \"pv\" / \"oppv\" ', 'INPUT': res_join["OUTPUT"], #'OUTPUT':parameters['OUTPUT'] 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT, } res_ratio = processing.run('qgis:fieldcalculator', param_ratio, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo( "csvs 1 calc ratio of adm pop and union polygon population ok ") # Union mesh の想定集計値を算出する 住所別集計値 × ( UNION MESH 人口 と行政界人口の比率算出) feedback.setCurrentStep(6) param_ratio2 = { 'FIELD_LENGTH': 12, 'FIELD_NAME': 'pvsum', 'FIELD_PRECISION': 6, 'FIELD_TYPE': 0, 'FORMULA': ' \"snum\" * \"pvratio\" ', 'INPUT': res_ratio["OUTPUT"], #'OUTPUT':parameters['OUTPUT'] 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } res_ratio2 = processing.run('qgis:fieldcalculator', param_ratio2, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo("csvs 1 calc ratio of research sample ok ") #results["OUTPUT"] = res_ratio2["OUTPUT"] #return results # 入力メッシュとUnionメッシュのUnion feedback.setCurrentStep(7) #results["OUTPUT"] = res_ratio['OUTPUT'] #return results # 入力UNIONメッシュの保存 # レイヤをGeoPackage化 cnv_paramsg = { 'LAYERS': res_ratio2["OUTPUT"], 'OVERWRITE': True, 'SAVE_STYLES': False, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT #'OUTPUT':parameters['OUTPUT'] } input_c = processing.run('native:package', cnv_paramsg, context=context, feedback=feedback, is_child_algorithm=True) feedback.pushConsoleInfo("csvs 1 cahnge to geopackage ok ") #results["OUTPUT"] = input_c["OUTPUT"] #return results # 集計用 人口+行政界 UNION input_union = input_c["OUTPUT"] feedback.setCurrentStep(8) # create union poplation mesh and input mesh param1 = { 'INPUT': input_union, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT, 'pareafield': 'div_area', 'polsmpl': 'pvsum', 'meshid': meshid, 'meshlayer': meshLayer } #parameters['OUTPUT'] # res1 = processing.run('QGIS_stat:AggregatePopMeshbyMeshAlgorithm', param1, context=context, feedback=feedback, is_child_algorithm=True) if feedback.isCanceled(): return {} feedback.pushConsoleInfo( "csvs 1 AggregatePopMeshbyMeshAlgorithm ok ") numberof_under_limit = 0 #numberof_under_limit = res1["LIMITPOL"] # レイヤをGeoPackage化 alg_paramsg = { 'LAYERS': res1["OUTPUT"], 'OVERWRITE': True, 'SAVE_STYLES': False, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } retg1 = processing.run('native:package', alg_paramsg, context=context, feedback=feedback, is_child_algorithm=True) last_output = retg1["OUTPUT"] new_mesh = retg1["OUTPUT"] mesh_layb = retg1["OUTPUT"] if type(mesh_layb) is str: mesh_layb = QgsVectorLayer(mesh_layb, "mesh", "ogr") numberof_under_limit = 0 # 作業用レイヤの作成 crs_str = mesh_layb.crs() layerURI = "Polygon?crs=" + crs_str.authid() #feedback.pushConsoleInfo( "work layer " + layerURI ) resLayer = QgsVectorLayer(layerURI, "mesh_result", "memory") appended = {} adfields = [] for field in mesh_layb.fields(): #print(field.name(), field.typeName()) adfields.append(field) #resLayer.addField(field) resLayer.dataProvider().addAttributes(adfields) resLayer.updateFields() lower_ids = [] value_column = "snum" # limit 値より小さい値のポリゴン数算出 for f in mesh_layb.getFeatures(): # feedback.pushConsoleInfo( "value " +str( f["value"]) ) if not f[value_column] is None: if f[value_column] > 0 and f[value_column] < limit_sample: numberof_under_limit += 1 lower_ids.append(f[meshid]) next_output = None stepi = 9 # 集計結果が最小サンプルより小さいものがある場合 if numberof_under_limit > 0: # 初回の場合は終了 feedback.pushConsoleInfo("最初の集計で指定値以下の集計値がありましたので集計を中止しました") results["OUTPUT"] = None return results if uneven_div: rmid = 
[] for tgid in (lower_ids): feedback.pushConsoleInfo("lower id " + str(tgid)) # next_output code の下3桁 削除 C27210-02 -> C27210 が last_output の code 番号 # next_output では last_output が同じ番号の最大4メッシュを削除する # リミットより小さいレコードは旧レコードを退避 # リミットにひっかかるレコードを再処理用リストから削除(同一親メッシュのものも削除) # 不均等分割でリミット以下のデータがある場合は last_output -> 分割不能抽出 next_output 分割不能削除 next_output -> last_output 代入 parent_code = tgid[0:-3] rmid.append(parent_code) addfeatures = [] alg_paramsg_n = { 'LAYERS': last_output, 'OVERWRITE': False, 'SAVE_STYLES': False, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } lmesh = processing.run('native:package', alg_paramsg_n, context=context, feedback=feedback, is_child_algorithm=True) last_output = lmesh["OUTPUT"] if type(last_output) is str: last_output = QgsVectorLayer(last_output, "mesh", "ogr") last_output.selectAll() for lf in last_output.getFeatures(): for pcode in (rmid): # feedback.pushConsoleInfo( "pcode " + pcode+ " meshid =" + lf[meshid] ) if lf[meshid] == pcode: lf["fid"] = None if not lf[value_column]: lf[value_column] = 0.0 if lf[meshid] not in appended: addfeatures.append(lf) appended[lf[meshid]] = lf # feedback.pushConsoleInfo( "add feature " + pcode ) resLayer.dataProvider().addFeatures(addfeatures) deleteFeatures = [] if type(next_output) is str: next_output = QgsVectorLayer(next_output, "mesh", "ogr") # add check 20210310 if next_output is None: feedback.pushConsoleInfo("no next array") else: for nf in next_output.getFeatures(): for pcode in (rmid): if nf[meshid][0:-3] == pcode: deleteFeatures.append(nf.id()) feedback.pushConsoleInfo("delete id " + str(pcode)) next_output.dataProvider().deleteFeatures(deleteFeatures) last_output = next_output # 分割回数ループ for divide_c in range(1, maxdivide): feedback.setCurrentStep(stepi) stepi = stepi + 1 if numberof_under_limit > 0: # 均等分割の場合は終了 if not uneven_div: break if last_output is None: feedback.pushConsoleInfo("last output is none") else: if type(last_output) is str: feedback.pushConsoleInfo("last output " + last_output) else: feedback.pushConsoleInfo("last output " + last_output.name()) alg_paramsg_m = { 'LAYERS': last_output, 'OVERWRITE': True, 'SAVE_STYLES': False, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } spmesh = processing.run('native:package', alg_paramsg_m, context=context, feedback=feedback, is_child_algorithm=True) new_mesh = agtools.SplitMeshLayer(spmesh["OUTPUT"], meshid) # statv 行政界別集計データ # 再度メッシュ集計 param2 = { 'INPUT': input_union, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT, 'pareafield': 'div_area', 'polsmpl': 'pvsum', 'meshid': meshid, 'meshlayer': new_mesh } res2 = processing.run('QGIS_stat:AggregatePopMeshbyMeshAlgorithm', param2, context=context, feedback=feedback, is_child_algorithm=True) #numberof_under_limit = res2["LIMITPOL"] numberof_under_limit = 0 # レイヤをGeoPackage化 alg_paramsg2 = { 'LAYERS': res2["OUTPUT"], 'OVERWRITE': True, 'SAVE_STYLES': False, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } retg2 = processing.run('native:package', alg_paramsg2, context=context, feedback=feedback, is_child_algorithm=True) mesh_layb = retg2["OUTPUT"] if type(mesh_layb) is str: mesh_layb = QgsVectorLayer(mesh_layb, "mesh", "ogr") #features = mesh_layb.selectedFeatures() #feedback.pushConsoleInfo( "feature count " +str( len(features)) ) lower_ids = [] for f in mesh_layb.getFeatures(): # feedback.pushConsoleInfo( "value " +str( f["value"]) ) if not f[value_column] is None: if f[value_column] > 0 and f[value_column] < limit_sample: numberof_under_limit += 1 lower_ids.append(f[meshid]) if numberof_under_limit == 0: last_output = res2["OUTPUT"] 
next_output = retg2["OUTPUT"] else: # 不均等分割でリミット以下のデータがある場合は last_output -> 分割不能抽出 next_output 分割不能削除 next_output -> last_output 代入 # last_output = res2["OUTPUT"] next_output = retg2["OUTPUT"] # 集計結果が最小サンプルより小さいものがある場合 if numberof_under_limit > 0: # 均等分割の場合は終了 if not uneven_div: break # 不均等分割の場合は終了データを保全 それ以外のメッシュの分割 else: rmid = [] for tgid in (lower_ids): feedback.pushConsoleInfo("lower id " + str(tgid)) # next_output code の下3桁 削除 C27210-02 -> C27210 が last_output の code 番号 # next_output では last_output が同じ番号の最大4メッシュを削除する # リミットより小さいレコードは旧レコードを退避 # リミットにひっかかるレコードを再処理用リストから削除(同一親メッシュのものも削除) # 不均等分割でリミット以下のデータがある場合は last_output -> 分割不能抽出 next_output 分割不能削除 next_output -> last_output 代入 parent_code = tgid[0:-3] rmid.append(parent_code) addfeatures = [] #if type(last_output) is str: # last_output = QgsVectorLayer(last_output, "mesh", "ogr") alg_paramsg_n = { 'LAYERS': last_output, 'OVERWRITE': False, 'SAVE_STYLES': False, 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT } lmesh = processing.run('native:package', alg_paramsg_n, context=context, feedback=feedback, is_child_algorithm=True) #last_output.removeSelection() last_output = lmesh["OUTPUT"] if type(last_output) is str: last_output = QgsVectorLayer(last_output, "mesh", "ogr") last_output.selectAll() for lf in last_output.getFeatures(): for pcode in (rmid): # feedback.pushConsoleInfo( "pcode " + pcode+ " meshid =" + lf[meshid] ) if lf[meshid] == pcode: lf["fid"] = None if not lf[value_column]: lf[value_column] = 0.0 if lf[meshid] not in appended: addfeatures.append(lf) appended[lf[meshid]] = lf #addfeatures.append(lf) feedback.pushConsoleInfo("add feature " + pcode) resLayer.dataProvider().addFeatures(addfeatures) deleteFeatures = [] if type(next_output) is str: next_output = QgsVectorLayer(next_output, "mesh", "ogr") for nf in next_output.getFeatures(): for pcode in (rmid): if nf[meshid][0:-3] == pcode: deleteFeatures.append(nf.id()) feedback.pushConsoleInfo("delete id " + str(pcode)) next_output.dataProvider().deleteFeatures(deleteFeatures) last_output = next_output # Return the results of the algorithm. In this case our only result is # the feature sink which contains the processed features, but some # algorithms may return multiple feature sinks, calculated numeric # statistics, etc. These should all be included in the returned # dictionary, with keys matching the feature corresponding parameter # or output names. 
    # uneven split: the features of the last working layer may not have been added yet
    if uneven_div:
        alg_paramsg_n = {
            'LAYERS': next_output,
            'OVERWRITE': False,
            'SAVE_STYLES': False,
            'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT
        }
        lmesh = processing.run('native:package', alg_paramsg_n, context=context,
                               feedback=feedback, is_child_algorithm=True)
        #last_output.removeSelection()
        last_output = lmesh["OUTPUT"]
        if type(last_output) is str:
            last_output = QgsVectorLayer(last_output, "mesh", "ogr")
        last_output.selectAll()
        addfeatures = []
        for lf in last_output.getFeatures():
            feedback.pushConsoleInfo("add features meshid =" + lf[meshid])
            lf["fid"] = None
            if not lf[value_column]:
                lf[value_column] = 0.0
            if lf[meshid] not in appended:
                addfeatures.append(lf)
                appended[lf[meshid]] = lf
            #addfeatures.append(lf)
        resLayer.dataProvider().addFeatures(addfeatures)

        # format conversion (gdal_translate)
        alg_params = {
            'INPUT': resLayer,
            'OPTIONS': '',
            'OUTPUT': parameters['OUTPUT']
        }
        ocv = processing.run('gdal:convertformat', alg_params, context=context,
                             feedback=feedback, is_child_algorithm=True)
        results["OUTPUT"] = ocv["OUTPUT"]
        return results
    # even split
    else:
        # format conversion (gdal_translate)
        alg_params = {
            'INPUT': last_output,
            'OPTIONS': '',
            'OUTPUT': parameters['OUTPUT']
        }
        ocv = processing.run('gdal:convertformat', alg_params, context=context,
                             feedback=feedback, is_child_algorithm=True)
        results["OUTPUT"] = ocv["OUTPUT"]
        return results

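# --- Illustrative sketch (not part of the original module) -------------------
# In the split loop above, child mesh codes carry a 3-character suffix
# (e.g. 'C27210-02' -> parent 'C27210'), recovered with tgid[0:-3]. When a
# child's aggregated sample falls below limit_sample, the parent mesh is kept
# as-is and all of its children are dropped from the next split round. The
# function below is only a hedged sketch of that bookkeeping with plain dicts;
# names and data shapes are invented for illustration.
def keep_or_resplit(child_values, limit_sample):
    """child_values: {child_code: aggregated sample}.
    Returns (frozen parent codes, child codes to split again)."""
    frozen_parents = {code[:-3] for code, v in child_values.items()
                      if v is not None and 0 < v < limit_sample}
    resplit = [code for code in child_values
               if code[:-3] not in frozen_parents]
    return frozen_parents, resplit


# Example: with limit_sample=10, 'C27210-02' holding 4 freezes parent 'C27210',
# so its sibling 'C27210-01' is also removed from the next round:
# keep_or_resplit({'C27210-01': 25, 'C27210-02': 4, 'C27211-01': 30}, 10)
# -> ({'C27210'}, ['C27211-01'])
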
def processAlgorithm(self, params, context, feedback):
    steps = 0
    totalSteps = 17
    fieldDpaMan = params['DPA_MAN']
    # fieldHab = params['NUMBER_HABITANTS']
    feedback = QgsProcessingMultiStepFeedback(totalSteps, feedback)

    if not OPTIONAL_GRID_INPUT:
        params['CELL_SIZE'] = P_CELL_SIZE
    grid, isStudyArea = buildStudyArea(params['CELL_SIZE'], params['BLOCKS'],
                                       params['STUDY_AREA_GRID'], context,
                                       feedback)
    gridNeto = grid

    steps = steps + 1
    feedback.setCurrentStep(steps)
    fileH = params['CENSO_HOGAR']
    cols = ['I01', 'I02', 'I03', 'I04', 'I05', 'I06', 'I09', 'I10', 'H12']
    df = pd.read_csv(fileH, usecols=cols)

    # fix codes
    df['I01'] = df['I01'].astype(str)
    df['I02'] = df['I02'].astype(str)
    df['I03'] = df['I03'].astype(str)
    df['I04'] = df['I04'].astype(str)
    df['I05'] = df['I05'].astype(str)
    df['I06'] = df['I06'].astype(str)
    df['I09'] = df['I09'].astype(str)
    df['I10'] = df['I10'].astype(str)
    df.loc[df['I01'].str.len() < 2, 'I01'] = "0" + df['I01']
    df.loc[df['I02'].str.len() < 2, 'I02'] = "0" + df['I02']
    df.loc[df['I03'].str.len() < 2, 'I03'] = "0" + df['I03']
    df.loc[df['I04'].str.len() == 1, 'I04'] = "00" + df['I04']
    df.loc[df['I04'].str.len() == 2, 'I04'] = "0" + df['I04']
    df.loc[df['I05'].str.len() == 1, 'I05'] = "00" + df['I05']
    df.loc[df['I05'].str.len() == 2, 'I05'] = "0" + df['I05']
    df.loc[df['I06'].str.len() < 2, 'I06'] = "0" + df['I06']
    df.loc[df['I09'].str.len() == 1, 'I09'] = "00" + df['I09']
    df.loc[df['I09'].str.len() == 2, 'I09'] = "0" + df['I09']
    df.loc[df['I10'].str.len() < 2, 'I10'] = "0" + df['I10']
    df['codman'] = df['I01'].astype(str) + df['I02'].astype(str) + df['I03'].astype(str) \
        + df['I04'].astype(str) + df['I05'].astype(str) + df['I06'].astype(str)
    df['codviv'] = df['I01'].astype(str) + df['I02'].astype(str) + df['I03'].astype(str) \
        + df['I04'].astype(str) + df['I05'].astype(str) + df['I06'].astype(str) \
        + df['I09'].astype(str)
    df['codhog'] = df['I01'].astype(str) + df['I02'].astype(str) + df['I03'].astype(str) \
        + df['I04'].astype(str) + df['I05'].astype(str) + df['I09'].astype(str) \
        + df['I10'].astype(str)
    df = df[(df['H12'] != '9999')]
    df = blanks2None(df, 'H12')
    df['H12'] = df['H12'].astype(float)
    aggOptions = {
        'codviv': 'first',
        'H12': 'sum',
    }
    resVivi = df.groupby('codviv').agg(aggOptions)
    resVivi.index.name = None
    colsToSave = ['codviv', 'H12']
    resVivi = pd.DataFrame(resVivi, columns=colsToSave)
    dfH = resVivi

    file = params['CENSO_VIVIENDA']
    cols = ['I01', 'I02', 'I03', 'I04', 'I05', 'I06', 'I09', 'I10', 'TOTPER']
    df = pd.read_csv(file, usecols=cols)

    # fix codes
    df['I01'] = df['I01'].astype(str)
    df['I02'] = df['I02'].astype(str)
    df['I03'] = df['I03'].astype(str)
    df['I04'] = df['I04'].astype(str)
    df['I05'] = df['I05'].astype(str)
    df['I06'] = df['I06'].astype(str)
    df['I09'] = df['I09'].astype(str)
    df['I10'] = df['I10'].astype(str)
    df.loc[df['I01'].str.len() < 2, 'I01'] = "0" + df['I01']
    df.loc[df['I02'].str.len() < 2, 'I02'] = "0" + df['I02']
    df.loc[df['I03'].str.len() < 2, 'I03'] = "0" + df['I03']
    df.loc[df['I04'].str.len() == 1, 'I04'] = "00" + df['I04']
    df.loc[df['I04'].str.len() == 2, 'I04'] = "0" + df['I04']
    df.loc[df['I05'].str.len() == 1, 'I05'] = "00" + df['I05']
    df.loc[df['I05'].str.len() == 2, 'I05'] = "0" + df['I05']
    df.loc[df['I06'].str.len() < 2, 'I06'] = "0" + df['I06']
    df.loc[df['I09'].str.len() == 1, 'I09'] = "00" + df['I09']
    df.loc[df['I09'].str.len() == 2, 'I09'] = "0" + df['I09']
    df.loc[df['I10'].str.len() < 2, 'I10'] = "0" + df['I10']
    df['codviv'] = df['I01'].astype(str) + df['I02'].astype(str) + df['I03'].astype(str) \
        + df['I04'].astype(str) + df['I05'].astype(str) + df['I06'].astype(str) \
        + df['I09'].astype(str)
    aggOptions = {
        'codviv': 'first',
        'TOTPER': 'sum',
    }
    group = df.groupby('codviv').agg(aggOptions)
    group.index.name = None
    colsToSave = ['codviv', 'TOTPER']
    group = pd.DataFrame(group, columns=colsToSave)
    merge = pd.merge(group, dfH, how='left', on='codviv')
    df = merge
    # colsToSave = ['codhog', 'codviv', 'codman', 'TOTPER', 'H12']
    # df = pd.DataFrame(df, columns=colsToSave)
    df['enerporperson'] = df['H12'] / df['TOTPER']
    df['codman'] = df['codviv'].str[0:14]
    aggOptions = {
        'codman': 'first',
        'enerporperson': 'mean',
    }
    df = df.groupby('codman').agg(aggOptions)

    steps = steps + 1
    feedback.setCurrentStep(steps)
    outputCsv = self.CURRENT_PATH + '/enerporperson.csv'
    feedback.pushConsoleInfo(str(('enerporperson en ' + outputCsv)))
    df.to_csv(outputCsv, index=False)

    steps = steps + 1
    feedback.setCurrentStep(steps)
    exitCsv = os.path.exists(outputCsv)
    if exitCsv:
        print("El archivo CSV existe")
    else:
        print("No se encuentra CSV")
    CSV = QgsVectorLayer(outputCsv, "csv", "ogr")
    featuresCSV = CSV.getFeatures()
    # fields = layer.dataProvider().fields()
    field_names = [field.name() for field in CSV.fields()]
    print(field_names)

    steps = steps + 1
    feedback.setCurrentStep(steps)

    steps = steps + 1
    feedback.setCurrentStep(steps)
    result = joinByAttr2(params['BLOCKS'], fieldDpaMan, outputCsv, 'codman',
                         [], UNDISCARD_NONMATCHING, '', 1, context, feedback)

    steps = steps + 1
    feedback.setCurrentStep(steps)
    expressionNotNull = "enerporperson IS NOT '' AND enerporperson is NOT NULL"
    notNull = filterByExpression(result['OUTPUT'], expressionNotNull, context,
                                 feedback)

    steps = steps + 1
    feedback.setCurrentStep(steps)
    formulaDummy = 'enerporperson * 1.0'
    result = calculateField(notNull['OUTPUT'], 'enerporperson_n', formulaDummy,
                            context, feedback)

    steps = steps + 1
    feedback.setCurrentStep(steps)
    gridNeto = joinByLocation(gridNeto['OUTPUT'], result['OUTPUT'],
                              ['enerporperson_n'], [INTERSECTA], [MEDIA],
                              UNDISCARD_NONMATCHING, context, feedback)

    fieldsMapping = [
        {
            'expression': '"id_grid"',
            'length': 10,
            'name': 'id_grid',
            'precision': 0,
            'type': 4
        },
        {
            'expression': '"area_grid"',
            'length': 16,
            'name': 'area_grid',
            'precision': 3,
            'type': 6
        },
        {
            'expression': '"enerporperson_n_mean"',
            'length': 20,
            'name': NAMES_INDEX['IC09'][0],
            'precision': 2,
            'type': 6
        }
    ]

    steps = steps + 1
    feedback.setCurrentStep(steps)
    result = refactorFields(fieldsMapping, gridNeto['OUTPUT'], context,
                            feedback, params['OUTPUT'])
    return result

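# --- Illustrative sketch (not part of the original module) -------------------
# The census key columns above are zero-padded column by column with .loc
# assignments before being concatenated into codman/codviv. An equivalent,
# more compact pandas idiom is str.zfill; the widths below follow the padding
# applied in the code (2 digits for I01-I03, I06, I10; 3 digits for I04, I05,
# I09). This is a sketch of the idea, assuming values never exceed the target
# widths, not a drop-in replacement for the project's helpers.
import pandas as pd


def pad_census_codes(df: pd.DataFrame) -> pd.DataFrame:
    widths = {'I01': 2, 'I02': 2, 'I03': 2, 'I04': 3, 'I05': 3,
              'I06': 2, 'I09': 3, 'I10': 2}
    for col, width in widths.items():
        df[col] = df[col].astype(str).str.zfill(width)
    df['codman'] = df['I01'] + df['I02'] + df['I03'] + df['I04'] + df['I05'] + df['I06']
    df['codviv'] = df['codman'] + df['I09']
    return df
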