def node_set_boot_order(self, node_id, boot_order_list):
    """Set the boot device order on the libvirt VM backing a node.

    Rewrites the <boot>/<bootmenu> elements under /domain/os in the
    domain XML dumped by virsh, then redefines the VM from the edited
    XML.

    :param node_id: DHA node identifier
    :param boot_order_list: list of boot device names (translated to
        libvirt 'dev' values by self.translate)
    """
    boot_order_list = self.translate(boot_order_list)
    vm_name = self.get_node_property(node_id, 'libvirtName')
    temp_dir = tempfile.mkdtemp()
    log('Set boot order %s on Node %s' % (boot_order_list, vm_name))
    resp = exec_cmd('virsh dumpxml %s' % vm_name)
    xml_dump = etree.fromstring(resp, self.parser)
    # Renamed from 'os' to avoid shadowing the os module.
    os_elements = xml_dump.xpath('/domain/os')
    for os_element in os_elements:
        # Drop any existing boot configuration before writing a new one.
        for bootelem in ['boot', 'bootmenu']:
            for b in os_element.xpath(bootelem):
                os_element.remove(b)
        for dev in boot_order_list:
            b = etree.Element('boot')
            b.set('dev', dev)
            os_element.append(b)
        # Disable the interactive boot menu.
        bmenu = etree.Element('bootmenu')
        bmenu.set('enable', 'no')
        os_element.append(bmenu)
    tree = etree.ElementTree(xml_dump)
    xml_file = temp_dir + '/%s.xml' % vm_name
    with open(xml_file, 'w') as f:
        tree.write(f, pretty_print=True, xml_declaration=True)
    exec_cmd('virsh define %s' % xml_file)
    delete(temp_dir)
def download_hekate(module, temp_directory):
    """Fetch the latest hekate bundle, unpack it and stage its payload.

    Returns the release tag name, or None if the asset download failed.
    """
    release = get_latest_release(module)
    archive = download_asset(module, release, 0)
    if archive is None:
        return None

    with zipfile.ZipFile(archive, 'r') as bundle:
        bundle.extractall(temp_directory)
    common.delete(archive)

    common.copy_module_file(
        'hekate', 'hekate_ipl.ini',
        temp_directory.joinpath('bootloader/hekate_ipl.ini'))

    payload = common.find_file(temp_directory.joinpath('hekate_ctcaer_*.bin'))
    if payload:
        # Stage the payload everywhere it is expected.
        shutil.copyfile(payload[0],
                        temp_directory.joinpath('bootloader/update.bin'))
        common.mkdir(temp_directory.joinpath('atmosphere'))
        shutil.copyfile(
            payload[0],
            temp_directory.joinpath('atmosphere/reboot_payload.bin'))
        common.mkdir(temp_directory.joinpath('../hekate'))
        common.move(
            payload[0],
            temp_directory.joinpath('../hekate/', Path(payload[0]).name))

    common.delete(
        temp_directory.joinpath(
            'nyx_usb_max_rate (run once per windows pc).reg'))

    return release.tag_name
def download_hekate_icons(module, temp_directory, deepsea_version, parameters,
                          deepsea_build):
    """Fetch the hekate icon pack and swap it in over the stock icons.

    Returns the module version, or None if the asset download failed.
    """
    release = get_latest_release(module)
    archive = download_asset(module, release, 0)
    if archive is None:
        return None

    with zipfile.ZipFile(archive, 'r') as bundle:
        bundle.extractall(temp_directory)
    common.delete(archive)

    res_dir = os.path.join(temp_directory, 'bootloader', 'res')
    # Move the stock icons aside first, then drop the new ones in place;
    # order matters to avoid clobbering.
    renames = [
        ('icon_payload.bmp', 'icon_payload_o.bmp'),
        ('icon_switch.bmp', 'icon_switch_o.bmp'),
        ('payload.bmp', 'icon_payload.bmp'),
        ('switch.bmp', 'icon_switch.bmp'),
    ]
    for src, dst in renames:
        shutil.move(os.path.join(res_dir, src), os.path.join(res_dir, dst))

    return get_version(module, release, 0)
def stop(conf):
    """Stop the daemon whose pid is recorded in conf['pidfile'].

    Returns True if the process was killed; False if the pid file was
    missing or the kill failed.  The pid file is removed either way.
    """
    if common.exists(conf['pidfile']):
        # Read the pid via a context manager so the handle is closed
        # (the original leaked the open file object).
        with open(conf['pidfile'], "r") as f:
            pid = int(f.read())
        if common.kill(pid):
            common.delete(conf['pidfile'])
            return True
        # Kill failed: still remove the stale pid file.
        common.delete(conf['pidfile'])
    return False
def download_atmosphere(module, temp_directory, isotope_version, isotope_build):
    """Fetch the latest Atmosphere release plus its fusee payload.

    Returns the module version, or None if any asset download failed.
    """
    release = get_latest_release(module)
    archive = download_asset(module, release, 0)
    if archive is None:
        return None

    with zipfile.ZipFile(archive, 'r') as bundle:
        bundle.extractall(temp_directory)
    common.delete(archive)

    # Strip pieces that other modules provide or that are unwanted.
    for leftover in ('switch/reboot_to_payload.nro',
                     'switch',
                     'atmosphere/reboot_payload.bin'):
        common.delete(temp_directory.joinpath(leftover))

    payload = download_asset(module, release, 1)
    if payload is None:
        return None

    common.mkdir(temp_directory.joinpath('bootloader/payloads'))
    common.move(
        payload,
        temp_directory.joinpath('bootloader/payloads/fusee-primary.bin'))
    common.copy_module_file(
        'atmosphere', 'system_settings.ini',
        temp_directory.joinpath('atmosphere/config/system_settings.ini'))

    if not isotope_build:
        common.delete(temp_directory.joinpath('hbmenu.nro'))

    return get_version(module, release, 0)
def patch(self, tmp_new_dir, new_iso):
    """Patch the unpacked ISO tree in place and master a new bootable ISO.

    :param tmp_new_dir: writable copy of the original ISO contents
    :param new_iso: path of the ISO image to create
    """
    log('Patching...')
    patch_dir = '%s/%s' % (CWD, PATCH_DIR)
    ks_path = '%s/ks.cfg.patch' % patch_dir
    with cd(tmp_new_dir):
        # Apply the kickstart patch relative to the ISO root.
        exec_cmd('cat %s | patch -p0' % ks_path)
        delete('.rr_moved')
        isolinux = 'isolinux/isolinux.cfg'
        log('isolinux.cfg before: %s'
            % exec_cmd('grep ip= %s' % isolinux))
        self.update_fuel_isolinux(isolinux)
        log('isolinux.cfg after: %s'
            % exec_cmd('grep ip= %s' % isolinux))
        # Re-use the original volume label so boot references stay valid.
        iso_label = self.parse_iso_volume_label(self.iso_file)
        log('Volume label: %s' % iso_label)
        iso_linux_bin = 'isolinux/isolinux.bin'
        exec_cmd('mkisofs -quiet -r -J -R -b %s '
                 '-no-emul-boot -boot-load-size 4 '
                 '-boot-info-table -hide-rr-moved '
                 '-joliet-long '
                 '-x "lost+found:" -V %s -o %s .'
                 % (iso_linux_bin, iso_label, new_iso))
    delete(tmp_new_dir)
def run_deploy(self):
    """Run 'fuel deploy-changes' and poll until the environment is
    operational, fails, or the wait loop expires.

    Calls err() (which raises/exits) when deployment does not succeed.
    """
    WAIT_LOOP = 180   # poll iterations, i.e. up to 180 * SLEEP_TIME seconds
    SLEEP_TIME = 60
    LOG_FILE = 'cloud.log'
    log('Starting deployment of environment %s' % self.env_id)
    run_proc('fuel --env %s deploy-changes | strings | tee %s'
             % (self.env_id, LOG_FILE))
    ready = False
    for i in range(WAIT_LOOP):
        env = parse(exec_cmd('fuel env --env %s' % self.env_id))
        log('Environment status: %s' % env[0][E['status']])
        # Echo the deployer's latest log line; ignore command failure.
        r, _ = exec_cmd('tail -2 %s | head -1' % LOG_FILE, False)
        if r:
            log(r)
        if env[0][E['status']] == 'operational':
            ready = True
            break
        elif (env[0][E['status']] == 'error'
              or env[0][E['status']] == 'stopped'):
            break
        else:
            time.sleep(SLEEP_TIME)
    delete(LOG_FILE)
    if ready:
        log('Environment %s successfully deployed' % self.env_id)
    else:
        self.collect_error_logs()
        err('Deployment failed, environment %s is not operational'
            % self.env_id)
def copy(self, tmp_orig_dir, tmp_new_dir):
    """Mount the ISO with fuseiso and copy its contents to a writable tree.

    :param tmp_orig_dir: mount point for the read-only ISO
    :param tmp_new_dir: destination for the writable copy
    """
    log('Copying...')
    os.makedirs(tmp_orig_dir)
    os.makedirs(tmp_new_dir)
    exec_cmd('fuseiso %s %s' % (self.iso_file, tmp_orig_dir))
    with cd(tmp_orig_dir):
        # cpio -pd preserves the directory structure while copying.
        exec_cmd('find . | cpio -pd %s' % tmp_new_dir)
    exec_cmd('fusermount -u %s' % tmp_orig_dir)
    delete(tmp_orig_dir)
    # The copy inherits read-only ISO permissions; make it writable.
    exec_cmd('chmod -R 755 %s' % tmp_new_dir)
def patch_iso(self, new_iso):
    """Copy the ISO contents and patch them into a new ISO image.

    On any failure, best-effort unmount and cleanup before reporting.
    """
    orig_dir = '%s/origiso' % self.tmp_dir
    new_dir = '%s/newiso' % self.tmp_dir
    try:
        self.copy(orig_dir, new_dir)
        self.patch(new_dir, new_iso)
    except Exception as e:
        # The copy step may have left the ISO mounted; unmount it
        # (ignoring failure) and wipe the work area.
        exec_cmd('fusermount -u %s' % orig_dir, False)
        delete(self.tmp_dir)
        err(e)
def clean_dependencies(projecthome, verbose=False):
    """
    Delete BLCI dependency metadata file

    **Positional Arguments:**

    projecthome:
        - The path to the root of the project

    **Optional Arguments:**

    verbose:
        - Print messages when actions are taken
    """
    delete(join(projecthome, BL_DEFAULT_DEPS_FN), verbose)
def patch_iso(self, new_iso):
    """Copy the ISO contents and patch them into a new ISO image.

    Variant that also clears the recorded mount state on failure so an
    outer handler does not try to unmount again.
    """
    tmp_orig_dir = '%s/origiso' % self.tmp_dir
    tmp_new_dir = '%s/newiso' % self.tmp_dir
    try:
        self.copy(tmp_orig_dir, tmp_new_dir)
        self.patch(tmp_new_dir, new_iso)
    except Exception as e:
        # Best-effort unmount, then drop the mount-state marker before
        # wiping the work area.
        exec_cmd('fusermount -u %s' % tmp_orig_dir, False)
        os.environ.pop(MOUNT_STATE_VAR, None)
        delete(self.tmp_dir)
        err(e)
def download_sys_ftpd_light(module, temp_directory):
    """Fetch and unpack the latest sys-ftpd-light release.

    Returns the release tag name, or None if the download failed.
    """
    release = get_latest_release(module)
    archive = download_asset(module, release, 0)
    if archive is None:
        return None
    with zipfile.ZipFile(archive, 'r') as bundle:
        bundle.extractall(temp_directory)
    common.delete(archive)
    return release.tag_name
def copy(self, tmp_orig_dir, tmp_new_dir):
    """Mount the ISO, copy its tree to a writable dir, then unmount.

    Records the mount point in MOUNT_STATE_VAR while mounted so a
    failure handler elsewhere can unmount it.
    """
    log('Copying...')
    os.makedirs(tmp_orig_dir)
    os.makedirs(tmp_new_dir)
    exec_cmd('fuseiso %s %s' % (self.iso_file, tmp_orig_dir))
    os.environ[MOUNT_STATE_VAR] = tmp_orig_dir
    with cd(tmp_orig_dir):
        # cpio -pd preserves the directory structure while copying.
        exec_cmd('find . | cpio -pd %s' % tmp_new_dir)
    exec_cmd('fusermount -u %s' % tmp_orig_dir)
    os.environ.pop(MOUNT_STATE_VAR, None)
    delete(tmp_orig_dir)
    # The copy inherits read-only ISO permissions; make it writable.
    exec_cmd('chmod -R 755 %s' % tmp_new_dir)
def download_es_patches(module, temp_directory, isotope_version, isotope_build):
    """Fetch and unpack the latest ES patches archive.

    Returns the module version, or None if the download failed.
    """
    release = get_latest_release(module)
    archive = download_asset(module, release, 0)
    if archive is None:
        return None
    with zipfile.ZipFile(archive, 'r') as bundle:
        bundle.extractall(temp_directory)
    common.delete(archive)
    return get_version(module, release, 0)
def download_tesla_menu(module, temp_directory, deepsea_version, deepsea_build):
    """Fetch and unpack the latest Tesla menu release.

    Returns the module version, or None if the download failed.
    """
    release = get_latest_release(module)
    archive = download_asset(module, release, 0)
    if archive is None:
        return None
    with zipfile.ZipFile(archive, 'r') as bundle:
        bundle.extractall(temp_directory)
    common.delete(archive)
    return get_version(module, release, 0)
def node_zero_mbr(self, node_id):
    """Wipe every 'disk' device of the node's VM by recreating each
    backing file as an empty sparse file of the same size (this zeroes
    the MBR and all contents)."""
    vm_name = self.get_node_property(node_id, 'libvirtName')
    resp = exec_cmd('virsh dumpxml %s' % vm_name)
    xml_dump = etree.fromstring(resp)
    disks = xml_dump.xpath('/domain/devices/disk')
    for disk in disks:
        # Skip cdrom/floppy devices; only wipe real disks.
        if disk.get('device') == 'disk':
            sources = disk.xpath('source')
            for source in sources:
                disk_file = source.get('file')
                # Field 5 of 'ls -l' is the size in bytes.
                # NOTE(review): parsing ls output is locale/format
                # fragile — confirm against deployment hosts.
                disk_size = exec_cmd('ls -l %s' % disk_file).split()[4]
                delete(disk_file)
                exec_cmd('fallocate -l %s %s' % (disk_size, disk_file))
def undefine_vm_delete_disk(self, printout, vm_name):
    """Destroy and undefine the VM, then remove its backing disk files.

    :param printout: 'virsh dumpxml' output for the VM
    :param vm_name: libvirt domain name
    """
    xml_dump = etree.fromstring(printout, self.parser)
    # Collect every <source file="..."> under a <disk> element.
    disk_files = [
        source.get('file')
        for disk in xml_dump.xpath('/domain/devices/disk')
        for source in disk.xpath('source')
        if source.get('file')
    ]
    log('Deleting VM %s with disks %s' % (vm_name, disk_files))
    # Both commands tolerate failure (VM may be off / already undefined).
    exec_cmd('virsh destroy %s' % vm_name, False)
    exec_cmd('virsh undefine %s' % vm_name, False)
    for disk_file in disk_files:
        delete(disk_file)
def download_appstore(module, temp_directory, isotope_version, isotope_build):
    """Fetch the appstore NRO and install it under switch/appstore.

    Returns the module version, or None if the download failed.
    """
    release = get_latest_release(module)
    archive = download_asset(module, release, 0)
    if archive is None:
        return None
    with zipfile.ZipFile(archive, 'r') as bundle:
        bundle.extractall(temp_directory)
    common.delete(archive)
    common.mkdir(temp_directory.joinpath('switch/appstore'))
    common.move(temp_directory.joinpath('appstore.nro'),
                temp_directory.joinpath('switch/appstore/appstore.nro'))
    return get_version(module, release, 0)
def create_vms(self):
    """Define a libvirt VM (with storage) for every configured node."""
    temp_dir = tempfile.mkdtemp()
    disk_sizes = self.dha.get_disks()
    for node_id in self.node_ids:
        name = self.dha.get_node_property(node_id, 'libvirtName')
        template = '%s/%s' % (self.root_dir,
                              self.dha.get_node_property(node_id,
                                                         'libvirtTemplate'))
        check_file_exists(template)
        disk_path = '%s/%s.raw' % (self.storage_dir, name)
        self.create_storage(node_id, disk_path, disk_sizes)
        # Work on a private copy of the template, then define the VM.
        vm_file = '%s/%s' % (temp_dir, name)
        exec_cmd('cp %s %s' % (template, vm_file))
        self.define_vm(name, vm_file, disk_path)
    delete(temp_dir)
def download_sys_ftpd_light(module, temp_directory, isotope_version,
                            isotope_build):
    """Fetch sys-ftpd-light; on isotope builds disable its boot2 flag.

    Returns the module version, or None if the download failed.
    """
    release = get_latest_release(module)
    archive = download_asset(module, release, 0)
    if archive is None:
        return None
    with zipfile.ZipFile(archive, 'r') as bundle:
        bundle.extractall(temp_directory)
    common.delete(archive)
    if isotope_build:
        common.delete(temp_directory.joinpath(
            'atmosphere/contents/420000000000000E/flags/boot2.flag'))
    return get_version(module, release, 0)
def download_hekate_icons(module, temp_directory):
    """Fetch the hekate icon pack and install its boot config.

    Returns the release tag name, or None if the download failed.
    """
    release = get_latest_release(module)
    archive = download_asset(module, release, 0)
    if archive is None:
        return None
    with zipfile.ZipFile(archive, 'r') as bundle:
        bundle.extractall(temp_directory)
    common.delete(archive)
    common.copy_module_file(
        'hekate_icons', 'hekate_ipl.ini',
        temp_directory.joinpath('bootloader/hekate_ipl.ini'))
    return release.tag_name
def configure_environment(self):
    """Create a fresh Fuel environment and push its configuration."""
    log('Configure environment')
    # Start from a clean config directory.
    delete(self.yaml_config_dir)
    create_dir_if_not_exists(self.yaml_config_dir)
    env_name = self.dea.get_env_name()
    segment_type = self.dea.get_env_net_segment_type()
    log('Creating environment %s release %s net-segment-type %s'
        % (env_name, self.release_id, segment_type))
    exec_cmd('fuel env create --name "%s" --release %s --net-segment-type %s'
             % (env_name, self.release_id, segment_type))
    if not self.env_exists(env_name):
        err('Failed to create environment %s' % env_name)
    self.config_settings()
    self.config_network()
    self.config_nodes()
def clean_bl_config(projecthome, verbose=False):
    """
    Delete BLCI configuration file

    **Positional Arguments:**

    projecthome:
        - The path to the root of the project

    **Optional Arguments:**

    verbose:
        - Print messages when actions are taken
    """
    config_path = join(projecthome, BL_DEFAULT_CONFIG_FN)
    delete(config_path, verbose)
def node_zero_mbr(self, node_id):
    """Wipe every 'disk' device of the node's VM by recreating each
    backing file as an empty qcow2 image of the same virtual size."""
    vm_name = self.get_node_property(node_id, 'libvirtName')
    resp = exec_cmd('virsh dumpxml %s' % vm_name)
    xml_dump = etree.fromstring(resp)
    disks = xml_dump.xpath('/domain/devices/disk')
    for disk in disks:
        # Skip cdrom/floppy devices; only wipe real disks.
        if disk.get('device') == 'disk':
            sources = disk.xpath('source')
            for source in sources:
                disk_file = source.get('file')
                # Third field of the 'virtual size:' line, e.g. '50G'.
                disk_size = exec_cmd('qemu-img info '
                                     '%s |grep \"virtual size:\"'
                                     % disk_file).split()[2]
                delete(disk_file)
                exec_cmd('qemu-img create -f qcow2 %s %s'
                         % (disk_file, disk_size))
def create_vm(self):
    """Create the Fuel master VM: allocate its disk, instantiate the
    libvirt template and define the domain."""
    temp_dir = tempfile.mkdtemp()
    name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
    template = '%s/%s' % (self.root_dir,
                          self.dha.get_node_property(self.fuel_node_id,
                                                     'libvirtTemplate'))
    check_file_exists(template)
    disk_path = '%s/%s.raw' % (self.storage_dir, name)
    # Allocate the disk at the size configured for the 'fuel' node.
    fuel_disk_size = self.dha.get_disks()['fuel']
    exec_cmd('fallocate -l %s %s' % (fuel_disk_size, disk_path))
    vm_file = '%s/%s' % (temp_dir, name)
    exec_cmd('cp %s %s' % (template, vm_file))
    self.set_vm_nic(vm_file)
    self.define_vm(name, vm_file, disk_path)
    delete(temp_dir)
def clean_base_ci_config(projecthome, verbose=False):
    """
    Delete Travis-CI configuration file

    **Positional Arguments:**

    projecthome:
        - The path to the root of the project

    **Optional Arguments:**

    verbose:
        - Print messages when actions are taken
    """
    ci_config_path = join(projecthome, BASE_CI_CONFIG_FN)
    delete(ci_config_path, verbose)
def download_ldn_mitm(module, temp_directory):
    """Fetch ldn_mitm and install its toolbox config.

    Returns the release tag name, or None if the download failed.
    """
    release = get_latest_release(module)
    archive = download_asset(module, release, 0)
    if archive is None:
        return None
    with zipfile.ZipFile(archive, 'r') as bundle:
        bundle.extractall(temp_directory)
    common.delete(archive)
    common.copy_module_file(
        'ldn_mitm', 'toolbox.json',
        temp_directory.joinpath(
            'atmosphere/contents/4200000000000010/toolbox.json'))
    return release.tag_name
def clean_git(projecthome, verbose=False):
    """
    Delete Git repository data and ignore file

    **Positional Arguments:**

    projecthome:
        - The path to the root of the project

    **Optional Arguments:**

    verbose:
        - Print messages when actions are taken
    """
    # Pass 'verbose' through to both deletions; the original accepted
    # the flag but never forwarded it, silently ignoring it (the
    # sibling clean_* helpers all forward it).
    delete(join(projecthome, ".git"), verbose)
    delete(join(projecthome, GIT_IGNORE_FN), verbose)
def node_zero_mbr(self, node_id):
    """Wipe every 'disk' device of the node's VM by recreating each
    backing file as an empty raw image of the same virtual size."""
    vm_name = self.get_node_property(node_id, 'libvirtName')
    resp = exec_cmd('virsh dumpxml %s' % vm_name)
    xml_dump = etree.fromstring(resp)
    disks = xml_dump.xpath('/domain/devices/disk')
    for disk in disks:
        # Skip cdrom/floppy devices; only wipe real disks.
        if disk.get('device') == 'disk':
            sources = disk.xpath('source')
            for source in sources:
                disk_file = source.get('file')
                # Third field of the 'virtual size:' line, e.g. '50G'.
                disk_size = exec_cmd('qemu-img info '
                                     '%s |grep \"virtual size:\"'
                                     % disk_file).split()[2]
                delete(disk_file)
                exec_cmd('qemu-img create -f raw %s %s'
                         % (disk_file, disk_size))
def download_sys_con(module, temp_directory, deepsea_version, parameters,
                     deepsea_build):
    """Fetch sys-con; on deepsea builds disable its boot2 flag.

    Returns the module version, or None if the download failed.
    """
    release = get_latest_release(module)
    archive = download_asset(module, release, 0)
    if archive is None:
        return None
    with zipfile.ZipFile(archive, 'r') as bundle:
        bundle.extractall(temp_directory)
    common.delete(archive)
    if deepsea_build:
        common.delete(temp_directory.joinpath(
            'atmosphere/contents/690000000000000D/flags/boot2.flag'))
    return get_version(module, release, 0)
def download_sys_clk(module, temp_directory):
    """Fetch sys-clk, drop its README and install the toolbox config.

    Returns the release tag name, or None if the download failed.
    """
    release = get_latest_release(module)
    archive = download_asset(module, release, 0)
    if archive is None:
        return None
    with zipfile.ZipFile(archive, 'r') as bundle:
        bundle.extractall(temp_directory)
    common.delete(archive)
    common.delete(temp_directory.joinpath('README.md'))
    common.copy_module_file(
        'sys-clk', 'toolbox.json',
        temp_directory.joinpath(
            'atmosphere/contents/00FF0000636C6BFF/toolbox.json'))
    return release.tag_name
def download_ldn_mitm(module, temp_directory, isotope_version, isotope_build):
    """Fetch ldn_mitm; install its toolbox config and, on isotope
    builds, disable auto-boot.

    Returns the module version, or None if the download failed.
    """
    release = get_latest_release(module)
    archive = download_asset(module, release, 0)
    if archive is None:
        return None
    with zipfile.ZipFile(archive, 'r') as bundle:
        bundle.extractall(temp_directory)
    common.delete(archive)
    if isotope_build:
        common.delete(temp_directory.joinpath(
            'atmosphere/contents/4200000000000010/flags/boot2.flag'))
    common.copy_module_file(
        'ldn_mitm', 'toolbox.json',
        temp_directory.joinpath(
            'atmosphere/contents/4200000000000010/toolbox.json'))
    return get_version(module, release, 0)
def configure_environment(self):
    """Create the Fuel environment and apply settings, network and
    node configuration."""
    log('Configure environment')
    # Start from a clean config directory.
    delete(self.yaml_config_dir)
    create_dir_if_not_exists(self.yaml_config_dir)
    name = self.dea.get_env_name()
    seg_type = self.dea.get_env_net_segment_type()
    log('Creating environment %s release %s net-segment-type %s'
        % (name, self.release_id, seg_type))
    create_cmd = (
        'fuel env create --name "%s" --release %s --net-segment-type %s'
        % (name, self.release_id, seg_type))
    exec_cmd(create_cmd)
    if not self.env_exists(name):
        err('Failed to create environment %s' % name)
    self.config_settings()
    self.config_network()
    self.config_nodes()
def deploy(self):
    """Run the deployment pipeline end-to-end.

    Returns the result of the cloud deployment step.
    """
    self.set_boot_order_nodes()
    self.check_prerequisites()
    self.get_mac_addresses()
    self.wait_for_discovered_blades()
    self.merge_plugin_config_files_to_dea_file()
    self.upload_cloud_deployment_files()
    # The merged DEA file has been uploaded; the local copy is no
    # longer needed.
    delete(self.updated_dea_file)
    return self.run_cloud_deploy(CLOUD_DEPLOY_FILE)
def create_vms(self):
    """Define a libvirt VM (with storage) for every configured node,
    applying per-role definition overrides from the DHA file."""
    temp_dir = tempfile.mkdtemp()
    disk_sizes = self.dha.get_disks()
    for node_id in self.node_ids:
        vm_name = self.dha.get_node_property(node_id, 'libvirtName')
        vm_template = '%s/%s' % (self.root_dir,
                                 self.dha.get_node_property(
                                     node_id, 'libvirtTemplate'))
        check_file_exists(vm_template)
        disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
        self.create_storage(node_id, disk_path, disk_sizes)
        # Work on a private copy of the template.
        temp_vm_file = '%s/%s' % (temp_dir, vm_name)
        exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
        # Role-specific overrides for the VM definition, if any.
        vm_definition_overwrite = self.dha.get_vm_definition(
            self.dea.get_node_main_role(node_id, self.fuel_node_id))
        self.define_vm(vm_name, temp_vm_file, disk_path,
                       vm_definition_overwrite)
    delete(temp_dir)
def intro(self):
    """Initialize fresh DEA/DHA output files and download the current
    deployment configuration from Fuel."""
    delete(self.dea_file)
    delete(self.dha_file)
    self.temp_dir = tempfile.mkdtemp()
    date = time.strftime('%c')
    self.write(self.dea_file,
               DEA_1.format(date=date, comment=self.comment), False)
    self.write(self.dha_file,
               DHA_1.format(date=date, comment=self.comment))
    self.get_env()
    # Need to download deployment with explicit node ids
    node_list = parse(exec_cmd('fuel node'))
    real_node_ids = [node[N['id']] for node in node_list]
    # NOTE(review): ids appear to be strings here, so this sort is
    # lexicographic — confirm that is intended.
    real_node_ids.sort()
    self.download_node_config(','.join(real_node_ids))
    self.download_config('settings')
    self.download_config('network')
def run_deploy(self):
    """Start 'fuel deploy-changes' in the background and poll the
    environment status until it is operational, fails, or the
    configured timeout expires.

    Calls err() (which raises/exits) when deployment does not succeed.
    """
    SLEEP_TIME = 60
    LOG_FILE = 'cloud.log'
    log('Starting deployment of environment %s' % self.env_id)
    p = run_proc('fuel --env %s deploy-changes | strings > %s'
                 % (self.env_id, LOG_FILE))
    ready = False
    for i in range(int(self.deploy_timeout)):
        env = parse(exec_cmd('fuel env --env %s' % self.env_id))
        log('Environment status: %s' % env[0][E['status']])
        # Echo the deployer's latest log line; ignore command failure.
        r, _ = exec_cmd('tail -2 %s | head -1' % LOG_FILE, False)
        if r:
            log(r)
        if env[0][E['status']] == 'operational':
            ready = True
            break
        elif (env[0][E['status']] == 'error'
              or env[0][E['status']] == 'stopped'):
            break
        else:
            time.sleep(SLEEP_TIME)
    p.poll()
    # PEP 8: compare against the None sentinel with 'is', not '=='.
    if p.returncode is None:
        # Keep the log around while the deploy process is still running.
        log('The process deploying the changes has not yet finished.')
        log('''The file %s won't be deleted''' % LOG_FILE)
    else:
        delete(LOG_FILE)
    if ready:
        log('Environment %s successfully deployed' % self.env_id)
    else:
        self.collect_error_logs()
        err('Deployment failed, environment %s is not operational'
            % self.env_id)
def patch(self, tmp_new_dir, new_iso):
    """Patch the unpacked ISO tree in place and master a new bootable ISO.

    :param tmp_new_dir: writable copy of the original ISO contents
    :param new_iso: path of the ISO image to create
    """
    log('Patching...')
    patch_dir = '%s/%s' % (CWD, PATCH_DIR)
    ks_path = '%s/ks.cfg.patch' % patch_dir
    with cd(tmp_new_dir):
        # Apply the kickstart patch relative to the ISO root.
        exec_cmd('cat %s | patch -p0' % ks_path)
        delete('.rr_moved')
        isolinux = 'isolinux/isolinux.cfg'
        log('isolinux.cfg before: %s'
            % exec_cmd('grep ip= %s' % isolinux))
        self.update_fuel_isolinux(isolinux)
        log('isolinux.cfg after: %s'
            % exec_cmd('grep ip= %s' % isolinux))
        # Re-use the original volume label so boot references stay valid.
        iso_label = self.parse_iso_volume_label(self.iso_file)
        log('Volume label: %s' % iso_label)
        iso_linux_bin = 'isolinux/isolinux.bin'
        exec_cmd('mkisofs -quiet -r -J -R -b %s '
                 '-no-emul-boot -boot-load-size 4 '
                 '-boot-info-table -hide-rr-moved '
                 '-x "lost+found:" -V %s -o %s .'
                 % (iso_linux_bin, iso_label, new_iso))
def main():
    """Connect to the database and delete every selected row by its
    first column (the id)."""
    com.DB = com.connect()
    for record in com.select_all():
        com.delete(record[0])
# Script fragment: compute shared borders between regions and mark each
# border with the overlap value between its two neighbouring regions.
# NOTE(review): 'parameters', TMP_BORDS and the *_FLD constants are
# defined earlier in the file (not visible here).
regions, idFld, overlapTable, idFrom, idTo, overlapFld, outBorders = parameters
common.progress('creating borders')
tmpBords = common.featurePath(os.path.dirname(outBorders), TMP_BORDS)
arcpy.PolygonToLine_management(regions, tmpBords, 'IDENTIFY_NEIGHBORS')
# dissolve to remove line duplication on shared borders
common.progress('removing border duplicates')
arcpy.Dissolve_management(tmpBords, outBorders,
                          [PTOL_LEFT_FLD, PTOL_RIGHT_FLD])
common.progress('loading region identifiers')
oidFld = arcpy.Describe(regions).OIDFieldName
idReader = loaders.DictReader(regions, {'id' : idFld, 'fid' : oidFld})
regions = idReader.read()
# NOTE(review): regIDType appears unused in this fragment — confirm it
# is needed later in the file.
regIDType = type(next(regions.itervalues()))
common.progress('loading region overlap values')
overlapReader = loaders.OverlapReader(
    overlapTable, {'from' : idFrom, 'to' : idTo, 'value' : overlapFld})
overlaps = overlapReader.read()
# remap to FIDs: PolygonToLine records neighbours by feature ID, while
# the overlap table uses region ids, so translate the keys.
tofid = {}
for id, valdict in overlaps.iteritems():
    fid = regions[id]['fid']
    tofid[fid] = {}
    for toid, toval in valdict.iteritems():
        tofid[fid][regions[toid]['fid']] = {'from' : id, 'to' : toid,
                                            'value' : toval}
common.progress('writing border effects')
# The -1 filter skips outer borders that have no neighbour on one side.
marker = loaders.OverlapMarker(
    outBorders,
    inSlots={'from' : PTOL_LEFT_FLD, 'to' : PTOL_RIGHT_FLD},
    outSlots={'from' : common.NEIGH_FROM_FLD, 'to' : common.NEIGH_TO_FLD,
              'value' : OVERLAP_FLD},
    where=('{} <> -1 AND {} <> -1'.format(PTOL_LEFT_FLD, PTOL_RIGHT_FLD)))
marker.mark(tofid)
common.progress('removing temporary files')
common.delete(tmpBords)
# Script fragment (Python 2): approximate a circle of weighted radius
# around each input point, then build weighted Voronoi polygons from
# the circle points.  inCur/outCur, prog and the field/constant names
# (pi2, tolerance, normalization, ...) are defined earlier in the file.
for inRow in inCur:
    # load input geometry
    pt = inRow.getValue(inShpFld).getPart(0)
    id = inRow.getValue(ptsIDFld)
    coor = (pt.X, pt.Y)
    # load radius: area-proportional weighting (sqrt of the weight).
    radius = math.sqrt(inRow.getValue(weightFld)) * normalization
    print inRow.getValue(weightFld), radius, normalization
    # Approximate the circle with points spaced ~'tolerance' apart,
    # but never fewer than 3 points.
    ptCount = max(int(pi2 * radius / tolerance), 3)
    delta = pi2 / ptCount
    angle = 0
    while angle < pi2:
        outRow = outCur.newRow()
        outRow.setValue(circShapeFld,
                        arcpy.Point(coor[0] + radius * math.cos(angle),
                                    coor[1] + radius * math.sin(angle)))
        outRow.setValue(ptsIDFld, id)
        outCur.insertRow(outRow)
        angle += delta
    prog.move()
# Release the arcpy cursors explicitly so file locks are freed.
del inCur, outCur, inRow, outRow
prog.end()
common.progress('building voronoi polygons')
singlePolys = common.featurePath(location, TMP_ALLPOLY)
mergedPolys = common.featurePath(location, outName)
arcpy.CreateThiessenPolygons_analysis(circLayer, singlePolys, 'ALL')
common.progress('dissolving to weighted polygons')
# One Thiessen polygon per circle point; dissolve them back to one
# polygon per original input point.
arcpy.Dissolve_management(singlePolys, mergedPolys, ptsIDFld)
common.progress('joining transfer fields')
arcpy.JoinField_management(mergedPolys, ptsIDFld, points, ptsIDFld,
                           transferFlds)
common.progress('deleting temporary files')
common.delete(singlePolys)
def create_tmp_dir(self):
    """Reset CWD/fueltmp to an empty directory and record its path."""
    self.tmp_dir = '%s/fueltmp' % CWD
    # Remove any leftovers from a previous run before recreating.
    delete(self.tmp_dir)
    create_dir_if_not_exists(self.tmp_dir)
#!/usr/bin/env python """ # ---------------------------------------------- # # PARKLAB, Author: RPARK API example script for deleting workflows # ---------------------------------------------- # Example calls: python workflow_delete.py <api_key> <galaxy_url>/api/workflows/<workflow id> True """ import os import sys from common import delete try: assert sys.argv[2] except IndexError: print 'usage: %s key url [purge (true/false)] ' % os.path.basename( sys.argv[0] ) sys.exit( 1 ) try: data = {} data[ 'purge' ] = sys.argv[3] except IndexError: pass delete( sys.argv[1], sys.argv[2], data )
# except colors.ColorCodeError, warn: # if invalid color code # common.warning(warn) # color = colors.BLACK_RGB # black # for i in range(len(BANDS)): # row.setValue(bandFields[BANDS[i]], color[i]) # zoneRows.updateRow(row) # del zoneRows # convert color band weights to raster common.progress('converting to raster') main, ext = os.path.splitext(output) bandRasters = {} for band in BANDS: bandRasters[band] = main + '_' + band + '.img' arcpy.PolygonToRaster_conversion(zones, bandFields[band], bandRasters[band], 'MAXIMUM_COMBINED_AREA', '', cellSize) # merge R, G, B bands into one - use IMG format to avoid holes where some of the bands is zero common.progress('combining color bands') common.debug(bandRasters) compRaster = main + '_c.img' arcpy.CompositeBands_management([bandRasters[band] for band in BANDS], compRaster) # and copy... this was more bugproof common.progress('creating result') arcpy.CopyRaster_management(compRaster, output) common.progress('defining result projection') arcpy.DefineProjection_management(output, arcpy.Describe(zones).spatialReference) # delete temporary mess common.progress('deleting temporary fields') arcpy.DeleteField_management(zones, bandFields.values()) common.progress('deleting temporary files') common.delete(*(list(bandRasters.values()) + [compRaster]))
# Script fragment: overlay duplicated connection lines and sum their
# traffic, then halve the sums to undo the deliberate duplication.
# duplPath/mergePath/conns/outPath etc. are defined earlier in the file.
interPath = common.addExt(os.path.join(location, INTER_FILE))
common.progress('preparing connections')
# Duplicate the connections and merge, so every segment is covered twice;
# the intersection then splits lines at all crossings.
arcpy.CopyFeatures_management(conns, duplPath)
arcpy.Merge_management([conns, duplPath], mergePath)
common.progress('intersecting connections')
arcpy.Intersect_analysis([mergePath], interPath, 'ALL', tolerance, 'INPUT')
# create a shape length field
common.progress('marking traffic')
arcpy.AddField_management(interPath, SHPLEN_FLD, 'Double')
arcpy.CalculateField_management(interPath, SHPLEN_FLD, '!shape.length!',
                                'PYTHON_9.3')
common.progress('summarizing traffic')
# Segments with identical length at the same location collapse into one
# feature carrying the summed traffic.
arcpy.Dissolve_management(interPath, outPath, [SHPLEN_FLD],
                          [[trafFld, 'SUM']], 'SINGLE_PART')
common.progress('initializing traffic fields')
arcpy.AddField_management(outPath, trafFld,
                          common.typeOfField(conns, trafFld))
sumFld = 'SUM_' + trafFld
# TODO: shapefile field name length adjustments...
# Every connection was counted twice by the merge above, so halve the
# summed value into the final traffic field.
Halver(outPath, {'doubled' : sumFld},
       {'halved' : trafFld}).decompose('computing traffic fields')
# (dead code retained: manual halving loop replaced by Halver above)
# prog = common.progressor('adjusting total flow counts', common.count(outPath))
# outRows = arcpy.UpdateCursor(outPath)
# for row in outRows:
#     unduplVal = row.getValue(sumFld) / 2
#     row.setValue(trafFld, unduplVal)
#     outRows.updateRow(row)
#     prog.move()
# del row, outRows
# prog.end()
common.progress('deleting temporary fields')
arcpy.DeleteField_management(outPath, [sumFld, SHPLEN_FLD])
common.progress('deleting temporary files')
common.delete(duplPath, mergePath, interPath)