def do_fpga_image_list(args):
    """Query FPGA images of a tenant"""
    kwargs = OrderedDict()
    if args.page is not None and args.size is not None:
        kwargs['page'] = args.page
        kwargs['size'] = args.size
    elif args.page is not None and args.size is None \
            or args.page is None and args.size is not None:
        utils.print_err('Error: argument --page and --size '
                        'must exist or not exist at the same time')
        return
    utils.check_param(**kwargs)
    status_code, reason, body = rest.fpga_image_list(*_get_config(), params=kwargs)
    if status_code != 200 or not isinstance(body, dict):
        raise FisException(_invalid_resp(status_code, reason, body))
    fi_list = body.get('fpgaimages', [])
    _do_resp(status_code, reason)
    columns = [
        'id', 'name', 'status', 'protected', 'size',
        'createdAt', 'description', 'metadata', 'message'
    ]
    utils.print_list(fi_list, columns)
def uninstall(args):
    if len(args) > 0:
        utils.print_err('too many args')
        return
    if __read_input('really want to remove sman?'):
        install.check_exists(True)
def write(self, data, extra={}):
    if len(data) % self.block_size != 0:
        print_err("BUG: writing %d bytes not aligned to block size %d" % (
            len(data), self.block_size))
        return 0
    q = self.encryptor.encrypt(data)
    return self.upstream.write(q)
def __init__(self, config):
    """
    Constructor
    :param config: a mprov Config object.
    :type config: mprov.Config.Config
    """
    self.__path = config.get_conf_val("path")
    self.__config = config
    self.__status = "outdated"
    if not os.path.exists(self.__path):
        utils.print_err("Error: Path " + self.__path + " doesn't exist! Exiting.")
        exit(1)
    self.__my_uuid = str(uuid.uuid4())
    # set up our listen socket.
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # type: socket
    self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.sock.bind(("", 4018))
    # convert the "heartbeat" parameter to seconds
    tmp_timer_str = self.__config.get_conf_val("heartbeat")
    self.__hb_timer_interval = int(re.sub(r"\D", "", tmp_timer_str))
    if tmp_timer_str[-1:] == "m":
        self.__hb_timer_interval = self.__hb_timer_interval * 60
def create_users(argvs):
    '''
    create little_finger access user
    :param argvs:
    :return:
    '''
    if '-f' in argvs:
        user_file = argvs[argvs.index("-f") + 1]
    else:
        print_err("invalid usage, should be:\ncreateusers -f <the new users file>", quit=True)
    source = yaml_parser(user_file)
    if source:
        for key, val in source.items():
            print(key, val)
            obj = models.UserProfile(username=key, password=val.get('password'))
            if val.get('groups'):
                groups = session.query(models.Group).filter(
                    models.Group.name.in_(val.get('groups'))).all()
                if not groups:
                    print_err("none of [%s] exist in group table." % val.get('groups'), quit=True)
                obj.groups = groups
            if val.get('bind_hosts'):
                bind_hosts = common_filters.bind_hosts_filter(val)
                obj.bind_hosts = bind_hosts
            # print(obj)
            session.add(obj)
        session.commit()
def run(self):
    """
    Run the WorkerServer
    :return: none
    """
    self.sock.listen(1024)
    self.__master_connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    master_address = (self.__config.get_conf_val("ms"), 4017)
    # self.__master_connection.settimeout(60)
    utils.print_err("Connecting to " + self.__config.get_conf_val("ms"))
    try:
        self.__master_connection.connect(master_address)
    except Exception as e:
        utils.print_err("Error: Unable to connect to master. Will Retry.")
        self.__master_connection.close()
        self.__master_connection = None
    # register with the master server, starts a timer that runs every "heartbeat" interval
    self._register_with_master()
    while True:
        try:
            connection, address = self.sock.accept()
        except KeyboardInterrupt as kbd_int:
            self.signal_handler(signal.SIGINT, None)
            return
        connection.settimeout(600)
        threading.Thread(target=self._handle_connection, args=(connection, address)).start()
def _create_installers(self, arch):
    print_ok('Creating installer (%s) ...' % (ArchNames.names[arch]))
    ptversion = get_cmd_output([
        CMD[HG], 'log', '-b', self._hgbranch, '-l', '1', '--style', self._paths[VERSTYFILE]
    ])
    with open(self._paths[ISSFILE][arch]) as issfile:
        iss_script = issfile.readlines()
    i = 0
    while i < len(iss_script):
        line = iss_script[i].replace('{{versionstring}}', ptversion)
        line = line.replace('{{changelogfile}}', self._paths[CHLOGFILE])
        line = line.replace(
            '{{outputbasename}}',
            self._INST_NAME_PATTERN % (self._release_date, ArchNames.names[arch]))
        iss_script[i] = line.replace('{{bindir}}', self._paths[BINDIR][arch])
        i += 1
    iscc_proc = Popen([CMD[ISCC], '/O' + self._paths[PKGBASEDIR], '-'], stdin=PIPE)
    iscc_proc.communicate(input=bytes('\n'.join(iss_script), 'latin_1'))
    if iscc_proc.returncode != 0:
        print_err('ERROR: Creating installer (%s) failed.' % (ArchNames.names[arch]))
        return False
    return True
def main(cli_params):
    if len(cli_params) == 0:
        print_err('Not implemented yet.')
        return False
    elif len(cli_params) == 3:
        srcdir = os.path.abspath(cli_params[0])
        destdir = os.path.abspath(cli_params[1])
        arch = "win" + cli_params[2]
    else:
        print_err('Wrong number of arguments. Must be none or three.')
        return False

    if not os.path.isdir(srcdir):
        print_err('ERROR: Source base directory "%s" missing.' % srcdir)
        return False

    os.makedirs(destdir, exist_ok=True)
    if not os.path.isdir(destdir):
        print_err('ERROR: Destination base "%s" is not a directory.' % destdir)
        return False

    if arch not in ['win32', 'win64']:
        print_err('ERROR: Unknown architecture. Must be "32" or "64".')
        return False

    if not kill_old_libs(destdir):
        return False

    src_mingw = os.path.join(srcdir, arch, 'bin')
    src_qt = os.path.join(srcdir, arch, 'dev', 'qt', 'bin')
    src_dev = os.path.join(srcdir, arch, 'dev', 'bin')
    file_dict = FILE_LIST[arch]
    file_list = []

    for file in file_dict['mingw']:
        file_list.append([os.path.join(src_mingw, file), destdir])

    for file in file_dict['qt']:
        destfile = file
        if '\\' in file:
            file_list.append([
                os.path.join(os.path.dirname(src_qt), file),
                os.path.join(destdir, destfile)
            ])
            os.makedirs(os.path.dirname(os.path.join(destdir, destfile)), exist_ok=True)
        else:
            file_list.append(
                [os.path.join(src_qt, file), os.path.join(destdir, destfile)])

    for file in file_dict['dev']:
        file_list.append([os.path.join(src_dev, file), destdir])

    if not copy_libs(file_list):
        return False

    print_ok('Libraries successfully updated.')
    return True
def match_domains_to_services(domain_wildcard, name, env, app_id, service_to_addresses):
    result = {}
    service_index = (name, env, app_id)
    service_index_pattern = [create_named_pattern_for_wildcard(wildcard)
                             for wildcard in service_index]
    if any(named_pattern and '%' in named_pattern for named_pattern in service_index_pattern):
        print_err('Invalid wildcard in {service_index}'.format(**locals()))
        return result
    for service, addresses in service_to_addresses.items():
        replacements = {}
        matched = True
        for i, named_pattern in enumerate(service_index_pattern):
            if not named_pattern and not service[i]:
                continue
            if not named_pattern or not service[i]:
                matched = False
                break
            match = re.match('^' + named_pattern + '$', service[i])
            if not match:
                matched = False
                break
            replacements.update(match.groupdict())
        if matched:
            domain = str(domain_wildcard)
            for pattern_name, replacement in replacements.items():
                domain = domain.replace('%{0}%'.format(pattern_name), replacement)
            if not re.match(DOMAIN_PATTERN, domain):
                print_err('Invalid domain: {domain}'.format(**locals()))
                continue
            result[domain] = addresses
    return result
def configure_intranet_dns_ecs(region):
    try:
        dns = endpoints.get(region, {}).get('dns')
        if dns is None:
            return
        configure_dns = True
        if os.path.exists(DNS_CONFIG_FILE):
            with open(DNS_CONFIG_FILE) as resolv:
                record = []
                for line in resolv:
                    record = line.split()
                    if len(record) < 2:
                        continue
                    if record[0] == 'nameserver':
                        break
            if len(record) >= 2 and record[0] == 'nameserver' and record[1] in dns:
                configure_dns = False
        if configure_dns:
            with open('/etc/resolv.conf', 'w') as resolv:
                resolv.write('; generated by fisclient\nsearch openstacklocal novalocal\n')
                resolv.write('nameserver %s\n' % dns[0])
                resolv.write('nameserver %s\n' % dns[1])
    except Exception as e:
        utils.print_err('Configure private DNS of ECS failed: %s'
                        % encode.exception_to_unicode(e))
def configure_intranet_dns_vpc(ak, sk, project_id, region, ecs_host, vpc_host):
    try:
        dns = endpoints.get(region, {}).get('dns')
        instance_id = rest.get_instance_id_from_metadata()
        if dns is None or instance_id is None:
            return
        nics = rest.get_os_interface(ak, sk, project_id, region, ecs_host, instance_id)
        for nic in nics.get('interfaceAttachments', []):
            net_id = nic.get('net_id')
            subnet = rest.get_subnet(ak, sk, project_id, region, vpc_host,
                                     net_id).get('subnet', {})
            if subnet.get('primary_dns') in dns:
                continue
            vpc_id = subnet.get('vpc_id')
            dns_body = {
                'subnet': {
                    'name': subnet.get('name'),
                    'primary_dns': dns[0],
                    'secondary_dns': dns[1]
                }
            }
            rest.put_subnet(ak, sk, project_id, region, vpc_host, vpc_id,
                            net_id, json.dumps(dns_body))
    except Exception as e:
        msg = encode.exception_to_unicode(e)
        if getattr(e, 'code', None) == 404:
            msg += ', \033[31mTips=Maybe you are not in your own ECS\033[0m'
        utils.print_err('Check private DNS of VPC failed: %s' % msg)
def init_ip_info():
    global USE_MIDDLE_PROXY
    global PREFER_IPV6
    global my_ip_info

    TIMEOUT = 5
    try:
        with urllib.request.urlopen('https://v4.ifconfig.co/ip', timeout=TIMEOUT) as f:
            if f.status != 200:
                raise Exception("Invalid status code")
            my_ip_info["ipv4"] = f.read().decode().strip()
    except Exception:
        pass

    if PREFER_IPV6:
        try:
            with urllib.request.urlopen('https://v6.ifconfig.co/ip', timeout=TIMEOUT) as f:
                if f.status != 200:
                    raise Exception("Invalid status code")
                my_ip_info["ipv6"] = f.read().decode().strip()
        except Exception:
            PREFER_IPV6 = False
        else:
            print_err("IPv6 found, using it for external communication")

    if USE_MIDDLE_PROXY:
        if ((not PREFER_IPV6 and not my_ip_info["ipv4"]) or
                (PREFER_IPV6 and not my_ip_info["ipv6"])):
            print_err("Failed to determine your ip, advertising disabled")
            USE_MIDDLE_PROXY = False
def auth():
    """
    do the user login authentication
    :return:
    """
    count = 0
    while count < 3:
        username = raw_input("\033[32;1mUsername:\033[0m").strip()
        if len(username) == 0:
            continue
        password = raw_input("\033[32;1mPassword:\033[0m").strip()
        if len(password) == 0:
            continue
        user_obj = (
            session.query(models.UserProfile)
            .filter(models.UserProfile.username == username,
                    models.UserProfile.password == password)
            .first()
        )
        if user_obj:
            return user_obj
        else:
            print("wrong username or password, you have %s more chances." % (3 - count - 1))
            count += 1
    else:
        print_err("too many attempts.")
def create_groups(argvs):
    '''
    create groups
    :param argvs:
    :return:
    '''
    if '-f' in argvs:
        group_file = argvs[argvs.index("-f") + 1]
    else:
        print_err("invalid usage, should be:\ncreategroups -f <the new groups file>", quit=True)
    source = yaml_parser(group_file)
    if source:
        for key, val in source.items():
            print(key, val)
            obj = models.Group(name=key)
            if val.get('bind_hosts'):
                bind_hosts = common_filters.bind_hosts_filter(val)
                obj.bind_hosts = bind_hosts
            if val.get('user_profiles'):
                user_profiles = common_filters.user_profiles_filter(val)
                obj.user_profiles = user_profiles
            session.add(obj)
        session.commit()
async def read(self, buf_size):
    msg_len_bytes = await self.upstream.readexactly(4)
    msg_len = int.from_bytes(msg_len_bytes, "little")

    # skip paddings
    while msg_len == 4:
        msg_len_bytes = await self.upstream.readexactly(4)
        msg_len = int.from_bytes(msg_len_bytes, "little")

    len_is_bad = (msg_len % len(PADDING_FILLER) != 0)
    if not MIN_MSG_LEN <= msg_len <= MAX_MSG_LEN or len_is_bad:
        print_err("msg_len is bad, closing connection", msg_len)
        return b""

    msg_seq_bytes = await self.upstream.readexactly(4)
    msg_seq = int.from_bytes(msg_seq_bytes, "little", signed=True)
    if msg_seq != self.seq_no:
        print_err("unexpected seq_no")
        return b""

    self.seq_no += 1

    data = await self.upstream.readexactly(msg_len - 4 - 4 - 4)
    checksum_bytes = await self.upstream.readexactly(4)
    checksum = int.from_bytes(checksum_bytes, "little")

    computed_checksum = binascii.crc32(msg_len_bytes + msg_seq_bytes + data)
    if computed_checksum != checksum:
        return b""
    return data
def get_load_balancers():
    load_balancer_configs = hermes.get_config('load-balancers.json')
    if not load_balancer_configs:
        load_balancer_configs = [{
            'type': 'main-haproxy',
            'env': os.environ.get('MICROSERVICE_ENV'),
        }]
    for load_balancer_config in load_balancer_configs:
        load_balancer_type = load_balancer_config['type']
        if load_balancer_type == 'haproxy':
            load_balancer = haproxy.Haproxy(load_balancer_config)
            stats_config = load_balancer_config.get('stats')
            load_balancer.configure_stats(stats_config)
            yield load_balancer
        elif load_balancer_type == 'main-haproxy':
            main_haproxies = get_matching_main_haproxies(load_balancer_config.get('env'))
            for main_haproxy in main_haproxies:
                load_balancer = haproxy.MainHaproxy(main_haproxy)
                haproxy_parameters = load_balancer_config.get('haproxy_parameters')
                load_balancer.override_haproxy_parameters(haproxy_parameters)
                stats_config = load_balancer_config.get('stats')
                load_balancer.configure_stats(stats_config)
                yield load_balancer
        else:
            print_err("Unknown load-balancer type: {load_balancer_type}".format(**locals()))
def parse_scpcmd_file(filename, bl_scp, options):
    """
    :param filename:
    :param bl_scp:
    :param options:
    :return:
    """
    if options.verbose >= VERBOSE:
        print 'Open file: ' + filename
    cmds_file = open(filename, 'r')
    file_dir = os.path.dirname(filename)
    packet_list = []
    # Get number of packets to send
    for line in cmds_file:
        file_name = line.strip()
        s_m = re.search(
            r'(\w+[_-]*\w+)\.(\d+)\.(\w+[_-]*\w+)\.((\w+[_-]*)*)\.\w+',
            file_name)
        if s_m is not None:
            id = s_m.group(2)
            cmd = s_m.group(4)
            way_str = s_m.group(3)
        else:
            print_err("error: wrong filename: " + file_name)
            raise Exception()
        if way_str == 'bl':
            way = False ^ options.bl_emulation
        elif way_str == 'host':
            way = True ^ options.bl_emulation
        else:
            print_err("error: wrong filename: " + file_name)
            raise Exception()
        if cmd == "connection_request" or cmd == "connection_reply":
            packet = ConnectionPacket(file_dir, file_name, bl_scp, options, cmd, id, way)
        elif cmd == "hello_reply":
            packet = HelloReplyPacket(file_dir, file_name, bl_scp, options, cmd, id, way)
        elif cmd == "erase_mem" or cmd == "del_mem":
            packet = ErasePacket(file_dir, file_name, bl_scp, options, cmd, id, way)
        elif cmd == "dump":
            packet = DumpPacket(file_dir, file_name, bl_scp, options, cmd, id, way)
        else:
            packet = ScpPacket(file_dir, file_name, bl_scp, options, cmd, id, way)
        packet_list.append(packet)
    cmds_file.close()
    return packet_list
def user_profiles_filter(vals):
    user_profiles = session.query(models.UserProfile).filter(
        models.UserProfile.username.in_(vals.get('user_profiles'))).all()
    if not user_profiles:
        print_err("none of [%s] exist in user_profile table." % vals.get('user_profiles'),
                  quit=True)
    return user_profiles
def _handle_connection(self, client, address):
    """
    Handle the incoming connection
    :param client: the worker or client wanting to talk to us.
    :type client: socket.socket
    :type address: List[str]
    :param address: possibly the INET address? It's returned from sock.accept()
    :return: True if we close OK, False if we don't. I don't think this matters.
    """
    size = 1024
    while True:
        try:
            packet = utils.parse_packet(client.recv(size))
            if packet is not None:
                if "execmd" in packet:
                    self._handle_cmd(client, address, packet["raw_packet"])
                elif "worker" in packet:
                    self._handle_worker_req(client, address, packet["raw_packet"])
                elif "client" in packet:
                    if "worker_state" in packet:
                        # the client thinks the worker is bad, let's see if they are a valid client.
                        if packet['worker_state'] == "error":
                            req = self._find_req_by_uuid(packet["uuid"])
                            # valid request
                            if req is not None:
                                worker = self._find_worker_by_uuid(req.get_worker_uuid())
                                if worker is not None:
                                    worker.set_status("error")
                                else:
                                    utils.print_err(
                                        "Error: client reported error on unknown worker.")
                            else:
                                utils.print_err(
                                    "Error: unknown request attempted to report worker error")
                    self._handle_client_req(client, address, packet["raw_packet"])
                elif "verify" in packet:
                    req_uuid = packet["uuid"]
                    client_req = self._find_req_by_uuid(req_uuid)  # type: MasterServerClientRequest
                    if client_req is None:
                        client.send("err: unable to find request")
                    else:
                        client.send(client_req.serialize() + "uuid=" + req_uuid)
                else:
                    client.send("Error: Unrecognized command.\n")
            else:
                client.close()
                return False
        except socket.timeout:
            # print e
            client.close()
            return False
def get_instance_id_from_metadata():
    try:
        resp = requests.get(
            'http://169.254.169.254/openstack/latest/meta_data.json', timeout=10)
        return resp.json().get('uuid')
    except Exception as e:
        utils.print_err('Get instance_id from ECS metadata failed: %s'
                        % encode.exception_to_unicode(e))
def bind_hosts_filter(vals):
    print('**>', vals.get('bind_hosts'))
    # NOTE: the query selects BindHost but filters on models.Host.hostname without an
    # explicit join, so this likely relies on an implicit cross join between the tables.
    bind_hosts = session.query(models.BindHost).filter(
        models.Host.hostname.in_(vals.get('bind_hosts'))).all()
    if not bind_hosts:
        print_err("none of [%s] exist in bind_host table." % vals.get('bind_hosts'), quit=True)
    return bind_hosts
def create_bindhosts(argvs):
    '''
    create bind hosts
    :param argvs:
    :return:
    '''
    if '-f' in argvs:
        bindhosts_file = argvs[argvs.index("-f") + 1]
    else:
        print_err(
            "invalid usage, should be:\ncreate_hosts -f <the new bindhosts file>",
            quit=True)
    source = yaml_parser(bindhosts_file)
    if source:
        for key, val in source.items():
            # print(key,val)
            host_obj = session.query(models.Host).filter(
                models.Host.hostname == val.get('hostname')).first()
            assert host_obj
            for item in val['remote_users']:
                print(item)
                assert item.get('auth_type')
                if item.get('auth_type') == 'ssh-passwd':
                    remoteuser_obj = session.query(models.RemoteUser).filter(
                        models.RemoteUser.username == item.get('username'),
                        models.RemoteUser.password == item.get('password')).first()
                else:
                    remoteuser_obj = session.query(models.RemoteUser).filter(
                        models.RemoteUser.username == item.get('username'),
                        models.RemoteUser.auth_type == item.get('auth_type'),
                    ).first()
                if not remoteuser_obj:
                    print_err("RemoteUser obj %s does not exist." % item, quit=True)
                bindhost_obj = models.BindHost(host_id=host_obj.id,
                                               remoteuser_id=remoteuser_obj.id)
                session.add(bindhost_obj)
                # for groups this host binds to
                if source[key].get('groups'):
                    group_objs = session.query(models.Group).filter(
                        models.Group.name.in_(source[key].get('groups'))).all()
                    assert group_objs
                    print('groups:', group_objs)
                    bindhost_obj.groups = group_objs
                # for user_profiles this host binds to
                if source[key].get('user_profiles'):
                    userprofile_objs = session.query(models.UserProfile).filter(
                        models.UserProfile.username.in_(
                            source[key].get('user_profiles'))).all()
                    assert userprofile_objs
                    print("userprofiles:", userprofile_objs)
                    bindhost_obj.user_profiles = userprofile_objs
                # print(bindhost_obj)
        session.commit()
def read(self, args, c):
    self.id, c = utils.get_int(args, c)
    self.x, c = utils.get_int(args, c)
    self.y, c = utils.get_int(args, c)
    self.health, c = utils.get_int(args, c)
    self.max_health, c = utils.get_int(args, c)
    self.shield, c = utils.get_int(args, c)
    self.max_shield, c = utils.get_int(args, c)
    self.energy, c = utils.get_int(args, c)
    self.maxCD, c = utils.get_int(args, c)
    self.groundCD, c = utils.get_int(args, c)
    self.airCD, c = utils.get_int(args, c)
    self.idle, c = utils.get_int(args, c)
    self.visible, c = utils.get_int(args, c)
    self.type, c = utils.get_int(args, c)
    self.armor, c = utils.get_int(args, c)
    self.shieldArmor, c = utils.get_int(args, c)
    self.size, c = utils.get_int(args, c)
    self.pixel_x, c = utils.get_int(args, c)
    self.pixel_y, c = utils.get_int(args, c)
    self.pixel_size_x, c = utils.get_int(args, c)
    self.pixel_size_y, c = utils.get_int(args, c)
    self.groundATK, c = utils.get_int(args, c)
    self.airATK, c = utils.get_int(args, c)
    self.groundDmgType, c = utils.get_int(args, c)
    self.airDmgType, c = utils.get_int(args, c)
    self.groundRange, c = utils.get_int(args, c)
    self.airRange, c = utils.get_int(args, c)

    n_orders, c = utils.get_int(args, c)
    if n_orders < 0:
        utils.print_err("Corrupted replay: n_orders < 0")
        return
    self.orders = []
    for i in xrange(0, n_orders):
        self.orders.append(Order())
        self.orders[i].first_frame, c = utils.get_int(args, c)
        self.orders[i].type, c = utils.get_int(args, c)
        self.orders[i].targetId, c = utils.get_int(args, c)
        self.orders[i].targetX, c = utils.get_int(args, c)
        self.orders[i].targetY, c = utils.get_int(args, c)

    self.velocityX, c = utils.get_float(args, c)
    self.velocityY, c = utils.get_float(args, c)
    self.playerId, c = utils.get_int(args, c)
    self.resources, c = utils.get_int(args, c)
    return c
def _handle_client_req(self, connection, address, data):
    """
    sub function to handle a client sync request.
    :param connection:
    :type connection: socket.socket
    :param address: list str
    :param data: str
    :return:
    """
    client_obj = MasterServerClientRequest(
        data + " ip=" + address[0])  # type: MasterServerClientRequest
    with self.__master_data_lock:
        # look for this client_request already in the list, and just update its hb entry.
        for m_client in self.client_requests:  # type: MasterServerClientRequest
            if client_obj.get_uuid() == m_client.get_uuid():
                m_client.set_last_hb(time())
                tmp_worker = self._find_worker_by_uuid(m_client.get_worker_uuid())
                if tmp_worker is None:
                    utils.print_err(
                        "Error: unable to get worker by uuid for client. Worker UUID: " +
                        m_client.get_worker_uuid())
                    utils.print_err("Error: Purging client.")
                    self.client_requests.remove(m_client)
                if client_obj.get_done() == "done":
                    tmp_worker.set_slots_in_use(tmp_worker.get_slots_in_use() - 1)
                    # utils.print_err("Client: " + m_client.get_ip() + " complete. Removing request " + m_client.get_uuid())
                    self.client_requests.remove(m_client)
                connection.sendall("ok uuid=" + client_obj.get_uuid() +
                                   " worker_ip=" + tmp_worker.get_ip())
                return

    # look up the least used worker and send the client there.
    # self.__master_data_lock.release()
    free_worker = self._find_least_updated_worker()  # type: MasterServerWorkerEntry
    if free_worker is None:
        connection.sendall("err no workers found.")
        # print "Removing request " + m_client.get_uuid()
        # self.__master_data_lock.acquire()
        # self.client_requests.remove(client_obj)
        # self.__master_data_lock.release()
        return
    self.__master_data_lock.acquire()
    client_obj.set_last_hb(time())
    self.client_requests.append(client_obj)
    client_obj.set_worker_uuid(free_worker.get_uuid())
    self.__master_data_lock.release()
    connection.sendall("ok uuid=" + client_obj.get_uuid() +
                       " worker_ip=" + free_worker.get_ip())
def kill_old_libs(destdir):
    status = True
    for file in glob.glob(os.path.join(destdir, '*.dll')):
        try:
            os.remove(file)
        except OSError as err:
            print_err('Could not delete ' + file)
            print_err(str(err))
            status = False
    return status
def pass3(dirs_list, files_list):
    # TODO: Remove dirs
    print_verbose("Removing files ")
    for file_name in files_list:
        os.remove(file_name)
    dirs_list.append(args.work_dir)
    for d in dirs_list:
        try:
            os.rmdir(d)
        except OSError as e:
            print_err("Cannot delete directory '{}': {}".format(d, e.strerror))
    print_verbose("Files removed. Test finished")
def check_bin(exec_cmd):
    """
    Tests if a command is present.
    exec_cmd    list    command that should be tested
    <return>    bool    true if command can be executed
    """
    with open(os.devnull, "w") as devnull:
        try:
            subprocess.call(exec_cmd, stdout=devnull, stderr=devnull)
            return True
        except OSError:
            print_err("ERROR: Required command not found: " + exec_cmd[0])
            return False
def get_author_affiliation(affiliation_row_data):
    from vivopump import replace_initials
    affiliation_list_out = []
    affiliation_parts = affiliation_row_data.split('. ')
    utils.print_err("affiliation_parts = \n{}".format(affiliation_parts))
    for affiliation in affiliation_parts:
        utils.print_err("affiliation = \n{}".format(affiliation))
        if '(Reprint Author)' in affiliation:
            utils.print_err("\nReprint Author found \n")
            if len(affiliation_list_out) > 0:
                affiliation_list_out += ';true'
            else:
                utils.print_err("\naffiliation_list_out < 0\n")
                affiliation_list_out = 'true'
        else:
            if len(affiliation_list_out) > 0:
                affiliation_list_out += ';false'
            else:
                affiliation_list_out = 'false'
    return affiliation_list_out
def _handle_stop_master_sync(self, connection, address):
    """
    should stop the rsync daemon
    :param connection: socket
    :param address: address of who is connected.
    :return:
    """
    if self.__rsyncd_pid > 0:
        # attempt to terminate the rsyncd process
        try:
            os.kill(self.__rsyncd_pid, signal.SIGTERM)
        except Exception as e:
            utils.print_err("Error: rsync died unexpectedly!")
            return
def configs_checking(self):
    for cfg in self.task_configs:
        cfg_name = cfg['PipeProcessor']
        default_cfg = self.default_configs_map[cfg_name]
        if default_cfg['args'] is not None:
            for arg, val in default_cfg['args'].items():
                if val == 'AVP::REQUIRED':
                    if (not dict_has('args', cfg)) or (arg not in cfg['args']):
                        print_err(f"{cfg_name}.{arg} undefined!")
        if ('args' in cfg) and (cfg['args'] is not None):
            for arg, val in cfg['args'].items():
                if arg not in default_cfg['args']:
                    print_err(f"Unknown arg: {cfg_name}.{arg}")
def build(self, arch):
    """
    Build Photivo for the given architecture.
    arch can be either Arch.win32 or Arch.win64
    <return>    bool    True if build succeeded, False otherwise
    """
    if not self._change_tc_arch(ArchNames.bits[arch]):
        return False

    try:
        os.chdir(self._paths[BUILDDIR][arch])
    except OSError as err:
        print_err('ERROR: Changing directory to "%s" failed.' % self._paths[PKGBASEDIR])
        print_err(str(err))
        return False

    print_ok('Building Photivo and ptClear (%s) ...' % ArchNames.names[arch])

    # Build production Photivo
    build_result = (run_cmd([CMD[QMAKE],
                             os.path.join('..', '..', 'photivo.pro'),
                             'CONFIG+=WithoutGimp',
                             'CONFIG-=debug'], env=self._env)
                    and run_cmd([CMD[MAKE]], env=self._env))

    if (not build_result
            or not os.path.isfile(os.path.join(self._paths[BUILDDIR][arch], 'photivo.exe'))
            or not os.path.isfile(os.path.join(self._paths[BUILDDIR][arch], 'ptClear.exe'))):
        print_err('ERROR: Building Photivo failed.')
        return False

    # Move fresh binaries to bin dir
    try:
        shutil.move(
            os.path.join(self._paths[BUILDDIR][arch], 'photivo.exe'),
            self._paths[BINDIR][arch])
        shutil.copy(
            os.path.join(self._paths[BUILDDIR][arch], 'ptClear.exe'),
            self._paths[BINDIR][arch])
    except OSError as err:
        print_err('ERROR: Copying binaries to "%s" failed.' % self._paths[BINDIR])
        print_err(str(err))
        return False

    return True
def list(args):
    sman_dir = install.get_install_dir()
    if len(args) > 0:
        utils.print_err('too many args')
        return
    base_scripts_dirs = install.get_base_scripts_dirs(sman_dir)
    for fetch_dir in base_scripts_dirs:
        abs_fetch_dir = path.join(sman_dir, fetch_dir)
        __find_script_display(fetch_dir, abs_fetch_dir)
    custom_scripts_dirs = install.get_custom_dirs(sman_dir)
    for custom_scripts_dir in custom_scripts_dirs:
        abs_fetch_dir = install.get_custom_scripts_dir(custom_scripts_dir)
        __find_script_display(path.basename(custom_scripts_dir), abs_fetch_dir)
def _change_tc_arch(self, archname):
    """
    Changes the toolchain architecture between 32bit and 64bit.
    Calls the external switchtc script and parses its output.
    """
    try:
        for line in get_cmd_output(["switchtc", TC_NAME, archname, "--listenv"],
                                   use_shell=True).split("\n"):
            key, _, val = line.strip().partition("=")
            self._env[key] = val
        return True
    except Exception as err:
        print_err(str(err))
        print_err("ERROR: Failed to switch toolchain to %s %s." % (TC_NAME, archname))
        return False
def _copy_data_dlls(self, arch):
    """
    Updates libs and data files in the bin dir.
    """
    print_ok("Packaging files (%s)..." % (ArchNames.names[arch]))

    # Changelog: make sure it is up to date (i.e. edited today)
    while True:
        chlog_moddate = datetime.fromtimestamp(os.path.getmtime(self._paths[CHLOGFILE])).date()
        if chlog_moddate >= datetime.today().date():
            break
        else:
            print_warn("Changelog not edited today, but on " + str(chlog_moddate) +
                       ". It is probably outdated.")
            print("Note that any changes you make after this point will probably not be present")
            print("in the installers.")
            cont = wait_for_key("(R)etry, (c)ontinue or (a)bort?", ["r", "c", "a"])
            if cont == "r":
                continue
            elif cont == "c":
                break
            elif cont == "a":
                raise KeyboardInterrupt

    shutil.copy(self._paths[CHLOGFILE], self._paths[BINDIR][arch])

    # copy licence files
    shutil.copy(self._paths[LICFILE],
                os.path.join(self._paths[BINDIR][arch], "License.txt"))
    shutil.copy(self._paths[LIC3FILE],
                os.path.join(self._paths[BINDIR][arch], "License 3rd party.txt"))

    # Call util scripts to update data files and DLLs
    if not ptupdata.main([self._paths[PTBASEDIR], self._paths[BINDIR][arch]]):
        return False
    try:
        if not ptuplibs.main(
            [os.path.dirname(os.environ["tcpath"]), self._paths[BINDIR][arch], ArchNames.bits[arch]]
        ):
            return False
    except KeyError:
        print_err("Environment variable tcpath not set.")
        return False

    # strip unnecessary symbols from binaries
    for files in ["*.exe", "*.dll"]:
        if not run_cmd([CMD[STRIP], os.path.join(self._paths[BINDIR][arch], files)]):
            print_warn("WARNING: Failed to strip " + os.path.join(self._paths[BINDIR][arch], files))

    return True
def prepare_dirs(paths):
    try:
        if os.path.exists(paths[PKGBASEDIR]):
            shutil.rmtree(paths[PKGBASEDIR])
        os.makedirs(paths[BUILDDIR][Arch.win32])
        os.makedirs(paths[BUILDDIR][Arch.win64])
        os.makedirs(paths[BINDIR][Arch.win32])
        os.makedirs(paths[BINDIR][Arch.win64])
        return True
    except OSError as err:
        print_err('ERROR: Setup of build directory tree "%s" failed.' % paths[PKGBASEDIR])
        print_err(str(err))
        return False
def update(args=None):
    if args and len(args) > 0:
        utils.print_err('too many args')
        return
    utils.print_tip('updating sman...')
    os.system('git -C "%s" pull' % install_dir)
    custom_scripts_dirs = install.get_custom_dirs(install_dir)
    for custom_scripts_dir in custom_scripts_dirs:
        utils.print_tip("updating custom module folder '%s'..." % path.basename(custom_scripts_dir))
        os.system('git -C "%s" pull' % custom_scripts_dir)
    install.walk_and_gen_tab_complete(install_dir, True)
    __set_last_updated_day()
def get_author_name_parts(author_data, max_list_length=50):
    author_list = []
    author_names = author_data.split(' and ')
    list_length = 0
    for display_name in author_names:
        list_length += 1
        if list_length > max_list_length:
            break
        # occasional leading '-' before some initials
        display_name = display_name.replace(' -', ' ')
        author_dict = {'display_name': display_name, 'suffix': '',
                       'corresponding': 'false', 'uf': 'false'}
        if ' Jr.,' in display_name:
            author_dict['suffix'] = 'Jr.'
            display_name = display_name.replace(' Jr.,', '')
        if ' III,' in display_name:
            author_dict['suffix'] = 'III'
            display_name = display_name.replace(' III,', '')
        if ',' in display_name:
            k = display_name.find(',')
            author_dict['last'] = display_name[0:k]
            remainder = display_name[k + 2:]
            if ' ' in remainder:
                k = remainder.find(' ')
                author_dict['first'] = remainder[0:k].replace('.', '')
                author_dict['middle'] = remainder[k + 1:].replace('.', '')
            else:
                author_dict['first'] = remainder.replace('.', '')
                author_dict['middle'] = ''
        else:
            author_dict['last'] = display_name
            author_dict['first'] = ''
            author_dict['middle'] = ''
        author_list.append(author_dict)
    utils.print_err("{} Authors in list: {}".format(len(author_list), author_list))
    return author_list
def create_hosts(argvs):
    """
    create hosts
    :param argvs:
    :return:
    """
    if "-f" in argvs:
        hosts_file = argvs[argvs.index("-f") + 1]
    else:
        print_err("invalid usage, should be:\ncreate_hosts -f <the new hosts file>", quit=True)
    source = yaml_parser(hosts_file)
    if source:
        for key, val in source.items():
            print(key, val)
            obj = models.Host(hostname=key, ip_addr=val.get("ip_addr"),
                              port=val.get("port") or 22)
            session.add(obj)
        session.commit()
def create_remoteusers(argvs):
    """
    create remoteusers
    :param argvs:
    :return:
    """
    if "-f" in argvs:
        remoteusers_file = argvs[argvs.index("-f") + 1]
    else:
        print_err("invalid usage, should be:\ncreate_remoteusers -f <the new remoteusers file>",
                  quit=True)
    source = yaml_parser(remoteusers_file)
    if source:
        for key, val in source.items():
            print(key, val)
            obj = models.RemoteUser(
                username=val.get("username"),
                auth_type=val.get("auth_type"),
                password=val.get("password")
            )
            session.add(obj)
        session.commit()
def cleanup(self):
    if ARCHIVE_DIR != "":
        try:
            if not os.path.isdir(ARCHIVE_DIR):
                raise OSError(ARCHIVE_DIR + " is missing or not a folder.")
            for arch in Arch.archs:
                shutil.move(self._install_files[arch], ARCHIVE_DIR)
        except OSError as err:
            print_err("Cleanup failed. Could not move installers.")
            print_err(str(err))
            return False
    try:
        os.chdir(self._paths[PTBASEDIR])
        shutil.rmtree(self._paths[PKGBASEDIR])
    except Exception as err:
        print_err("Cleanup failed.")
        print_err(str(err))
        return False
    return True
def copy_libs(file_list):
    status = True
    for entry in file_list:
        srcfile, dest = entry
        try:
            print(os.path.split(srcfile)[1])
            shutil.copy(srcfile, dest)
        except OSError as err:
            print_err(str(err))
            print_err('Source: ' + srcfile)
            print_err('Dest : ' + dest)
            status = False
    return status
def publisher_name_filter(input_publisher):
    new_pub_name = input_publisher
    print_err("publisher is: {}".format(input_publisher))
    for row in names.values():
        if input_publisher == row['original']:
            print_err("We found a match at {}".format(row['original']))
            new_pub_name = row['improved']
            print_err("improve name is: {}".format(new_pub_name))
            # line = input_publisher.replace(row['original'], row['improved'])
            return new_pub_name
    # print_err("returned_publisher is: {}".format(input_publisher))
    return new_pub_name
def get_author_uris(author_row_data, title, disamb_dict, paper_uri):
    author_list_out = []
    author_data = get_author_name_parts(author_row_data)
    # utils.print_err("Author data is:\n {}".format(author_data))
    for author in author_data:
        utils.print_err("author is: \n{}".format(author))
        author_uris = utils.get_author_disambiguation_data(
            vivo_auth_disambig_data, author['last'], author['first'], author['middle'])
        # utils.print_err("author_uris: \n{}".format(author_uris))
        count = len(author_uris)
        if count == 1:
            author_list_builder = author_uris[0]
        else:
            author_list_builder = author_uris[0]
            utils.print_err("Disamb: {}".format(author_uris))
            disamb_dict.append("Paper: {} -- at {}\n{} : \n{} \n\n".format(
                title, paper_uri, author['display_name'], author_uris))
        if len(author_list_out) == 0:
            author_list_out = author_list_builder
            utils.print_err("author_list_out: \n{}".format(author_list_out))
        elif len(author_list_out) >= 1 and len(author_list_builder) > 0:
            author_list_out += ";"
            author_list_out += author_list_builder
    return author_list_out
# Piped in file
data_in = read_csv_fp(sys.stdin)
print >>sys.stderr, len(data_in)

# file_name = '/Users/asura/git/vivo-pump/author_list.csv'
# @TODO: pass file name path as a command line parameter
file_name = 'vivo_author_list.csv'
# utils.print_err("Using static disambiguation file: {}".format(file_name))

vivo_journals = get_vivo_journals(parms)

# get dictionaries of authors keyed by name parts
vivo_auth_disambig_data = utils.get_vivo_disambiguation_data_from_csv(file_name)
utils.print_err("Finished loading {} entries from: {}"
                .format(len(vivo_auth_disambig_data), file_name))

data_out = {}
row_out = 0
for row_index, row_data in data_in.items():
    utils.print_err("\nrow_index is: \n{}".format(row_index))
    utils.print_err("\nrow_data is: \n{}".format(row_data))
    data_out['author'] = get_author_uris(row_data['author'], row_data['title'],
                                         disamb_dict, row_data['uri'])
    data_out['affiliation'] = get_author_affiliation(row_data['affiliation'])
    try:
        if len(vivo_journals.get(row_data['issn'])) > 0:
            issn_uri = vivo_journals.get(row_data['issn'])
        else:
        for affiliation in affiliations:
            # look in each affiliation group
            if affiliation['affiliation_string'].find(find_string) > -1:
                author_dict['uf'] = affiliation['uf']
                # if you find the author, use the affiliation of the group
                # and don't look further. If you don't find the author
                # the default affiliation is uf false
                continue
    # print_err("{} Authors in list: {}".format(len(author_list), author_list))
    return author_list


data_in = read_csv_fp(sys.stdin)
column_names = data_in[1].keys()
print_err("==> {} columns in the input: {} "
          .format(len(column_names), column_names))
data_out = {}
row_out = 0
keep_names = set(['remove', 'uri', 'display_name', 'suffix', 'first', 'last',
                  'middle', 'corresponding', 'uf'])
for row, data in data_in.items():
    new_data = dict(data)
    author_data = parse_author_data(new_data['author'], new_data['affiliation'])

    # Add these columns
    new_data['remove'] = ''
    new_data['uri'] = ''
    new_data['display_name'] = ''
def main(cli_params):
    print("\nPhotivo for Windows package builder", SCRIPT_VERSION)
    print(DIVIDER, end="\n\n")

    if not os.path.isfile(os.path.join(os.getcwd(), "photivo.pro")):
        print_err("ERROR: Photivo repository not found. Please run this script from the folder")
        print_err('where "photivo.pro" is located.')
        return False

    # setup, config and pre-build checks
    if not load_ini_file():
        return False
    paths = build_paths(os.getcwd())
    if not check_build_env(paths):
        return False
    if not prepare_dirs(paths):
        return False

    archlist = Arch.archs
    fullrelease = True
    if len(cli_params) > 0:
        if cli_params[0] == "32":
            print_warn("Only building 32bit package!")
            archlist = [Arch.win32]
            fullrelease = False
        elif cli_params[0] == "64":
            print_warn("Only building 64bit package!")
            archlist = [Arch.win64]
            fullrelease = False

    # build and package everything
    builder = PhotivoBuilder(paths)
    for arch in archlist:
        if not builder.build(arch):
            return False
        if not builder.package(arch):
            return False

    # final summary and option to clean up
    if not builder.show_summary():
        print_err("Something went wrong along the way.")
        return False

    if fullrelease:
        print_ok("Everything looks fine.")
        print("You can test and upload the release now.")
        print("\nAfterwards I can clean up automatically, i.e.:")
        if ARCHIVE_DIR == "":
            print("* delete everything created during the build process.")
        else:
            print("* move installers to", ARCHIVE_DIR)
            print("* delete everything else created during the build process")
        if wait_for_yesno("\nShall I clean up now?"):
            if not builder.cleanup():
                return False
        else:
            print("OK. The mess stays.")
    else:
        print_warn("Remember: Only the " + ArchNames.names[archlist[0]] + " installer was built.")

    print_ok("All done.")
    return True
# -----------------------------------------------------------------------
if __name__ == "__main__":
    try:
        sys.exit(0 if main(sys.argv[1:]) else 1)
    except KeyboardInterrupt:
        print_err("\nAborted by the user.")
        sys.exit(1)
def check_build_env(paths):
    # Force English output from Mercurial
    os.environ["HGPLAIN"] = "true"

    # Check presence of required commands
    cmds_ok = True
    for cmd in CMD:
        cmds_ok = check_bin([CMD[cmd]] + CMD_PARAMS_FOR_TEST[cmd]) and cmds_ok
    if not cmds_ok:
        return False

    hgbranch = get_cmd_output([CMD[HG], "branch"])
    if hgbranch != "default":
        print_warn('Working copy is set to branch "%s" instead of "default".' % (hgbranch))
        if not wait_for_yesno("Continue anyway?"):
            return False

    # Working copy should be clean. The only exception is the Changelog.txt file.
    # Ignoring that makes it possible to start the release script and edit the
    # changelog while it is running.
    if not "commit: (clean)" in get_cmd_output([CMD[HG], "summary"]):
        hgstatus = get_cmd_output([CMD[HG], "status"]).split("\n")
        for file_entry in hgstatus:
            if (len(file_entry) > 0) and (not "Changelog.txt" in file_entry):
                print_warn("Working copy has uncommitted changes.")
                if wait_for_yesno("Continue anyway?"):
                    break
                else:
                    return False

    files_ok = True
    # files must be present
    if not os.path.isfile(paths[ISSFILE][Arch.win32]):
        print_err('ERROR: Installer script "%s" missing.' % paths[ISSFILE][Arch.win32])
        files_ok = False
    if not os.path.isfile(paths[ISSFILE][Arch.win64]):
        print_err('ERROR: Installer script "%s" missing.' % paths[ISSFILE][Arch.win64])
        files_ok = False
    if not os.path.isfile(paths[CHLOGFILE]):
        print_err('ERROR: File "%s" missing.' % paths[CHLOGFILE])
        files_ok = False
    if not os.path.isfile(paths[LICFILE]):
        print_err('ERROR: File "%s" missing.' % paths[LICFILE])
        files_ok = False
    if not os.path.isfile(paths[LIC3FILE]):
        print_err('ERROR: File "%s" missing.' % paths[LIC3FILE])
        files_ok = False
    if not os.path.isfile(paths[DATESTYFILE]):
        print_err('ERROR: Style file "%s" missing.' % paths[DATESTYFILE])
        files_ok = False
    if not os.path.isfile(paths[VERSTYFILE]):
        print_err('ERROR: Style file "%s" missing.' % paths[VERSTYFILE])
        files_ok = False

    return files_ok
    ?uri ?doi
    WHERE {
        ?uri a vivo:InformationResource .
        ?uri bibo:doi ?doi .
    }
    """
    results = vivo_query(query, parms)
    bindings = results['results']['bindings']
    doi_list = [b['doi']['value'] for b in bindings]
    uri_list = [b['uri']['value'] for b in bindings]
    return dict(zip(doi_list, uri_list))


parms = get_parms()
date = time.strftime("%Y_%m_%d")
data_in = read_csv_fp(sys.stdin)
utils.print_err("{} rows in the input".format(len(data_in)))
data_out = {}

# get dictionary of pub uri keyed by doi
vivo_pubs = get_vivo_academic_articles(parms)
pubs_missing_doi_file = open('data_out/pubs_missing_doi_' + date + '.txt', 'w+')
pubs_missing_doi_dict = {}
vivo_journals = get_vivo_journals(parms)
utils.print_err('{} publications found in VIVO'.format(len(vivo_pubs)))
# print >>sys.stderr, vivo_pubs

for row, data in data_in.items():
def err_msg():
    print_err("ERROR: Missing or incomplete config file (ptrelease.ini)!")
    print_err('Must at least contain section [paths] with entry "toolchain".')