def color_from_string(s):
    """ todo: add more, see matplotlib.colors.cnames """
    colors = {'r': (255, 0, 0), 'g': (0, 255, 0), 'b': (0, 0, 255)}
    if s in colors:
        return colors[s]
    else:
        ut.fail('unknown color: %s' % s)
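# Usage sketch for color_from_string above: single-letter names map to the RGB
# triples in the lookup table; anything else falls through to ut.fail(). The
# demo helper name is hypothetical.
def _demo_color_lookup():
    assert color_from_string('r') == (255, 0, 0)
    assert color_from_string('b') == (0, 0, 255)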
def load_data():
    global banned_users
    global badsubs
    global root_only_subs
    global totalposted
    global imgur_client_id
    global banned_users_comment
    global badsubs_comment
    global root_only_subs_comment
    global totalposted_comment
    imgur_client_id = datafile_lines[2].strip()
    banned_users_comment = "t1_" + datafile_lines[3].strip()
    badsubs_comment = "t1_" + datafile_lines[4].strip()
    root_only_subs_comment = "t1_" + datafile_lines[5].strip()
    totalposted_comment = "t1_" + datafile_lines[6].strip()
    try:
        banned_users = r.get_info(thing_id=banned_users_comment).body.split()
        badsubs = r.get_info(thing_id=badsubs_comment).body.split()
        root_only_subs = r.get_info(thing_id=root_only_subs_comment).body.split()
        totalposted = int(float(r.get_info(thing_id=totalposted_comment).body))
        success("DATA LOADED")
    except Exception as e:
        traceback.print_exc()
        fail("DATA LOAD FAILED: %s" % e)
        exit()
def post_reply(reply, post):
    global badsubs
    global submissioncount
    global totalposted
    # This is a quick hack to fix the double list issue (git issue #12)
    # Please find the actual source of this bug, and delete this hack
    # It removes any sentences that are posted more than once
    lines = []
    for line in reply.split("\n"):
        if line not in lines:
            lines.append(line)
    reply = '\n'.join(lines)
    try:
        reply = "#####\t\n\n######\t\n\n####\t\n" + reply + "\n^Parent ^commenter ^can [^toggle ^NSFW](http://www.np.reddit.com/message/compose?to=autowikiabot&subject=AutoWikibot NSFW toggle&message=%2Btoggle-nsfw+____id____) ^or[](#or) [^delete](http://www.np.reddit.com/message/compose?to=autowikiabot&subject=AutoWikibot Deletion&message=%2Bdelete+____id____)^. ^Will ^also ^delete ^on ^comment ^score ^of ^-1 ^or ^less. ^| [^(FAQs)](http://www.np.reddit.com/r/autowikiabot/wiki/index) ^| [^Source](https://github.com/Timidger/autowikiabot-py)\n ^(Please note this bot is in testing. Any help would be greatly appreciated, even if it is just a bug report! Please checkout the) [^source ^code](https://github.com/Timidger/autowikiabot-py) ^(to submit bugs)"
        a = post.reply('[#placeholder-awb]Comment is being processed... It will be automatically replaced by new text within a minute or will be deleted if that fails.')
        postsuccess = r.get_info(thing_id='t1_' + str(a.id)).edit(reply.replace('____id____', str(a.id)))
        if not postsuccess:
            raise Exception('reply unsuccessful')
        totalposted = totalposted + 1
        submissioncount[str(post.submission.id)] += 1
        success("[OK] #%s " % totalposted)
        return True
    except Exception as e:
        warn("REPLY FAILED: %s @ %s" % (e, post.subreddit))
        if str(e) == '(TOO_LONG) `this is too long (max: 15000.0)` on field `text`':
            a.delete()
        elif str(e) == '403 Client Error: Forbidden' and str(post.subreddit) not in badsubs:
            badsubs = badsubs_page.content_md.strip().split()
            badsubs.append(str(post.subreddit))
            editsummary = 'added ' + str(post.subreddit)
            save_changing_variables(editsummary)
        else:
            fail(e)
            a.delete()
        return False
def calculate_range(
    repository_branch: str,
    git_helper: GitHelper,
    github_helper: GitHubRepositoryHelper,
) -> str:
    repo = git_helper.repo
    branch_head = git_helper.fetch_head(ref=repository_branch)
    if not branch_head:
        fail('could not determine branch head of {branch} branch'.format(
            branch=repository_branch))
    range_start = _.head(
        reachable_release_tags_from_commit(github_helper, repo, branch_head))

    try:
        # better readable range_end by describing head commit
        range_end = repo.git.describe(branch_head, tags=True)
    except GitError as err:
        warning(
            'failed to describe branch head, maybe the repository has no tags? '
            'GitError: {err}. Falling back to branch head commit hash.'.format(
                err=err))
        range_end = branch_head.hexsha

    commit_range = "{start}..{end}".format(start=range_start, end=range_end)
    return commit_range
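# Worked sketch of the commit range string returned by calculate_range above;
# the tag and 'git describe' output below are hypothetical stand-ins for
# range_start and range_end.
def _demo_commit_range():
    range_start = '1.4.0'
    range_end = '1.5.0-3-gabc1234'
    return "{start}..{end}".format(start=range_start, end=range_end)  # '1.4.0..1.5.0-3-gabc1234'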
def path_from_im(x, output_dir, img_encoding='.png'):
    if type(x) == type((1,)):
        x = list(x)
        assert x[0] in ('img', 'img_mv')
        mv_im = (x[0] == 'img_mv')
        if type(x[1]) == type(''):
            x[1] = os.path.abspath(x[1])
        im = x[1]
    else:
        mv_im = False
        im = x
    if type(im) == type(''):
        if mv_im:
            new_path = ut.make_temp(os.path.splitext(im)[1], dir=output_dir)
            #new_path = ut.make_temp(img_encoding, dir = output_dir)
            #os.rename(im, new_path)
            ut.sys_check_silent('mv', im, new_path)
            return os.path.split(new_path)[1]
        else:
            return im
    elif type(im) == type(np.array([])):
        path = ut.make_temp(img_encoding, dir=output_dir)
        ig.save(path, im)
        # use relative path in webpage so the directory can be moved
        return os.path.split(path)[1]
    else:
        ut.fail("Don't know how to handle image type: %s" % str(type(im)))
def buildTerminationSignal(check_config):
    """ Builds a termination signal and returns it. """
    kind = check_config.kind
    if not kind in TERMINATION_SIGNALS:
        fail('Unknown termination signal kind: ' + kind)

    return TERMINATION_SIGNALS[kind](check_config)
def buildHealthCheck(check_config):
    """ Builds a health check to run and returns it. """
    kind = check_config.kind
    if not kind in HEALTH_CHECKS:
        fail('Unknown health check: ' + kind)

    return HEALTH_CHECKS[kind](check_config)
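# Hedged sketch of the registry pattern the two builders above rely on: a dict
# mapping a 'kind' string to a constructor that accepts the check_config. The
# keys and classes below are hypothetical placeholders, not names taken from
# the source.
class _ExampleTcpCheck:
    def __init__(self, check_config):
        self.config = check_config

class _ExampleHttpCheck:
    def __init__(self, check_config):
        self.config = check_config

_EXAMPLE_HEALTH_CHECKS = {
    'tcp': _ExampleTcpCheck,
    'http': _ExampleHttpCheck,
}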
def install_collector(client, collector, params):
    fail = False
    installer = download_installer(client, collector, params)

    # ensure installer is executable
    os.chmod(installer, 0755)

    logging.debug('installing ' + str(installer))
    result = util.shell([str(installer), ' -y'])
    if result['code'] != 0 or result['stderr'] != '':
        err = result['stderr']
        # if we failed but there's no stderr, set err msg to stdout
        if err == '':
            err = result['stdout']

        logging.debug('Collector install failed')
        logging.debug('stdout: ' + str(result['stdout']))
        logging.debug('stderr: ' + str(result['stderr']))

        logging.debug('Cleaning up collector install directory')
        util.remove_path(config.INSTALL_PATH + config.AGENT_DIRECTORY)
        fail = True

    # be nice and clean up
    logging.debug('Cleaning up downloaded installer')
    util.remove_path(installer)

    if fail:
        util.fail(err)
def create_or_update_file(
    self,
    repository_branch: str,
    repository_version_file_path: str,
    file_contents: str,
    commit_message: str
) -> str:
    try:
        contents = self.repository.file_contents(
            path=repository_version_file_path,
            ref=repository_branch
        )
    except NotFoundError:
        contents = None  # file did not yet exist

    if contents:
        decoded_contents = contents.decoded.decode('utf-8')
        if decoded_contents == file_contents:
            # Nothing to do
            return util.info("Repository file contents are identical to passed file contents.")
        else:
            response = contents.update(commit_message, file_contents.encode('utf-8'), branch=repository_branch)
    else:
        response = self.repository.create_file(
            path=repository_version_file_path,
            message=commit_message,
            content=file_contents.encode('utf-8'),
            branch=repository_branch
        )

    if not response:
        util.fail('failed to update or create file (missing privileges?)')

    return response["commit"].sha
def load_data():
    global banned_users
    global badsubs
    global root_only_subs
    global summon_only_subs
    global imgur_client_id
    global banned_users_page
    global badsubs_page
    global root_only_subs_page
    global summon_only_subs_page
    imgur_client_id = datafile_lines[2].strip()
    banned_users_page = r.get_wiki_page('autowikiabot', 'userblacklist')
    badsubs_page = r.get_wiki_page('autowikiabot', 'excludedsubs')
    root_only_subs_page = r.get_wiki_page('autowikiabot', 'rootonlysubs')
    summon_only_subs_page = r.get_wiki_page('autowikiabot', 'summononlysubs')
    try:
        banned_users = banned_users_page.content_md.strip().split()
        badsubs = badsubs_page.content_md.strip().split()
        root_only_subs = root_only_subs_page.content_md.strip().split()
        summon_only_subs = summon_only_subs_page.content_md.strip().split()
        success("DATA LOADED")
    except Exception as e:
        #traceback.print_exc()
        fail("DATA LOAD FAILED: %s" % e)
        exit()
def copy_data(data_file, table, mycursor):
    """
    Copy the data contents of DATA_FILE to temporary table TABLE.
    Returns TRUE if successful, FALSE otherwise.
    Uses cursor MYCURSOR.
    """
    sql = """
        COPY %s (
            time_stamp_utc,
            reading
        )
        FROM '%s'
        WITH DELIMITER ','
        NULL AS 'Null'
        CSV HEADER
    """ % (table, data_file)

    print("Copying data to temporary table '%s' ..." % (table)),

    try:
        exec_and_commit(mycursor, sql)
        util.done()
        return True
    except pyodbc.Error, copy_err:
        util.fail()
        print(copy_err)
        return False
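# Usage sketch (hypothetical path, table name, and cursor): copy_data above
# interpolates the table and file path straight into the COPY statement, so
# both must come from trusted configuration rather than user input.
def _demo_copy(cursor):
    return copy_data('/tmp/readings.csv', 'tmp_readings', cursor)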
def load_data():
    global banned_users
    global badsubs  #banned subs?
    global root_only_subs
    global summon_only_subs
    global imgur_client_id
    global banned_users_page
    global badsubs_page
    global root_only_subs_page
    global summon_only_subs_page
    imgur_client_id = datafile_lines[2].strip()
    #load wiki pages for lists of users and sub settings
    #TODO change sub name
    banned_users_page = r.get_wiki_page('autowikibot', 'userblacklist')
    badsubs_page = r.get_wiki_page('autowikibot', 'excludedsubs')
    root_only_subs_page = r.get_wiki_page('autowikibot', 'rootonlysubs')
    summon_only_subs_page = r.get_wiki_page('autowikibot', 'summononlysubs')
    #extract info from sub wiki pages
    try:
        banned_users = banned_users_page.content_md.strip().split()
        badsubs = badsubs_page.content_md.strip().split()
        root_only_subs = root_only_subs_page.content_md.strip().split()
        summon_only_subs = summon_only_subs_page.content_md.strip().split()
        success("DATA LOADED")
    except Exception as e:
        #traceback.print_exc()
        fail("DATA LOAD FAILED: %s" % e)
        exit()
def load_data():
    global banned_users
    global badsubs
    global root_only_subs
    global totalposted
    global imgur_client_id
    global banned_users_comment
    global badsubs_comment
    global root_only_subs_comment
    global totalposted_comment
    imgur_client_id = datafile_lines[2].strip()
    banned_users_comment = "t1_" + datafile_lines[3].strip()
    badsubs_comment = "t1_" + datafile_lines[4].strip()
    root_only_subs_comment = "t1_" + datafile_lines[5].strip()
    totalposted_comment = "t1_" + datafile_lines[6].strip()
    try:
        banned_users = r.get_info(thing_id=banned_users_comment).body.split()
        badsubs = r.get_info(thing_id=badsubs_comment).body.split()
        root_only_subs = r.get_info(
            thing_id=root_only_subs_comment).body.split()
        totalposted = int(float(r.get_info(thing_id=totalposted_comment).body))
        success("DATA LOADED")
    except Exception as e:
        traceback.print_exc()
        fail("DATA LOAD FAILED: %s" % e)
        exit()
def find_collector_group_id(client, collector_group_name):
    logging.debug('Finding collector group ' + str(collector_group_name))

    # if the root group is set, no need to search
    if collector_group_name == '/':
        return 1

    # trim leading / if it exists
    collector_group_name = collector_group_name.lstrip('/')

    collector_groups = None
    try:
        collector_groups = client.get_collector_group_list(size=-1)
    except ApiException as e:
        err = ('Exception when calling get_collector_group_list: ' + str(e) + '\n')
        util.fail(err)

    if collector_groups.status != 200:
        err = ('Error ' + str(collector_groups.status) +
               ' calling get_collector_group_list: ' +
               str(collector_groups.errmsg) + '\n')
        util.fail(err)

    # look for matching collector group
    for item in collector_groups.data.items:
        if item.name == collector_group_name:
            return item.id

    return None
def collector(client, params):
    obj = None
    kwargs = {
        'enable_fail_back': params['enable_fail_back'],
        'escalating_chain_id': params['escalating_chain_id'],
        'need_auto_create_collector_device': False
    }

    if 'backup_collector_id' in params and params['backup_collector_id']:
        kwargs['backup_agent_id'] = params['backup_collector_id']
    if 'description' in params and params['description']:
        kwargs['description'] = params['description']
    else:
        kwargs['description'] = socket.getfqdn()
    if 'collector_id' in params and params['collector_id']:
        kwargs['collector_id'] = params['collector_id']
    if 'resend_interval' in params and params['resend_interval']:
        kwargs['resend_ival'] = params['resend_interval']
    if 'suppress_alert_clear' in params and params['suppress_alert_clear']:
        kwargs['suppress_alert_clear'] = params['suppress_alert_clear']

    collector_group = find_collector_group_id(client, params['collector_group'])
    if collector_group is not None:
        kwargs['collector_group_id'] = collector_group
    else:
        err = ('Collector group ' + params['collector_group'] + ' does not exist.')
        util.fail(err)

    try:
        obj = lm_sdk.RestCollector(**kwargs)
        return obj
    except Exception as e:
        err = 'Exception creating object: ' + str(e) + '\n'
        util.fail(err)
def color_from_string(s):
    """ todo: add more, see matplotlib.colors.cnames """
    colors = {'r': (255, 0, 0), 'g': (0, 255, 0), 'b': (0, 0, 255)}
    if s in colors:
        return colors[s]
    else:
        ut.fail('unknown color: %s' % s)
def load_ids(id_list, mycursor):
    """
    Insert ID list ID_LIST into the 'meter' table and return the meter ID
    created by the insertion. Returns -1 in case of failure.

    The order of ID_LIST is as follows:
    [ description, unit_id, commodity_id, source_system_id, reading_type_id ]

    Uses cursor MYCURSOR.
    """
    sql = """
        INSERT INTO meter (
            description,
            unit_id,
            commodity_id,
            source_system_id,
            reading_type_id
        )
        VALUES ('%s', %d, %d, %d, %d)
        RETURNING id
    """ % (id_list[0], id_list[1], id_list[2], id_list[3], id_list[4])

    print("Inserting ID's into 'meter' table ..."),

    try:
        exec_and_commit(mycursor, sql)
        result = mycursor.fetchone()
        util.done()
        return result.id
    except pyodbc.Error, get_meter_id_err:
        util.fail()
        print(get_meter_id_err)
        return -1
def download_installer(client, collector, params):
    logging.debug('downloading collector ' + str(collector.id))

    os_and_arch = None
    if sys.maxsize > 2**32:
        os_and_arch = config.DEFAULT_OS + '64'
    else:
        os_and_arch = config.DEFAULT_OS + '32'

    resp = None
    kwargs = {
        'collector_size': params['collector_size'],
        'use_ea': params['use_ea']
    }

    if 'collector_version' in params and params['collector_version']:
        kwargs['collector_version'] = params['collector_version']

    try:
        resp = client.install_collector(str(collector.id), os_and_arch, **kwargs)
    except ApiException as e:
        err = 'Exception when calling install_collector: ' + str(e) + '\n'
        util.fail(err)

    # detect cases where we download an invalid installer
    statinfo = os.stat(resp)
    if statinfo.st_size < 1000:
        err = (
            'Downloaded collector installer is ' + str(statinfo.st_size) +
            ' bytes. This indicates an issue with ' +
            'the download process. Most likely the collector_version ' +
            'is invalid. See ' +
            'https://www.logicmonitor.com/support/settings/collectors/collector-versions/ ' +
            'for more information on collector versioning.')
        util.fail(err)

    return resp
def path_from_im(x, output_dir, img_encoding='.png'):
    if type(x) == type((1, )) and (x[0] in ('img', 'img_mv')):
        x = list(x)
        mv_im = (x[0] == 'img_mv')
        if type(x[1]) == type(''):
            x[1] = os.path.abspath(x[1])
        im = x[1]
    elif type(x) == type((1, )) and (x[0] in ('img-png', )):
        assert type(x[1]) == type('')
        path = ut.make_temp('.png', dir=output_dir)
        f = open(path, 'wb')
        f.write(x[1])
        f.close()
        mv_im = False
        im = os.path.split(path)[1]
    else:
        mv_im = False
        im = x
    if type(im) == type(''):
        if mv_im:
            new_path = ut.make_temp(os.path.splitext(im)[1], dir=output_dir)
            #new_path = ut.make_temp(img_encoding, dir = output_dir)
            #os.rename(im, new_path)
            ut.sys_check_silent('mv', im, new_path)
            return os.path.split(new_path)[1]
        else:
            return im
    elif type(im) == type(np.array([])):
        path = ut.make_temp(img_encoding, dir=output_dir)
        ig.save(path, im)
        # use relative path in webpage so the directory can be moved
        return os.path.split(path)[1]
    else:
        ut.fail("Don't know how to handle image type: %s" % str(type(im)))
def get_reading_type(meter_id, quantity_id, start_date, end_date, my_cursor):
    """
    Return the reading type for meter METER_ID with quantity id QUANTITY_ID
    between START_DATE and END_DATE (Totalization, Interval) or NONE if an
    error occurs. Uses cursor MY_CURSOR.
    """
    # Compare the first 100 readings ordered by timestamp against the same
    # readings ordered by value: if the two orderings agree on every row, the
    # series is monotone (non-decreasing), i.e. a totalization reading.
    monotonic_sql = """
        SELECT CASE WHEN COUNT(*) = 0 THEN 1 ELSE 0 END AS IsMonotone
        FROM (
            SELECT TOP 100
                ROW_NUMBER() OVER (ORDER BY TimestampUTC) AS RowNum,
                Value
            FROM DataLog2
            WHERE SourceID = %d
              AND QuantityID = %d
              AND TimestampUTC >= CAST('%s' AS datetime2)
              AND TimestampUTC < CAST('%s' AS datetime2)
        ) T1
        INNER JOIN (
            SELECT TOP 100
                ROW_NUMBER() OVER (ORDER BY Value) AS RowNum,
                Value
            FROM DataLog2
            WHERE SourceID = %d
              AND QuantityID = %d
              AND TimestampUTC >= CAST('%s' AS datetime2)
              AND TimestampUTC < CAST('%s' AS datetime2)
        ) T2
        ON T1.RowNum = T2.RowNum
        WHERE T1.Value <> T2.Value
    """ % (meter_id, quantity_id, start_date, end_date,
           meter_id, quantity_id, start_date, end_date)

    print("Getting reading type ..."),

    try:
        my_cursor.execute(monotonic_sql)
    except pyodbc.Error, monotonic_err:
        util.fail()
        print(monotonic_err)
        return None
def find_collector_by_description(client, params):
    if 'description' not in params or not params['description']:
        return None

    logging.debug('finding collector ' + str(params['description']))

    collectors = None
    try:
        collectors = client.get_collector_list(size=-1)
    except ApiException as e:
        err = 'Exception when calling get_collector_list: ' + str(e) + '\n'
        util.fail(err)

    if collectors.status != 200:
        err = (
            'Error ' + str(collectors.status) +
            ' calling get_collector_list: ' + str(collectors.errmsg) + '\n'
        )
        util.fail(err)

    if 'description' in params and params['description']:
        for item in collectors.data.items:
            if item.description == params['description']:
                return item

    return None
def go(driver, lsg):
    while True:
        try:
            temp = lsg.get()
            if temp == ins.state.go_forward:
                print("Forward")
                driver.tankDrive(0.5, 0.5)
            elif temp == ins.state.go_left:
                print("Left")
                driver.tankDrive(-0.5, 0.5)
            elif temp == ins.state.go_right:
                print("Right")
                driver.tankDrive(0.5, -0.5)
            elif temp == ins.state.wtf:
                print("WTF?")
                driver.tankDrive(0, 0)
                util.fail()
            elif temp == ins.state.stop:
                print("Stop")
                driver.tankDrive(0, 0)
                break
            elif temp == ins.state.split_detected:
                driver.tankDrive(0, 0)
                print("Split")
        except:
            util.fail()
            break
def find_collector_group_id(client, collector_group_name):
    logging.debug('finding collector group ' + str(collector_group_name))

    # if the root group is set, no need to search
    if collector_group_name == '/':
        return 1

    # trim leading / if it exists
    collector_group_name = collector_group_name.lstrip('/')

    collector_groups = None
    try:
        collector_groups = client.get_collector_group_list(size=-1)
    except ApiException as e:
        err = (
            'Exception when calling get_collector_group_list: ' + str(e) + '\n'
        )
        util.fail(err)

    if collector_groups.status != 200:
        err = (
            'Error ' + str(collector_groups.status) +
            ' calling get_collector_group_list: ' +
            str(collector_groups.errmsg) + '\n'
        )
        util.fail(err)

    # look for matching collector group
    for item in collector_groups.data.items:
        if item.name == collector_group_name:
            return item.id

    return None
def post_reply(reply, post):
    global badsubs
    global submissioncount
    global totalposted
    try:
        #TODO change name
        #possibly remove? not gonna be nsfw
        reply = "#####\t\n\n######\t\n\n####\t\n" + reply + "^Parent ^commenter ^can [^toggle ^NSFW](/message/compose?to=autowikibot&subject=AutoWikibot NSFW toggle&message=%2Btoggle-nsfw+____id____) ^or[](#or) [^delete](/message/compose?to=autowikibot&subject=AutoWikibot Deletion&message=%2Bdelete+____id____)^. ^Will ^also ^delete ^on ^comment ^score ^of ^-1 ^or ^less. ^| [^(FAQs)](http://www.np.reddit.com/r/autowikibot/wiki/index) ^| [^Mods](http://www.np.reddit.com/r/autowikibot/comments/1x013o/for_moderators_switches_commands_and_css/) ^| [^Magic ^Words](http://www.np.reddit.com/r/autowikibot/comments/1ux484/ask_wikibot/)"
        a = post.reply('[#placeholder-awb]Comment is being processed... It will be automatically replaced by new text within a minute or will be deleted if that fails.')
        postsuccess = r.get_info(thing_id='t1_' + str(a.id)).edit(reply.replace('____id____', str(a.id)))
        if not postsuccess:
            raise Exception('reply unsuccessful')
        totalposted = totalposted + 1
        submissioncount[str(post.submission.id)] += 1
        success("[OK] #%s " % totalposted)
        return True
    except Exception as e:
        warn("REPLY FAILED: %s @ %s" % (e, post.subreddit))
        if str(e).find('TOO_LONG') > -1:
            a.delete()
        elif str(e) == '403 Client Error: Forbidden' and str(post.subreddit) not in badsubs:
            badsubs = badsubs_page.content_md.strip().split()
            badsubs.append(str(post.subreddit))
            editsummary = 'added ' + str(post.subreddit)
            save_changing_variables(editsummary)
        else:
            fail(e)
            a.delete()
        return False
def file_warning():
    fail("One or more of data files is not found or is corrupted.")
    log("Have them configured as follows:")
    log("already_done_dump - Create empty file if running for first time.")
    log("totalposted - Create empty file if running for first time.")
    log("badsubs - Create empty file if running for first time. Add excluded subreddits if you are using \"all\" as allowed subreddit. Leave empty if allowing specific subreddits.")
    log("banned_users - Create empty file if running for first time. Bot will add banned users automatically. Add manually on separate lines.")
    log("imgur_client_id - Put your imgur client_id in that file")
def test_bb_static():
    for BB in [MyBlackBox3, MyBlackBox4, MyBlackBox5, MyBlackBox6, MyBlackBox7]:
        try:
            mm = BB("MaMaMa")
            fail()
        except ecto.BlackBoxError as e:
            print "Good:"
            print str(e)
def index_ref(pargs, kwargs, node, content):
    """Handle [% i "some" "key" %]...text...[% /i %] index shortcodes."""
    # Badly formatted.
    if len(pargs) == 0:
        util.fail(f"Badly-formatted 'i' shortcode {pargs} in {node.filepath}")

    # Format.
    joined = ";".join(pargs)
    return f'<span class="indexref" key="{joined}" markdown="1">{content}</span>'
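# Usage sketch for index_ref above: pargs are joined with ';' into the key
# attribute. The node below is a hypothetical stand-in exposing only the
# filepath attribute that index_ref reads when reporting errors.
def _demo_index_ref():
    class _Node:
        filepath = "chapters/example.md"
    return index_ref(["some", "key"], {}, _Node(), "text")
    # -> '<span class="indexref" key="some;key" markdown="1">text</span>'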
def do_fail():
    debug = False
    try:
        fail('Error: ' + str(e).encode('ascii', errors='replace').decode('ascii'))
    except SystemExit:
        pass
    except AssertionError:
        debug = True
    return debug
def figure_ref(pargs, kwargs, node):
    """Handle [% f slug %] figure reference."""
    # Badly-formatted shortcode.
    if len(pargs) != 1:
        util.fail(f"Badly-formatted 'f' shortcode {pargs} in {node.filepath}")

    # Haven't collected information yet.
    if (figures := util.get_config("figures")) is None:
        return ""
def _pull_image(image_reference: str, outfileobj=None):
    import util
    util.not_none(image_reference)

    transport = _mk_transport()

    image_reference = normalise_image_reference(image_reference)
    image_reference = _parse_image_reference(image_reference)
    creds = _mk_credentials(image_reference=image_reference)

    # OCI Image Manifest is compatible with Docker Image Manifest Version 2,
    # Schema 2. We indicate support for both formats by passing both media types
    # as 'Accept' headers.
    #
    # For reference:
    #   OCI: https://github.com/opencontainers/image-spec
    #   Docker: https://docs.docker.com/registry/spec/manifest-v2-2/
    accept = docker_http.SUPPORTED_MANIFEST_MIMES

    try:
        # XXX TODO: use streaming rather than writing to local FS
        # if outfile is given, we must use it instead of an ano
        outfileobj = outfileobj if outfileobj else tempfile.TemporaryFile()
        with tarfile.open(fileobj=outfileobj, mode='w:') as tar:
            util.verbose(f'Pulling manifest list from {image_reference}..')
            with image_list.FromRegistry(image_reference, creds, transport) as img_list:
                if img_list.exists():
                    platform = image_list.Platform({
                        'architecture': _PROCESSOR_ARCHITECTURE,
                        'os': _OPERATING_SYSTEM,
                    })
                    # pytype: disable=wrong-arg-types
                    with img_list.resolve(platform) as default_child:
                        save.tarball(_make_tag_if_digest(image_reference), default_child, tar)
                        return outfileobj
                    # pytype: enable=wrong-arg-types

            util.info(f'Pulling v2.2 image from {image_reference}..')
            with v2_2_image.FromRegistry(image_reference, creds, transport, accept) as v2_2_img:
                if v2_2_img.exists():
                    save.tarball(_make_tag_if_digest(image_reference), v2_2_img, tar)
                    return outfileobj

            util.info(f'Pulling v2 image from {image_reference}..')
            with v2_image.FromRegistry(image_reference, creds, transport) as v2_img:
                with v2_compat.V22FromV2(v2_img) as v2_2_img:
                    save.tarball(_make_tag_if_digest(image_reference), v2_2_img, tar)
                    return outfileobj
    except Exception as e:
        outfileobj.close()
        util.fail(f'Error pulling and saving image {image_reference}: {e}')
def table_ref(pargs, kwargs, node):
    """Handle [% t slug %] table reference."""
    # Shortcode used improperly.
    if len(pargs) != 1:
        util.fail(f"Badly-formatted 't' shortcode {pargs} in {node.filepath}")

    # Haven't collected information yet.
    if (tables := util.get_config("tables")) is None:
        return ""
def test_modules_spec():
    g = ecto_test.Generate(start=0, step=2)
    x = g["out"]
    x = g["out", "out"]
    try:
        x = g[2.0]
        util.fail()
    except TypeError, e:
        print e
def find_template_file(template_name: str, template_path: [str]):
    # TODO: do not hard-code file name extension
    template_file_name = template_name + '.yaml'
    for path in template_path:
        for dirpath, _, filenames in os.walk(path):
            if template_file_name in filenames:
                return os.path.join(dirpath, template_file_name)
    fail('could not find template {t}, tried in {p}'.format(
        t=str(template_name),
        p=','.join(map(str, template_path))))
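# Usage sketch for find_template_file above (hypothetical template name and
# search path): the first directory tree containing 'deploy-job.yaml' wins;
# fail() is only reached after every path has been walked.
def _demo_find_template():
    return find_template_file('deploy-job', ['/etc/templates', './templates'])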
def patch_deployment(self, name: str, namespace: str, body: dict):
    '''Patches a deployment with a given name in the given namespace.'''
    not_empty(name)
    not_empty(namespace)
    not_empty(body)

    if not self.get_deployment(namespace, name):
        fail(f'Deployment {name} in namespace {namespace} does not exist')

    self.apps_api.patch_namespaced_deployment(name, namespace, body)
def ensureImage(self, client):
    """ Ensures that the image for this component is present locally.
    If not, we attempt to pull the image.
    """
    images = client.images(name = self.config.getFullImage())
    if not images or not len(images) > 0:
        try:
            client.pull(self.config.repo)
        except Exception as e:
            fail('Could not pull repo ' + self.config.repo, container = self, exception = str(e))
def get_collector_id_list_from_env():
    collector_ids = os.getenv('COLLECTOR_IDS')
    if not collector_ids:
        err = 'Environment variable COLLECTOR_IDS not set\n'
        util.fail(err)

    collector_ids = collector_ids.split(',')
    if len(collector_ids) < 1:
        err = 'Unable to parse ids from COLLECTOR_IDS\n'
        util.fail(err)

    return collector_ids
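# Usage sketch for get_collector_id_list_from_env above: COLLECTOR_IDS is read
# as a comma-separated list. The value set below is hypothetical; in practice
# it comes from the deployment environment.
def _demo_collector_ids():
    os.environ['COLLECTOR_IDS'] = '12,34,56'
    return get_collector_id_list_from_env()  # -> ['12', '34', '56']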
def typeconv(E):
    p = ecto.Plasm()
    e = E()
    a = Accept_none()
    p.connect(e[:] >> a[:])
    try:
        p.execute(niter=1)
        util.fail()
    except ecto.TypeMismatch, va:
        print "yeah, got typeconv error"
def test_bb_static():
    for BB in [
        MyBlackBox3, MyBlackBox4, MyBlackBox5, MyBlackBox6, MyBlackBox7
    ]:
        try:
            mm = BB("MaMaMa")
            fail()
        except ecto.BlackBoxError as e:
            print "Good:"
            print str(e)
def set_qos_status(self, status=0):
    if (status != 0 and status != 1):
        return fail('Invalid status, send 0 for disable, 1 for enable')
    req = requests.get(self.endpoint_url + '/api/misystem/qos_switch?on={}'.format(status))
    if (req.status_code == 200):
        return success('QoS status changed to {}'.format(status))
    return fail(
        'There was an error while setting the status of QoS to {}'.format(
            status))
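# Usage sketch for set_qos_status above: only 0 (disable) and 1 (enable) are
# accepted; any other value returns fail() before the HTTP request is made.
# The router instance passed in here is hypothetical.
def _demo_qos(router):
    router.set_qos_status(1)  # GET <endpoint_url>/api/misystem/qos_switch?on=1
    router.set_qos_status(2)  # rejected up front, no request sent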
def bib_ref(pargs, kwargs, node):
    """Handle [% b "key1,key2" %] bibliography reference shortcodes."""
    if len(pargs) == 0:
        util.fail(f"Empty 'b' shortcode in {node.filepath}")

    citations = util.make_config("citations")
    citations |= set(pargs)

    keys = [f'<a href="@root/bibliography/#{k}">{k}</a>' for k in pargs]
    return f"[{', '.join(keys)}]"
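# Usage sketch for bib_ref above: each citation key becomes a link into the
# bibliography page and is also recorded in the shared 'citations' config set.
# The node below is a hypothetical stand-in for error reporting only.
def _demo_bib_ref():
    class _Node:
        filepath = "chapters/example.md"
    return bib_ref(["key1", "key2"], {}, _Node())
    # -> '[<a href="@root/bibliography/#key1">key1</a>, <a href="@root/bibliography/#key2">key2</a>]'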
def diff_pipelines(left_file: CliHints.yaml_file(), right_file: CliHints.yaml_file()):
    from deepdiff import DeepDiff
    from pprint import pprint

    diff = DeepDiff(left_file, right_file, ignore_order=True)
    if diff:
        pprint(diff)
        fail('diffs were found')
    else:
        info('the yaml documents are equivalent')
def createContainer(self, client):
    """ Creates a docker container for this component and returns it. """
    command = self.getCommand()
    if not command:
        fail('No command defined in either gantry config or docker image for component ' + self.getName(),
             component = self)

    self.logger.debug('Starting container for component %s with command %s', self.getName(), command)
    container = client.create_container(self.config.getFullImage(), command,
                                         user = self.config.getUser(),
                                         ports = [str(p) for p in self.config.getContainerPorts()])
    return container
def get_collector_id():
    set_index = get_set_index()
    collector_ids = get_collector_id_list_from_env()
    set_index = parse_id(set_index)

    if len(collector_ids) < set_index + 1:
        err = ('Set index ' + str(set_index) +
               ' is greater than number of collector ids ' +
               str(len(collector_ids)) + '\n')
        util.fail(err)

    return collector_ids[set_index]
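# Worked sketch of the bounds check in get_collector_id above, with
# hypothetical values: three collector ids and a parsed set index of 1 pass
# the check, so the second id is returned.
def _demo_collector_index():
    collector_ids = ['12', '34', '56']
    set_index = 1
    assert not (len(collector_ids) < set_index + 1)
    return collector_ids[set_index]  # '34'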
def _include_file(node, filepath, file, *filters):
    """Include a file, filtering if asked to."""
    kind = filepath.split(".")[-1]
    try:
        with open(filepath, "r") as reader:
            lines = reader.readlines()
            for f in filters:
                lines = f(lines)
            return _make_html(filepath, file, kind, lines)
    except OSError:
        util.fail(f"Unable to read inclusion '{filepath}' in {node.filepath}.")
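# Usage sketch for _include_file above: each filter is a callable that takes
# and returns a list of lines; the strip-blank filter below and the example
# paths are hypothetical.
def _keep_nonblank(lines):
    return [line for line in lines if line.strip()]

# _include_file(node, "src/example.py", "example.py", _keep_nonblank)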
def _process_tables(node, tables):
    """Collect table information."""
    tables[node.slug] = []
    for (i, match) in enumerate(util.TABLE.finditer(node.text)):
        if (caption := util.TABLE_CAPTION.search(match.group(0))) is None:
            util.fail(
                f"Table div '{match.group(0)}' without caption in {node.filepath}"
            )
        if (slug := util.TABLE_ID.search(match.group(0))) is None:
            util.fail(
                f"Table div '{match.group(0)}' without ID in {node.filepath}")
def ex_async_twice(s, ex):
    s.execute_async(niter=5)
    print "once..."
    assert s.running()
    t = time.time()
    try:
        print "twice..."
        s.execute_async(niter=5)
        fail("that should have thrown")
    except ecto.EctoException, e:
        print "okay, threw"
    print "whee"
def get_kubecfg(self):
    if self.kubeconfig:
        return kubernetes.client.ApiClient(configuration=self.kubeconfig)

    kubeconfig = os.environ.get('KUBECONFIG', None)
    args = global_ctx().args
    if args and hasattr(args, 'kubeconfig') and args.kubeconfig:
        kubeconfig = args.kubeconfig
    if self.kubeconfig:
        kubeconfig = self.kubeconfig
    if not kubeconfig:
        fail('KUBECONFIG env var must be set')
    return config.load_kube_config(existing_file(kubeconfig))
def getConfigJSON(self):
    """ Returns the project's config JSON or raises an exception if none. """
    # Lookup the project on etcd. If none, report an error.
    config_json = None
    try:
        self.logger.debug('Looking up configuration for project %s in etcd', self.project_name)
        config_json = self.etcd_client.get(getProjectConfigPath(self.project_name)).value
    except KeyError as k:
        self.logger.exception(k)
        fail('Unknown project ' + self.project_name, project = self.project_name)

    return config_json
def ensureImage(self, client):
    """ Ensures that the image for this component is present locally.
    If not, we attempt to pull the image.
    """
    images = client.images(name=self.config.getFullImage())
    if not images or not len(images) > 0:
        try:
            client.pull(self.config.repo)
        except Exception as e:
            fail('Could not pull repo ' + self.config.repo,
                 component=self,
                 exception=str(e))
def test_required_param():
    plasm = ecto.Plasm()
    print "<DOC>", ecto_test.RequiredParam.__doc__, "</DOC>"
    # test
    assert "REQUIRED" in ecto_test.RequiredParam.__doc__
    # test doc default value printing
    assert "2.1253" in ecto_test.RequiredParam.__doc__
    try:
        req = ecto_test.RequiredParam("Required")
        print "egh, didn't throw"
        util.fail()
    except RuntimeError, e:
        print "Yup, there is our throw:", e
def get_collector_id():
    set_index = get_set_index()
    collector_ids = get_collector_id_list_from_env()
    set_index = parse_id(set_index)

    if len(collector_ids) < set_index + 1:
        err = (
            'Set index ' + str(set_index) +
            ' is greater than number of collector ids ' +
            str(len(collector_ids)) + '\n'
        )
        util.fail(err)

    return collector_ids[set_index]
def load(im_fname, gray = False):
    if im_fname.endswith('.gif'):
        print "GIFs don't load correctly for some reason"
        ut.fail('fail')
    im = from_pil(Image.open(im_fname))
    # use imread, then flip upside down
    #im = np.array(list(reversed(pylab.imread(im_fname)[:,:,:3])))
    if gray:
        return luminance(im)
    elif not gray and np.ndim(im) == 2:
        return rgb_from_gray(im)
    else:
        return im
def getConfig(self):
    """ Returns the project's config or raises an exception if none. """
    config_json = self.getConfigJSON()

    # Parse the project's configuration and save it.
    try:
        self.config = Configuration.parse(config_json)
    except ConfigParseException as cpe:
        fail('Error parsing gantry config', project = self.project_name, exception = cpe)
    except Exception as e:
        self.logger.exception(e)

    return self.config