def qstat(args):
    """Top-level qstat driver: build the cluster model, enrich it as the
    CLI flags require, then dispatch to the matching printer.

    args -- parsed CLI namespace; reads options, user_pattern, pending_jobs,
            cluster_only, expand, full, required_memory, physical_memory,
            swapped_memory, watch and no_sort.
    """
    clusters, pending_jobs = build_cluster(args.options, args.user_pattern)
    # Only keep pending jobs if the user asked for them.
    pending_jobs = pending_jobs if args.pending_jobs else None
    if not (args.cluster_only or args.expand or args.full):
        # Default view: plain status (optionally in watch mode).
        print_status(clusters, pending_jobs, args.watch)
    elif args.required_memory:
        jobids = flatten(c.get_jobids() for c in clusters)
        # required_memory may carry a specific resource name (string) or just
        # act as a boolean flag — presumably argparse allows both; verify.
        if isinstance(args.required_memory, str):
            vmem_list = get_resource_option(jobids, args.required_memory)
        else:
            vmem_list = get_resource_option(jobids)
        for cluster in clusters:
            add_host_info(cluster.queues)
            cluster.set_host_info()
            cluster.set_vmem(vmem_list)
            cluster.set_queue_printlen()
    elif args.physical_memory or args.swapped_memory:
        # Host info is enough for physical/swap views; no per-job vmem needed.
        for cluster in clusters:
            add_host_info(cluster.queues)
            cluster.set_host_info()
            cluster.set_queue_printlen()
    # Second dispatch: which detailed printer to use (if any).
    if args.cluster_only:
        print_cluster_status(clusters)
    elif args.expand or args.full:
        print_full_status(clusters, pending_jobs, not args.no_sort, args.full)
def sample():
    # Draw a list of samples, optionally interleaving separator samples.
    # NOTE(review): closes over `sep`, `m` and `self` from an enclosing
    # scope — presumably a helper nested inside a method; confirm.
    if sep:
        if m == 0:
            return []
        # m-1 (element, separator) pairs, then one trailing element:
        # 2*m - 1 samples in total.
        return lib.flatten((self.sample(), sep.sample())
                           for _ in range(0, m - 1)) + [self.sample()]
    # NOTE(review): this branch yields only m - 1 elements (no trailing
    # sample), which looks inconsistent with the separator branch — confirm
    # whether `range(0, m)` was intended.
    return [self.sample() for _ in range(0, m - 1)]
def loop(self):
    """Main IRC receive loop: buffer socket data, split into lines, run
    custom handler functions, answer PINGs and handle 'disconnect'."""
    while 1:
        self.buffer += self.irc.recv(4096)
        temp_buffer = self.buffer.split("\n")
        # Last element may be a partial line — keep it for the next recv.
        self.buffer = temp_buffer.pop()
        for line in temp_buffer:
            responses = []
            # Stubbing out functionality for custom functions. Eventually the big block below will be gone.
            for function in flatten(self.functions):
                try:
                    responses.append(function(self, line))
                except IndexError:
                    # I put this in here just to be safe. (Dirk)
                    pass
            # Standard control library
            try:
                message = Message(line)
            except IndexError:
                # NOTE(review): if Message() raises here, `message` stays
                # unbound (or stale from a previous iteration) and the
                # else-branch below can fail — confirm intended.
                pass
            line = line.strip().split()
            if line[0] == "PING":
                self.irc.send("PONG %s\r\n" % line[1])
            else:
                try:
                    # Disconnect functionality (revisited)
                    command = message.command(self.command)
                    if command.__str__() == 'disconnect':
                        if not message.argv(self.command) == self.password:
                            if message.is_public():
                                # NOTE(review): `koomar` is a global, while
                                # other sends use self — confirm.
                                koomar.send_message('Incorrect password.')
                            else:
                                koomar.send_private_message("Dear %s, you gave an invalid password." % message.sender, message.sender)
                        else:
                            koomar.send_message('Correct password. Disconnecting...')
                            self.disconnect()
                            return
                    if line[3] == ":%s" % (self.command):
                        if len(line) <= 4:
                            self.send_message("Type `%s quote`" % (self.command))
                        else:
                            pass
                            #if line[4] == "disconnect":
                            #    if line[5] == self.password:
                            #        self.disconnect()
                            #        return
                            #    else:
                            #        if not line[2].startswith('#'):
                            #            matches = re.match(':([A-Za-z0-9_-]+)!', line[0])
                            #            sender = matches.groups()[0]
                            #            self.send_private_message("Dear %s, you gave an invalid password." % sender, sender)
                            #        else:
                            #            self.send_message("Invalid password.")
                    #if not responses.__contains__(True):
                    #self.send_message("I don't know that command!")
                except IndexError:
                    pass  # Each line may not be a conversation
def convert(r):
    """Convert one record group into (id, flattened minute ranges).

    r -- list of log lines; r[0] presumably a 'Guard #N begins shift' line,
         the rest alternating begin/end timestamps — verify against caller.
    """
    def internal(e):
        # Pull the 4th capture group (presumably the minute field) as int.
        m = extractorPattern.match(e)
        return int(m.groups()[3])
    print(r)
    i = int(guardBeginPattern.match(r[0]).groups()[0])
    print(r[1:])
    # NOTE(review): on Python 3 `map` returns an iterator; `partition(2, l)`
    # must accept an iterator for this to work — confirm.
    l = map(internal, r[1:])
    # Pair up consecutive minutes and expand each (start, end) into a range.
    return (i, flatten(map(lambda x: range(x[0], x[1]), partition(2, l))))
def format_result(result):
    """Render a handler result as a CGI response string.

    A dict whose only key is 'redirect' produces an external 302 redirect;
    anything else is flattened into an HTML body.
    """
    # TODO this is hacky.
    # Fix: `type(result) == dict` -> isinstance, and compare via list() so the
    # key check also works on Python 3, where dict.keys() is a view object
    # that never equals a list.
    if isinstance(result, dict) and list(result.keys()) == ['redirect']:
        return (
            generateCookieHeader() + '\n' +
            'Location: ' + result['redirect'] + '\n' +
            # Necessary to force an external redirect which is necessary for cookies to
            # work across the redirect.
            'Status: 302\n\n')
    else:
        return ('Content-type: text/html\n' +
                generateCookieHeader() + '\n\n' +
                lib.flatten(result).encode('utf-8'))
def main():
    """Entry point: check that the SGE tools are on PATH, then run qstat,
    swallowing broken-pipe errors (e.g. when piped into `head`)."""
    # CHECK ENVIRON #
    # Every executable name reachable through $PATH (skip missing dirs).
    listpath = flatten(os.listdir(path) for path in os.environ["PATH"].split(':') if os.path.exists(path))
    if not (("qstat" in listpath) and ("qhost" in listpath)):
        sys.exit("Error: qstat or qhost command not found in your path.")
    try:
        qstat.main()
    # NOTE: `except ... as (errno, strerror)` is Python 2-only syntax.
    except IOError as (errno, strerror):
        if errno == 32:
            # Broken pipe
            pass
        else:
            raise IOError(errno, strerror)
def step(rng, i, opt_state):
    """One outer-loop meta-training step (OML/ANML-style continual learning).

    rng       -- JAX PRNG key for this step.
    i         -- step index, passed to the optimizer update.
    opt_state -- outer optimizer state holding (rln_params, pln_params).
    Returns (new opt_state, auxiliary info from the loss function).
    """
    rng_spt, rng_qry, rng_reinit = split(rng, 3)
    rln_params, pln_params = outer_get_params(opt_state)
    # Support set + which classes were sampled for the inner loop.
    x_spt, y_spt, sampled_tasks = inner_loop_sampler(
        rng_spt, train_images, train_labels)
    if not args.no_reset:
        # Re-initialize the classifier column of each sampled task.
        # NOTE(review): the same rng_reinit is reused for every sampled_task,
        # so all reset columns get identical values — confirm intended.
        for sampled_task in sampled_tasks:
            cls_w = pln_params[-2][0]
            pln_params[-2] = (
                ops.index_update(
                    cls_w,
                    ops.index[:, [sampled_task]],
                    nn.initializers.he_normal()(rng_reinit, (cls_w.shape[0], 1)),
                ),  # Reset W
                *pln_params[-2][1:],  # Keep bias (?)
            )
    x_qry, y_qry = outer_loop_sampler(rng_qry, train_images_flat, train_labels_flat)
    # Collapse the task dimension of the support set, then append it to the
    # query set so the outer loss also sees the inner-loop data.
    x_spt, y_spt = flatten(x_spt, 1), flatten(y_spt, 1)
    x_qry, y_qry = (
        jnp.concatenate((x_spt, x_qry), 0),
        jnp.concatenate((y_spt, y_qry), 0),
    )
    # Re-add a singleton axis — presumably (steps, batch=1, ...); verify.
    x_spt, y_spt = x_spt[:, None, ...], y_spt[:, None, ...]
    (outer_loss, info), outer_grads = value_and_grad(
        outer_loop_loss_fn, argnums=(0, 1), has_aux=True)(
            rln_params, pln_params, x_spt, y_spt, x_qry, y_qry)
    opt_state = outer_opt_update(i, outer_grads, opt_state)
    return opt_state, info
def main():
    """Entry point: check that the SGE tools are on PATH, then run qstat,
    swallowing broken-pipe errors (e.g. when piped into `head`)."""
    # CHECK ENVIRON #
    # Every executable name reachable through $PATH (skip missing dirs).
    listpath = flatten(
        os.listdir(path) for path in os.environ["PATH"].split(':')
        if os.path.exists(path))
    if not (("qstat" in listpath) and ("qhost" in listpath)):
        sys.exit("Error: qstat or qhost command not found in your path.")
    try:
        qstat.main()
    # NOTE: `except ... as (errno, strerror)` is Python 2-only syntax.
    except IOError as (errno, strerror):
        if errno == 32:
            # Broken pipe
            pass
        else:
            raise IOError(errno, strerror)
def operate(self, operation, operands):
    """
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    OPERATE
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Apply a binary operation between this profile and each operand profile,
    step by step, and return the resulting profile.

    Note: Profiles on which operations are made must have same limits.
    They also cannot have "None" values within them!
    """
    # Make sure the operand profiles are compatible with this one
    self.validate(operands)
    # Work on a deep copy so the base profile is left untouched
    result = copy.deepcopy(self)
    result.reset()
    # Union of every time step involved (base profile + all operands)
    result.T = lib.uniqify(self.T + lib.flatten([p.T for p in operands]))
    # Evaluate the operation at each merged time step
    for t in result.T:
        value = self.f(t)
        for profile in operands:
            value = operation(value, profile.f(t))
        result.y.append(value)
    # Post-process the combined profile
    result.normalize()
    result.derivate()
    return result
def operate(self, op, profiles):
    """
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    OPERATE
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Combine this profile with the given ones using `op`, evaluated on the
    merged time axis, and return the resulting profile.

    Note: Profiles on which operations are made cannot have "None" values
    within them!
    """
    # Deep-copy the base profile so it is not modified in place
    result = copy.deepcopy(self)
    result.reset()
    # Carry over the time references of the base profile
    result.define(self.start, self.end)
    # Merge the time steps of all profiles involved
    result.T = lib.uniqify(self.T + lib.flatten([p.T for p in profiles]))
    # Fold `op` over every profile at each merged step
    for t in result.T:
        step_value = self.f(t)
        for profile in profiles:
            step_value = op(step_value, profile.f(t))
        result.y.append(step_value)
    # Record extrema, then normalize
    result.min, result.max = min(result.y), max(result.y)
    result.normalize()
    return result
def loop(self):
    """Main IRC receive loop (revised): answer PINGs first, then hand each
    complete line to the registered handler functions."""
    while 1:
        self.buffer += self.irc.recv(4096)
        temp_buffer = self.buffer.split("\n")
        # Last element may be an incomplete line — keep for next recv.
        self.buffer = temp_buffer.pop()
        for line in temp_buffer:
            # First make sure if its a PING request.
            parts = line.strip().split()
            if parts[0] == "PING":
                self.irc.send("PONG %s\r\n" % parts[1])
                continue
            try:
                message = Message(line)
            except IndexError:
                # NOTE(review): if Message() raises, `message` is unbound
                # (or stale) below — confirm this can't happen in practice.
                pass
            # Now move on to the main functionality.
            responses = []
            # Stubbing out functionality for custom functions. Eventually the big block below will be gone.
            for function in flatten(self.functions):
                # Making it very verbose just to be safe.
                response = function(self, message)
                if not response == "disconnect":
                    responses.append({"function": function, "response": response})
                elif response == "disconnect":
                    # If the function returns False, it is telling koomar to kill itself.
                    self.send_message("%s is disconnecting..." % self.nickname)
                    self.disconnect()
                    return
            # Checks to see if at least one of the parsers responded with either true or a string.
            def check(r):
                # The old algorithm, I'm keeping it here just for good times' sake.
                # (r['response'] == True or (not type(r['response']) == bool and r['response'].isalnum()))
                if r == True or type(r) == str:
                    return True
                return False
            if not [check(r["response"]) for r in responses].__contains__(True):
                # and...
                # NOTE(review): `sender_exceptions` is not defined in this
                # method — presumably a module-level global; verify.
                if message.is_command(self.command, sender_exceptions):
                    if message.is_public():
                        self.send_message("I don't know that command!")
                    else:
                        self.send_private_message("I don't know that command!", message.sender)
def export(self):
    '''Export the config for shell usage. Keys are uppercased.
    List-like keys are flattened.

    >>> c = Config('activity: hanggliding')
    >>> c.export()
    'export ACTIVITY="hanggliding"'
    '''
    lines = []
    for key in self:
        value = dfl(self[key])
        # Make list-like keys shell friendly
        if isinstance(value, (list, tuple)):
            quoted = [shlex_quote(item) for item in flatten(value)]
            value = ' '.join(quoted)
        elif isinstance(value, dict):
            value = repr(value)
        shell_key = key.upper().replace(' ', '_')
        lines.append('export {}="{}"'.format(shell_key, value))
    # Joining gives the same result as accumulating with '\n' and
    # stripping the trailing newline.
    return '\n'.join(lines)
def export(self):
    '''Export the config for shell usage. Keys are uppercased.
    List-like keys are flattened.

    >>> c = Config('activity: hanggliding')
    >>> c.export()
    'export ACTIVITY="hanggliding"'
    '''
    entries = []
    for key in self:
        val = dfl(self[key])
        # Make list-like keys shell friendly
        if isinstance(val, (list, tuple)):
            val = ' '.join(shlex_quote(e) for e in flatten(val))
        elif isinstance(val, dict):
            val = repr(val)
        entries.append(
            'export {}="{}"'.format(key.upper().replace(' ', '_'), val))
    # Equivalent to accumulating with trailing '\n' and slicing it off.
    return '\n'.join(entries)
def op(self, op, profiles):
    """
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    OP
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Execute a given operation on profiles, and return the resulting profile.
    Said operation is executed on each step of the combined time axes.
    """
    # All operand profiles must share this profile's limits
    for p in profiles:
        if not (p.start == self.start and p.end == self.end):
            raise errors.MismatchedLimits
    # Operate on a deep copy so the base profile stays intact
    result = copy.deepcopy(self)
    result.reset()
    # Re-establish the time references
    result.define(self.start, self.end)
    # Merge the time axes of every profile involved
    result.T = lib.uniqify(self.T + lib.flatten([p.T for p in profiles]))
    # Fold the operation over each merged time step
    for t in result.T:
        value = self.f(t)
        for profile in profiles:
            value = op(value, profile.f(t))
        result.y.append(value)
    # Normalize only when a norm is defined
    if result.norm is not None:
        result.normalize()
    return result
def f(acc):
    # Split the pair, then prepend the head to the flattened tail.
    head, tail = acc
    return [head] + lib.flatten(tail)
def test_flatten_compress():
    # flatten: nested pairs collapse into one flat list
    flattened = lib.flatten([[1, 2], [3, 4], [5, 6]])
    assert_true('flatten', flattened == [1, 2, 3, 4, 5, 6])
    # compress: keep only the elements at the given indices
    compressed = lib.compress([10, 11, 12, 13, 14], [0, 2, 4])
    assert_true('compress', compressed == [10, 12, 14])
def test_flatten():
    # Mixed nesting: sublists are expanded, bare items pass through.
    nested = [['F', 'G'], ['C', 'Am7'], 'Em7']
    expected = ['F', 'G', 'C', 'Am7', 'Em7']
    assert flatten(nested) == expected
# take a[0] and a[1] elements and turn them into a range, multiple if spans over a day def rangify(r): def internal(a): if a[0][0] == a[1][0]: # same day return [range(a[0][1], a[1][1])] else: print('overday', r) return [range(a[0][1], 60), range(0, a[1][1])] return flatMap(internal, r) # expand all shift items into ranges describing each minute ranges = groupP( lambda p, n: p[0] == n[0], sorted(map(lambda x: (x[0], flatten(rangify(x[1]))), translatedShifts), key=lambda x: x[0])) # merges [ (id, [range]), ... ] collections into (id, [range1, range2, ...]) def merge(r): def internal(acc, n): return (n[0], acc[1] + n[1]) return reduce(internal, r, (0, [])) mergedRanges = map(lambda x: (x[0], sorted(x[1])), map(merge, ranges)) lenRanges = map(lambda x: (x[0], len(x[1])), mergedRanges)
def get_raw(etoks):
    """extract the raw fields from a nested list of Etoks and LexTokens"""
    raw_per_item = [Etok._get_raw1(item) for item in lib.fflatten(etoks)]
    return lib.flatten(raw_per_item)
def parse(s):
    """Parse a fabric-claim line like '#1 @ 3,2: 5x4' into [x1, y1, x2, y2].

    Returns the left/top corner and the exclusive right/bottom corner.
    Fix: the original indexed into `map(...)`, which is an iterator on
    Python 3 and not subscriptable; convert the fields directly instead.
    """
    spec = s.split('@')[1]
    pos_part, size_part = spec.strip().split(':')
    left, top = [int(v) for v in pos_part.split(',')]
    width, height = [int(v) for v in size_part.split('x')]
    return [left, top, left + width, top + height]
# Build the fabric grid. NOTE(review): width uses maxX + 1 but height uses
# maxY + 10 — the extra rows look like defensive padding; confirm intended.
grid = [['.'] * (maxX + 1) for i in range(maxY + 10)]
print('grid size', maxX, maxY)

def fill(g, r):
    # Mark rect r = [x1, y1, x2, y2] on grid g: '-' for first claim,
    # 'x' once a cell is claimed more than once.
    for y in range(r[1], r[3]):
        for x in range(r[0], r[2]):
            g[y][x] = '-' if g[y][x] == '.' else 'x'

for r in rects:
    fill(grid, r)
#rowprint(grid)
# Part 1: count multiply-claimed cells.
# NOTE(review): len(filter(...)) only works on Python 2, where filter
# returns a list — Python 3 would need list() or sum().
print(len(filter(lambda x: x == 'x', flatten(grid))))

# part 2
def check(rs, j):
    # True when rect rs[j] overlaps no other rect.
    for i in range(len(rs)):
        if i == j:
            continue
        if intersects((rs[i], rs[j])):
            return False
    return True

# Part 2: print the unique non-overlapping claim (1-based id).
for i in range(len(rects)):
    if check(rects, i):
        print(rects[i], i + 1)
try: input_entries = lib.parse_inside(index, inside, args.upstream) except exc.KSError as err: print("Error: %s" % (err,), file=sys.stderr) sys.exit(1) try: sorted_entries = lib.series_sort(index, input_entries) except exc.KSError as err: print("Error: %s" % (err,), file=sys.stderr) sys.exit(1) new_inside = lib.flatten([ lib.series_header(inside), lib.series_format(sorted_entries), lib.series_footer(inside), ]) to_update = list(filter(lib.tag_needs_update, input_entries)) if args.check: result = 0 if inside != new_inside: print("Input is not sorted.") result = 2 if len(to_update): print("Git-repo tags are outdated.") result = 2 sys.exit(result) else: output = lib.flatten([
def make_flat_set(arr):
    # Flatten `arr` by one level.
    # NOTE(review): despite the name, this returns whatever flatten(arr, 1)
    # yields, not a set — confirm the name against callers.
    return flatten(arr, 1)
try: input_entries = lib.parse_inside(index, inside, args.upstream) except exc.KSError as err: print("Error: %s" % (err, ), file=sys.stderr) sys.exit(1) try: sorted_entries = lib.series_sort(index, input_entries) except exc.KSError as err: print("Error: %s" % (err, ), file=sys.stderr) sys.exit(1) new_inside = lib.flatten([ lib.series_header(inside), lib.series_format(sorted_entries), lib.series_footer(inside), ]) to_update = list(filter(lib.tag_needs_update, input_entries)) if args.check: result = 0 if inside != new_inside: print("Input is not sorted.") result = 2 if len(to_update): print("Git-repo tags are outdated.") result = 2 sys.exit(result) else: output = lib.flatten([
def get_jobids(self):
    """Collect the job ids of every queue in this cluster."""
    per_queue = [queue.get_jobids() for queue in self.queues]
    return flatten(per_queue)