def update_will_roster_and_rooms(self):
    internal_roster = self.load('will_roster', {})
    for roster_id in self.roster:
        cur_roster = self.roster[roster_id]
        for user_id in cur_roster:
            user_data = cur_roster[user_id]
            if user_data["name"] != "":
                if user_id not in internal_roster:
                    internal_roster[user_id] = Bunch()
                hipchat_id = user_id.split("@")[0].split("_")[1]
                internal_roster[user_id].update({
                    "name": user_data["name"],
                    "jid": user_id,
                    "hipchat_id": hipchat_id,
                })
                if not hasattr(internal_roster[user_id], "nick"):
                    user_data = self.get_hipchat_user(hipchat_id)
                    internal_roster[user_id].nick = user_data["mention_name"]
                    internal_roster[user_id].mention_name = user_data["mention_name"]
                if internal_roster[user_id]["name"] == self.nick:
                    self.me = internal_roster[user_id]
    self.save("will_roster", internal_roster)
def update_will_roster_and_rooms(self):
    internal_roster = self.load('will_roster', {})
    # Loop through the connected rooms
    for roster_id in self.roster:
        cur_roster = self.roster[roster_id]
        # Loop through the users in a given room
        for user_id in cur_roster:
            user_data = cur_roster[user_id]
            if user_data["name"] != "":
                # If we don't have this user in the internal_roster, add them.
                if user_id not in internal_roster:
                    internal_roster[user_id] = Bunch()
                hipchat_id = user_id.split("@")[0].split("_")[1]
                # Update their info
                internal_roster[user_id].update({
                    "name": user_data["name"],
                    "jid": user_id,
                    "hipchat_id": hipchat_id,
                })
                # If we don't have a nick yet, pull it and mention_name off the master user list.
                if not hasattr(internal_roster[user_id], "nick"):
                    user_data = self.full_hipchat_user_list[hipchat_id]
                    internal_roster[user_id].nick = user_data["mention_name"]
                    internal_roster[user_id].mention_name = user_data["mention_name"]
                # If it's me, save that info!
                if internal_roster[user_id]["name"] == self.nick:
                    self.me = internal_roster[user_id]
    self.save("will_roster", internal_roster)
    self.update_available_rooms()
def load_model_path(path, config=None):
    if config is None:
        config = get_config(path)
    if type(config) is dict:
        config = Bunch(**config)
    config.model_path = path
    # net = BasicNetwork(config)
    net = M2Net(config)
    net.eval()
    return net
def getFilterLinesExtended(self, includeMS1=True, includeMS2=False,
                           includePosPolarity=True, includeNegPolarity=True):
    filterLines = {}
    if includeMS1:
        for scan in self.MS1_list:
            if (includePosPolarity and scan.polarity == "+") or (includeNegPolarity and scan.polarity == "-"):
                if scan.filter_line not in filterLines.keys():
                    filterLines[scan.filter_line] = Bunch(scanType="MS1",
                                                          polarity=scan.polarity,
                                                          targetStartTime=10000000,
                                                          targetEndTime=0)
                filterLines[scan.filter_line].targetStartTime = min(
                    filterLines[scan.filter_line].targetStartTime, scan.retention_time)
                # NOTE: the original used min() here, which would leave targetEndTime
                # stuck at its initial value of 0; max() matches the MS2 branch below.
                filterLines[scan.filter_line].targetEndTime = max(
                    filterLines[scan.filter_line].targetEndTime, scan.retention_time)
    if includeMS2:
        for scan in self.MS2_list:
            if (includePosPolarity and scan.polarity == "+") or (includeNegPolarity and scan.polarity == "-"):
                if scan.filter_line not in filterLines.keys():
                    filterLines[scan.filter_line] = Bunch(scanType="MS2",
                                                          polarity=scan.polarity,
                                                          targetStartTime=10000000,
                                                          targetEndTime=0,
                                                          preCursorMz=[],
                                                          colisionEnergy=0)
                filterLines[scan.filter_line].targetStartTime = min(
                    filterLines[scan.filter_line].targetStartTime, scan.retention_time)
                filterLines[scan.filter_line].targetEndTime = max(
                    filterLines[scan.filter_line].targetEndTime, scan.retention_time)
                filterLines[scan.filter_line].preCursorMz.append(scan.precursor_mz)
                filterLines[scan.filter_line].colisionEnergy = scan.colisionEnergy
    for k, v in filterLines.items():
        if v.scanType == "MS2":
            v.preCursorMz = sum(v.preCursorMz) / len(v.preCursorMz)
    return filterLines
def _post_exec(self):
    if self.count:
        return
    phi = DefaultOrderedDict(list)
    psi = DefaultOrderedDict(list)
    if self.compare_pdb and self.compare_sele:
        s = self.compare_sele
        b = Bunch(chain1=s["chain"], resno1=s["resno"][0],
                  chain2=s["chain"], resno2=s["resno"][1],
                  pdb_id="COMP", no=0)
        n = numpdb.NumPdb(self.compare_pdb)
        self.elements = [(b, n)]
    it = itertools.groupby(self.records, operator.attrgetter('pdb_id'))
    sf = SstrucFinderRefine(
        [list(r) for pdb_id, r in it],
        pdb_archive=self.pdb_archive,
        window=self.window,
        parallel="data"
    )
    self.elements += sf.elements
    # rama_plot(
    #     zip(phi.values(), psi.values()),
    #     titles=map(str, window)
    # )
    self._superpose_elements()
    self._msa_elements()
def select_bootstrapper(f, argv):
    parser = OptionParser()
    parser.add_option('--select-bootstrapper', dest="boot", action='store_true',
                      help="the bootstrapper version to use")
    (values, args) = parser.parse_args(argv[1:], Bunch(boot=False))
    if not values.boot:
        raise XmakeException("inconsistent use of bootstrapper option")
    if len(args) == 0:
        # check and notify about the actually configured bootstrapper
        if isfile(f):
            log.info("determining bootstrapper")
            v = get_first_line(f, 'cannot read ' + f)
            if v is not None:
                log.info("actual bootstrapper version is " + v)
            else:
                log.info("no bootstrapper version configured in " + f)
        else:
            log.info("no bootstrapper version configured")
    else:
        # set or change the bootstrapper version to use
        if len(args) != 1:
            raise XmakeException("only one version argument possible")
        (v, _) = load_latest(args[0])
        with open(f, "w") as b:
            b.write(v + "\n")
        log.info("setting bootstrapper to version " + v)
def load_model_path(path, config):
    if type(config) is dict:
        config = Bunch(**config)
    config.model_path = path
    if config.net == 'basic':
        net = BasicNetwork(config)
    elif config.net == 'state':
        net = StateNet(config)
    elif config.net == 'hypothesis':
        net = HypothesisNet(config)
    else:
        raise NotImplementedError
    net.eval()
    return net
def test_fixed_pts():
    torch.manual_seed(4)
    np.random.seed(3)
    random.seed(0)

    D2 = 3
    D1 = 50
    fixed_pts = 1
    t_len = 1000
    args = Bunch(
        N=500,
        D1=D1,
        D2=D2,
        # fixed_pts=fixed_pts,
        fixed_beta=1.5,
        res_x_seed=0,
        res_seed=0,
        res_init_g=1.5)
    reservoir = M2Reservoir(args)
    if fixed_pts > 0:
        # patterns = (2 * np.eye(args.N) - 1)[:fixed_pts, :]
        reservoir.add_fixed_points(fixed_pts)
        # print(len(patterns))
        # pdb.set_trace()

    us = np.random.normal(0, .3, (16, D1))
    # us = np.zeros((16, D1))
    # us = np.random.normal(0, 1, (1, D1)) + np.random.normal(0, 0.1, (16, D1))
    us = torch.as_tensor(us, dtype=torch.float)
    vs = []
    for i, u in enumerate(us):
        reservoir.reset()
        trial_vs = []
        for t in range(t_len):
            if t < 500:
                v = reservoir(u)
            else:
                v = reservoir(None)
            trial_vs.append(v.detach())
        print(reservoir.x.detach().numpy()[0, :5])
        trial_vs = torch.cat(trial_vs)
        vs.append(trial_vs.numpy())

    fig, axes = plt.subplots(nrows=4, ncols=4, sharex=True, sharey=True, figsize=(16, 12))
    cools = plt.cm.cool(np.linspace(0, 1, D2))
    xaxis = range(t_len)
    for i, ax in enumerate(axes.ravel()):
        for j in range(D2):
            ax.plot(xaxis, vs[i][:, j], color=cools[j])
    fig_format.hide_frame(*axes.ravel())
    plt.show()
def getPeaksFor(self, times, eic, scales=None, snrTh=0.1, startIndex=None, endIndex=None):
    peaks = self.findPeaks(times, eic)
    ret = []
    for peak in peaks:
        ret.append(Bunch(peakIndex=peak.peakIndex,
                         peakScale=(peak.peakRightFlank + peak.peakLeftFlank) / 2,
                         peakSNR=100,
                         peakArea=peak.peakArea,
                         peakLeftFlank=peak.peakLeftFlank,
                         peakRightFlank=peak.peakRightFlank))
    return ret
def pairwise_align(sequences_by_species):
    sequences = []
    for key, value in sequences_by_species.items():
        sequences += value
    sequences_count = len(sequences)
    alignments = [[None for i in range(sequences_count)]
                  for j in range(sequences_count)]
    for i in range(sequences_count):
        print(sequences[i].specie)
        for j in range(sequences_count):
            alignments[i][j] = protein_align(sequences[i], sequences[j])
    return Bunch(sequences=sequences, alignments=alignments)
def calcNucleotide(s1, s2, step, mutations):
    nseq1 = nseq2 = numbers = cursors = ''
    tmp = 0
    cnumber = step
    mismatch_count = 0
    for i in range(len(s1)):
        # print i
        if (i + 1) % step == 0:
            # print 'i'
            numbers += str(cnumber)
            tmp = len(str(cnumber)) - 1
            # print 'tmp', tmp
            cursors += '|'
            for x in range(len(str(cnumber)) - 1):
                # print '|', str(cnumber)
                cursors += ' '
            cnumber += step
        else:
            if tmp != 0:
                tmp -= 1
            else:
                numbers += ' '
            cursors += ' '
        if s1[i] != s2[i]:
            color = 'yellow' if mutations[i] == 'nucleotide' else 'red'
            nseq1 += pystache.render(
                '<span class="' + color + '">{{symbol}}</span>',
                {'symbol': s1[i]})
            nseq2 += pystache.render(
                '<span class="' + color + '">{{symbol}}</span>',
                {'symbol': s2[i]})
            mismatch_count += 1
        else:
            nseq1 += s1[i]
            nseq2 += s2[i]
    return Bunch(nseq1=nseq1, nseq2=nseq2, numbers=numbers,
                 cursors=cursors, mismatch_count=mismatch_count)
def plot_c():
    g = 1.5
    N = 2000
    n_unique_nets = 9
    n_steps = 2000
    n_reps = 4
    all_corrs = []
    for rep in range(n_unique_nets):
        b = Bunch(N=N, Z=1, res_init_g=g, bias=False)
        net = Reservoir(b)
        corrs = []
        for j in range(n_reps):
            init_x = np.random.normal(0, 1, (1, N))
            net.reset(res_state=init_x)
            xs = np.zeros(n_steps)
            for i in range(n_steps):
                out = net()
                xs[i] = out[0, 0].item()
            corr = np.correlate(xs, xs, 'full')[n_steps:]
            corrs.append(corr)
        all_corrs.append(corrs)
        # use this for getting the mean for each rep
        # all_corrs.append(np.mean(corrs, axis=0))

    plt.figure(figsize=(15, 8))
    for rep in range(n_unique_nets):
        ax = plt.subplot(3, 3, rep + 1)
        # ax.plot(all_corrs[rep], lw=2)
        for j in range(n_reps):
            ax.plot(all_corrs[rep][j], lw=2)
        ax.grid(True, which='major', lw=1, color='lightgray', alpha=0.4)
        ax.tick_params(axis='both', color='white')
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.set_ylabel('distance')
        ax.set_xlabel('timestep')
    plt.suptitle(f'g = {g}')
    plt.show()
def calcProtein(s1, s2, step):
    nseq1 = nseq2 = numbers = cursors = ''
    tmp = 0
    cnumber = step
    mismatch_count = 0
    for i in range(len(s1)):
        # print i
        if (i + 1) % step == 0:
            # print 'i'
            numbers += str(cnumber)
            tmp = len(str(cnumber)) - 1
            # print 'tmp', tmp
            cursors += '|'
            for x in range(len(str(cnumber)) - 1):
                # print '|', str(cnumber)
                cursors += ' '
            cnumber += step
        else:
            if tmp != 0:
                tmp -= 1
            else:
                numbers += ' '
            cursors += ' '
        if s1[i] != s2[i]:
            nseq1 += pystache.render('<span class="red">{{symbol}}</span>',
                                     {'symbol': s1[i]})
            nseq2 += pystache.render('<span class="red">{{symbol}}</span>',
                                     {'symbol': s2[i]})
            mismatch_count += 1
        else:
            nseq1 += s1[i]
            nseq2 += s2[i]
    return Bunch(nseq1=nseq1, nseq2=nseq2, numbers=numbers,
                 cursors=cursors, mismatch_count=mismatch_count)
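# A self-contained illustration (not from the original sources) of the
# pystache call used in calcNucleotide and calcProtein above: Mustache
# substitutes {{symbol}} into the literal template, so each mismatching
# character is wrapped in a colored <span>.
import pystache

html = pystache.render('<span class="red">{{symbol}}</span>', {'symbol': 'A'})
assert html == '<span class="red">A</span>'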
def getData(self, cols=None, where=None, orderby=None, getResultsAsBunchObjects=False):
    ret = []
    if where is None:
        where = ""
    else:
        where = " WHERE " + where
    if orderby is None:
        orderby = ""
    else:
        orderby = "ORDER BY " + orderby
    if cols is None:
        cols = [x.getName() for x in self.getColumns()]
    # Column names starting with a digit are prefixed with an underscore.
    tCols = []
    for col in cols:
        if col[0].isdigit():
            tCols.append("_" + col)
        else:
            tCols.append(col)
    cols = tCols
    for row in self.curs.execute(
            self.__updateTableName(
                "SELECT %s FROM :table: %s %s" % (",".join([col for col in cols]), where, orderby))):
        if getResultsAsBunchObjects:
            ret.append(Bunch(_addFollowing=dict(zip(cols, row))))
        else:
            if len(row) == 1:
                ret.append(row[0])
            else:
                ret.append(row)
    return ret
def parse_data(inp):
    merged_sequences = dict()
    for s in sequences(inp):
        seq = s[1].replace('\n', '').replace('\r', '')
        matchObj = re.match(r'.*(Vibrio [^\s.]*).*', s[0], re.M | re.I)
        specie = matchObj.group(1)
        if specie != 'Vibrio sp' and specie != 'Vibrio genomosp':
            sequence = merged_sequences.get(
                seq,
                Bunch(seq=seq, specie=specie, merged_count=0,
                      row_headers=[], aseq=translate_dna(seq)))
            sequence.merged_count += 1
            sequence.row_headers.append(s[0])
            merged_sequences[seq] = sequence
    return merged_sequences
def protein_align(seq1, seq2):
    aseq1 = aseq2 = ''
    apseq1, apseq2 = align(seq1.aseq[:-1], seq2.aseq[:-1])
    mismatch = pos1 = pos2 = 0
    mutations = {}
    for n in range(len(apseq1)):
        triplet_nucl1 = get_triplet(seq1.seq, pos1, apseq1[n])
        pos1 += 3 if triplet_nucl1 != '---' else 0
        aseq1 += triplet_nucl1
        triplet_nucl2 = get_triplet(seq2.seq, pos2, apseq2[n])
        pos2 += 3 if triplet_nucl2 != '---' else 0
        aseq2 += triplet_nucl2
        each_mismatch = 0
        for i in range(3):
            if triplet_nucl1[i] != triplet_nucl2[i]:
                if apseq1[n] != apseq2[n]:
                    mutations[3 * n + i] = 'protein'
                else:
                    mutations[3 * n + i] = 'nucleotide'
            each_mismatch += 1 if triplet_nucl1[i] != triplet_nucl2[i] else 0
        if apseq1[n] != apseq2[n]:
            each_mismatch *= 2
        mismatch += each_mismatch
    return Bunch(score=mismatch, mutations=mutations, aseq1=aseq1, aseq2=aseq2,
                 aaseq1=apseq1, aaseq2=apseq2)
import matplotlib.pyplot as plt
import argparse
import pdb
import os
import sys
import torch  # needed for torch.Tensor below; missing from the original excerpt

sys.path.append('../')
from utils import Bunch, load_rb
from train import Trainer, parse_args, adjust_args
from network import HypothesisNetwork

b = Bunch()
b.dataset = '../datasets/temp.pkl'
b.out_act = 'none'
b.L = 2
b.Z = 2

dset = load_rb(b.dataset)
net = HypothesisNetwork(b)

seq1 = dset[0]
t1 = seq1[0]
t1 = torch.Tensor(t1)
net(t1)
                    action='store_true',
                    help='whether to do a quick test')
parser.add_argument('--out', type=str, help='results output file')

args = parser.parse_args(sys.argv[1:])

# log_codes = dict(e=tf.logging.ERROR,
#                  i=tf.logging.INFO,
#                  w=tf.logging.WARN,
#                  d=tf.logging.DEBUG)

if args.out is not None:
    exp_path = os.path.join(args.out, exp_path)
exp = Experiment(exp_path)

params = Bunch(yaml.load(open(args.hparams)))
grid = get_param_grid(params)
params.pop('grid')

# tf.logging.set_verbosity(log_codes.get(params.log.lower()[0],
#                                        tf.logging.ERROR))

# if platform.system() == 'Linux':
#     tf.compat.v1.disable_eager_execution()
#     print(f'******** TF EAGER MODE DISABLED on {platform.system()} *****')


def run_exp(params):
    exp.tag(params)
    URL = 'mushroom/all.csv'
    dataframe = pd.read_csv(URL)
    dataframe.head()
def getPeaksFor(self, times, eici, scales=[11, 66], snrTh=0.1, minScans=3,
                startIndex=None, endIndex=None, errorIndex=5):
    # errorIndex (minimum index distance between two peaks) was undefined in the
    # original excerpt; it is exposed here as a parameter with an assumed default.
    snrTh = 3  # note: overrides the snrTh argument

    # precheck
    if sum(eici) == 0:
        return []

    if startIndex is None:
        startIndex = 0
    if endIndex is None:
        endIndex = len(eici)

    # crop EIC
    eici = eici[startIndex:endIndex]
    # times = times[startIndex:endIndex]

    # add scales at begin and end of eic
    eicAdd = int(scales[1]) * 2
    eic = [0 for u in range(0, eicAdd)]
    eic.extend(eici)
    eic.extend([0 for u in range(0, eicAdd)])

    # create R-vector of EIC
    eicRC = str([str(v) for v in eic]).replace('[', '').replace(']', '').replace('\'', '')

    # call R-MassSpecWavelet
    # (r is the R interpreter handle, e.g. rpy2's robjects.r, with
    # getMajorPeaks defined on the R side)
    try:
        ret = r('getMajorPeaks(c(' + eicRC + '), c(' + str(scales[0]) + ', ' +
                str(scales[1]) + '), snrTh=' + str(snrTh) + ')')
    except Exception as e:
        ret = []

    # Convert results from R-Objects to Python Objects/Arrays
    retl = []
    if len(ret) > 1:
        # Convert linear R-Vector (4 consecutive elements represent one peak) to Python Array
        for i in range(0, len(ret), 4):
            cur = []
            for j in range(0, 4):
                cur.append(ret[i + j])
            peak = Bunch(peakIndex=int(cur[0]), peakScale=float(cur[1]),
                         peakSNR=float(cur[2]), peakArea=float(cur[3]),
                         peakLeftFlank=float(cur[1]), peakRightFlank=float(cur[1]))
            retl.append(peak)

    # detect very close-by peaks and peaks with too few non-zero scans
    todel = []
    for f in range(0, len(retl)):
        for s in range(0, len(retl)):
            if f < s:
                if abs(retl[f].peakIndex - retl[s].peakIndex) < errorIndex:
                    todel.append(f)
        retl[f].peakIndex = retl[f].peakIndex - 1
        peakCenter = int(retl[f].peakIndex)
        count = 0
        for i in range(max(0, int(peakCenter - retl[f].peakScale)),
                       min(len(eic) - 1, int(peakCenter + retl[f].peakScale))):
            if eic[i] > 0:
                count = count + 1
        if count < minScans:
            todel.append(f)

    # remove very close-by peaks
    if len(todel) > 0:
        todel = [x for x in set(todel)]
        todel.sort()
        todel.reverse()
        for i in todel:
            retl.pop(i)

    # calculate peak flanks (left and right side)
    for i1 in range(len(retl)):
        for i2 in range(len(retl)):
            p1 = retl[i1]
            p2 = retl[i2]
            if p1.peakIndex < p2.peakIndex and (p1.peakIndex + p1.peakScale) > (p2.peakIndex - p2.peakScale):
                overlap = (p1.peakIndex + p1.peakScale) - (p2.peakIndex - p2.peakScale)
                if p1.peakScale > p2.peakScale:
                    p1.peakRightFlank = p1.peakRightFlank - overlap
                else:
                    p2.peakLeftFlank = p2.peakLeftFlank - overlap

    # remove added ms scans from peak center
    # and add startIndex
    for f in retl:
        f.peakIndex = f.peakIndex - eicAdd + startIndex

    return retl
# os/sys/json/tensorflow imports added; the original excerpt assumed them.
import os
import sys
import json

import tensorflow as tf

from ops import conv_rnns
from mvnet import MVNet
import scipy.io as sio
from utils import Bunch, get_session_config
import scipy.ndimage as ndimg

sys.path.append(os.path.join('../utils'))
from util import downsample
import binvox_rw

SAMPLE_DIR = os.path.join('data', 'shapenet_sample')
im_dir = os.path.join(SAMPLE_DIR, 'renders')
log_dir = os.path.join('models_lsm_v1/vlsm-release/train')

with open(os.path.join(log_dir, 'args.json'), 'r') as f:
    args = json.load(f)
    args = Bunch(args)

# Set voxel resolution
voxel_resolution = 32

# Setup TF graph and initialize VLSM model
tf.reset_default_graph()

# Change the ims_per_model to run on different number of views
bs, ims_per_model = 1, 4

ckpt = 'mvnet-100000'
net = MVNet(vmin=-0.5, vmax=0.5, vox_bs=bs,
            im_bs=ims_per_model, grid_size=args.nvox,
            im_h=args.im_h, im_w=args.im_w,
            norm=args.norm, mode="TEST")
import numpy as np
import torch
import matplotlib.pyplot as plt
import pdb
import sys
import random

sys.path.append('../')
from network import Network, Reservoir
from utils import Bunch

b = Bunch(N=20, D=1, res_init_params={'std': 1.5},
          reservoir_noise=.1, reservoir_burn_steps=200)
net = Network(b)

N = net.args.N
D = net.args.D
corrs = []
all_dists = []
n_steps = 300
n_net_reps = 12
n_reps = 15
activation = lambda y: y

for rep in range(n_net_reps):
    net = Network(b)
#! /usr/bin/env python

import brian2 as b2
import numpy as np
import sys, json
from copy import deepcopy
from scipy.stats import beta
from utils import Bunch, xc_score, spike_score, psp_to_current, ablateNeuron

# get parameters from config file
print("loading parameters")
fname = sys.argv[1]
f = open(fname, 'r')
params = Bunch(json.loads(f.read()))

# set up simulation time
duration = params.simulation.duration * b2.second
dt = params.simulation.dt * b2.ms

# make neuron groups
print("building neuron model")
model = '''
dx/dt = (xinf - x + IsynE + IsynI + Iswitch*Iext)/tau: 1 (unless refractory)
dIsynE/dt = -IsynE/tauSynE: 1
dIsynI/dt = -IsynI/tauSynI: 1
xinf: 1
tau: second
tauSynE: second
tauSynI: second
def get_task_args(args):
    tarr = args.task_args
    targs = Bunch()
    if args.t_type.startswith('rsg'):
        targs.t_len = get_tval(tarr, 'l', 600, int)
        targs.p_len = get_tval(tarr, 'pl', 5, int)
        targs.gain = get_tval(tarr, 'gain', 1, float)
        targs.max_ready = get_tval(tarr, 'max_ready', 80, int)
        if args.intervals is None:
            targs.min_t = get_tval(tarr, 'gt', targs.p_len * 4, int)
            targs.max_t = get_tval(tarr, 'lt', targs.t_len // 2 - targs.p_len * 4 - targs.max_ready, int)
        else:
            targs.max_t = max(args.intervals)
            targs.min_t = min(args.intervals)
    elif args.t_type.startswith('csg'):
        targs.t_len = get_tval(tarr, 'l', 600, int)
        targs.p_len = get_tval(tarr, 'pl', 5, int)
        targs.max_cue = get_tval(tarr, 'max_cue', 100, int)
        targs.max_set = get_tval(tarr, 'max_set', 300, int)
        if args.intervals is None:
            targs.min_t = get_tval(tarr, 'gt', targs.p_len * 4, int)
            targs.max_t = get_tval(tarr, 'lt', targs.t_len // 2 - targs.p_len * 4, int)
    elif args.t_type == 'delay-copy':
        targs.t_len = get_tval(tarr, 'l', 500, int)
        targs.dim = get_tval(tarr, 'dim', 2, int)
        targs.n_freqs = get_tval(tarr, 'n_freqs', 20, int)
        targs.f_range = get_tval(tarr, 'f_range', [10, 40], float, n_vals=2)
        targs.amp = get_tval(tarr, 'amp', 1, float)
    elif args.t_type == 'flip-flop':
        targs.t_len = get_tval(tarr, 'l', 500, int)
        targs.dim = get_tval(tarr, 'dim', 3, int)
        targs.p_len = get_tval(tarr, 'pl', 5, int)
        targs.geop = get_tval(tarr, 'p', .02, float)
    elif args.t_type in ('delay-pro', 'delay-anti'):
        targs.t_len = get_tval(tarr, 'l', 300, int)
        targs.fix_t = get_tval(tarr, 'fix', 50, int)
        targs.stim_t = get_tval(tarr, 'stim', 150, int)
    elif args.t_type in ('memory-pro', 'memory-anti'):
        targs.t_len = get_tval(tarr, 'l', 300, int)
        targs.fix_t = get_tval(tarr, 'fix', 50, int)
        targs.stim_t = get_tval(tarr, 'stim', 100, int)
        targs.memory_t = get_tval(tarr, 'memory', 50, int)
    elif args.t_type == 'dur-disc':
        targs.t_len = get_tval(tarr, 'l', 600, int)
        targs.tau = get_tval(tarr, 'tau', 10, int)
        targs.min_d = get_tval(tarr, 'gt', 10, int)
        targs.max_d = get_tval(tarr, 'lt', 80, int)
        targs.sep_t = get_tval(tarr, 'sep_t', 150, int)
        targs.cue_t = get_tval(tarr, 'cue_t', 400, int)
        targs.select_t = get_tval(tarr, 'select_t', 440, int)
    return targs
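# get_tval is not defined in this excerpt. A minimal sketch consistent with
# the calls in get_task_args above, assuming tarr is a flat list of
# alternating key/value tokens (e.g. ['l', '600', 'pl', '5']); the parsing
# rules here are assumptions, not the original implementation.
def get_tval(tarr, name, default, dtype, n_vals=1):
    # Fall back to `default` when the key is absent; otherwise cast the
    # following token(s) with `dtype`.
    if name in tarr:
        idx = tarr.index(name) + 1
        vals = [dtype(v) for v in tarr[idx:idx + n_vals]]
        return vals[0] if n_vals == 1 else vals
    return default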
from formulaTools import formulaTools

ft = formulaTools()

m = 720.3923 + 0.00055  # 702.27351+18.033823+0.00055
# sfg: sum-formula generator instance; it is created elsewhere in the
# original script and is not defined in this excerpt.
formsCRes = sfg.findFormulas(m,
                             useAtoms=["C", "N", "H", "O"],
                             atomsRange=[(32, 32), (0, 500), (0, 10000), (0, 400)],
                             fixed=["C"],
                             useSevenGoldenRules=False,
                             useSecondRule=False,
                             ppm=5.)

from utils import Bunch, printObjectsAsTable
from formulaTools import formulaTools

fT = formulaTools()
bs = []
for f in formsCRes:
    elems = fT.parseFormula(f)
    bs.append(Bunch(formula=f,
                    mass=fT.calcMolWeight(elems),
                    diffPPM=(fT.calcMolWeight(elems) - m) * 1000000. / m,
                    diffPPMAt300=(fT.calcMolWeight(elems) - m) * 1000000. / 300))

printObjectsAsTable(bs, attrs=["formula", "mass", "diffPPM", "diffPPMAt300"])
import os

'''This script can update the packages list in packages.csv

This script accepts one parameter `dst` as the destination folder/package.

If dst is a package, this script will update that package, regardless of
whether it exists or not.
e.g. `python update_package.py /path/to/CLOVER/kexts/Other/AppleALC.kext`

If dst is a folder, this script will update all the packages in that folder
that are listed in packages.csv
e.g. `python update_package.py /path/to/Kexts` will update kexts
     `python update_package.py /path/to/Clover` will update packages in Clover
'''

# terminal, Bunch, Path, path, and Package come from this project's local
# modules; their imports are not part of this excerpt.
fg = terminal.fg
formats = Bunch(checkupdate="({}/{}) {:<46}",
                listupdate="[{}] {:<46} {} -> {}")

''' Get and filter packages '''
packages = []
with open(Path(path.root, 'packages.csv'), 'r') as f:
    keys = f.readline()[:-1].lower().split(',')
    packages = [Package(**dict(zip(keys, x[:-1].split(',')))) for x in f]

if path.dsttype == 'file':
    for package in packages:
        if path.dst.name in package.items:
            package.folder = path.dst.parent
            packages = [package]
            break
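# Every snippet above imports Bunch from a local utils module. The exact
# implementation varies by project (some variants recursively wrap nested
# dicts, which the brian2 snippet's params.simulation.duration relies on);
# a minimal sketch, assuming plain dict-backed attribute access:
class Bunch(dict):
    """Dictionary whose keys are also readable and writable as attributes."""

    def __init__(self, *args, **kwargs):
        super(Bunch, self).__init__(*args, **kwargs)
        # Alias the attribute namespace to the dict itself so that
        # b.x = 1 and b["x"] = 1 stay in sync.
        self.__dict__ = self


# Attribute-style and key-style access are interchangeable:
b = Bunch(N=20, D=1)
b.res_init_g = 1.5
assert b["N"] == 20 and b.res_init_g == 1.5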