Example #1
class AWSTestTask(MatrixTask):
    axes = OD([('testcase', ['regular', 'ec2', 'ecs', 'lambda',
                             'assume_role']), ('version', ['latest', '4.4'])])

    name_prefix = 'test-aws-openssl'

    def __init__(self, *args, **kwargs):
        super(AWSTestTask, self).__init__(*args, **kwargs)
        self.add_dependency('debug-compile-aws')
        self.commands.extend([
            func('fetch build', BUILD_NAME=self.depends_on['name']),
            bootstrap(AUTH="auth",
                      ORCHESTRATION_FILE="auth-aws",
                      VERSION=self.version,
                      TOPOLOGY="server"),
            func('run aws tests', TESTCASE=self.testcase.upper())
        ])

    @property
    def name(self):
        return '-'.join([self.name_prefix, self.testcase, self.version])
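None of these snippets shows the import behind OD, but the list-of-pairs constructors used throughout suggest it is an alias for collections.OrderedDict, along the lines of this minimal sketch:

from collections import OrderedDict as OD

axes = OD([('testcase', ['regular', 'ec2']), ('version', ['latest', '4.4'])])
assert list(axes) == ['testcase', 'version']  # key order follows insertion order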
Example #2
def validate_memory_profile():
    import re
    regex = re.compile(r'PROFILE.*')

    result = None
    with open('volatilityrc', 'r') as fp:
        for line in fp:
            match = regex.search(line)
            if match:
                result = match.group().split('=')[1].replace(' ', '')

    if result is None:
        raise ValueError('no PROFILE entry found in volatilityrc')

    profile = OD()
    profile['vol'] = load_memory_profile(profile=result)
    profile['arch'] = ''

    if 'x86' in profile['vol']:
        profile['arch'] = 'x86'

    elif 'x64' in profile['vol']:
        profile['arch'] = 'x64'

    return profile
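For context, this is the kind of volatilityrc entry the regex above targets; a self-contained sketch of the same parsing (Win7SP1x64 is just an example profile name):

import re

line = 'PROFILE = Win7SP1x64'  # hypothetical volatilityrc line
result = re.search(r'PROFILE.*', line).group().split('=')[1].replace(' ', '')
assert result == 'Win7SP1x64'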
Example #3
def find_cover(graph, find_path):
    paths = OD()
    idx = 0
    total = 0

    def calc(vertices):
        return len(vertices) * sum(graph[v].value for v in vertices)

    while graph.vertices:
        print("Cover Iteration %s" % idx)
        path = find_path(graph)
        score = calc(path)
        paths[idx] = (score, path)
        for v in path:
            # print("Removing vertex %s" % v)
            graph.remove_vertex(v)
        idx += 1
        total += score
    print(paths)
    print(total)
    return paths
Example #4
 def cmp(self,
         func,
         varids=None,
         to_table=False,
         tex=True,
         filepath='',
         **kwargs_tex):
     """
     FIXME ***
     
     Input:
         func: a function that takes a model and outputs something to be
             compared
         varids: a list
         to_table: if True...
         kwargs_tex: margin... 
     """
     df = butil.DF(OD([(self.mod.id, func(self.mod)),
                       (self.mod2.id, func(self.mod2))]),
                   index=varids)
     return df
Example #5
	def __init__(self, *args, **kwargs):
		if 'filename' not in kwargs: kwargs['filename'] = 'harddrive0.qcow2'
		if 'format' not in kwargs: kwargs['format'] = 'qcow2'
		if 'snapshots' not in kwargs:
			kwargs['snapshots'] = OD()
			for file in glob(f'{kwargs["filename"]}.snap*'):
				kwargs['snapshots'][file] = None
		if 'size' not in kwargs: kwargs['size'] = 5  # in GB

		kwargs['filename'] = os.path.abspath(kwargs['filename'])
		# qemu-img create -f qcow2 disk.qcow2 5GB
		# qemu-img create -o backing_file=disk.qcow2,backing_fmt=qcow2 -f qcow2 snapshot0.cow

		for key, val in kwargs.items():
			self.__dict__[key] = val

		if not os.path.isfile(self.filename):
			if not self.create(**kwargs):
				raise ValueError(f'Could not create virtual harddrive image: {self.filename}')

		datastore['harddrives'][kwargs['filename']] = self
Example #6
def dfs_extend(graph, path, closure):
    mod_path = OD(zip(path, [True] * len(path)))
    last, val = mod_path.popitem()
    mod_path[last] = True
    edges = graph[last].edges
    choices = [e for e in edges if e not in mod_path]
    while choices:
        weights = np.array([np.e ** float(len(closure[e])) for e in choices])
        total = np.sum(weights)
        if total <= 0:
            break
        weights /= total
        last = np.random.choice(choices, p=weights)
        # last = random.choice(choices)
        mod_path[last] = True
        edges = graph[last].edges
        choices = [e for e in edges if e not in mod_path]
    print("Extended path from %s to %s" % (len(path), len(mod_path)))
    verify_path(graph, mod_path.keys())
    return mod_path.keys()
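The neighbour selection above is essentially a softmax over closure sizes; a standalone sketch of that weighting with made-up sizes (no graph or closure needed):

import numpy as np

sizes = np.array([1.0, 3.0, 2.0])                 # stand-ins for len(closure[e])
weights = np.e ** sizes
weights /= weights.sum()                          # normalise into a probability vector
chosen = np.random.choice(len(sizes), p=weights)  # index of the selected candidate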
Example #7
 def get_network_addresses(self, bind_address):
     try:
         ifaces = netifaces.interfaces()
     except NameError:
         print(
             "Please install netifaces or specify --bcastaddr on command line"
         )
         print("ImportError: No module named netifaces")
         exit(1)
     interface_broadcast_addresses = OD()
     for iface in ifaces:
         details = netifaces.ifaddresses(iface)
         for k, vals in details.items():
             if k == netifaces.AF_INET:
                 for addr in vals:
                     if 'broadcast' in addr:
                         interface_broadcast_addresses[
                             addr['addr']] = addr['broadcast']
     if bind_address != '0.0.0.0':
         return {bind_address: interface_broadcast_addresses[bind_address]}
     return interface_broadcast_addresses
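The returned OD maps each local IPv4 address to its broadcast address, unless a specific bind address is requested; hypothetical results with made-up addresses:

# get_network_addresses('0.0.0.0')       -> OD([('192.168.1.23', '192.168.1.255')])
# get_network_addresses('192.168.1.23')  -> {'192.168.1.23': '192.168.1.255'}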
Example #8
def get_parcels(parcel_ids, resources):
    data = {}
    failed_searches = []

    for resource in resources:
        print(resource)
        success, data[resource.slug], fields = v1_get_batch_data(
            parcel_ids, resource)
        if not success:
            failed_searches.append(resource.name)

    pin_data = pivot_resource_to_parcel(data)

    results = []
    for pin, data in pin_data.items():
        r = OD([('parcel_id', pin)])
        if 'geos' in data:
            r['geos'] = data['geos']
            del data['geos']

        # cleanup CKAN-specific fields
        for k, v in data.items():
            if type(v) == list:
                for row in v:
                    del row['_full_text']
                    del row['_id']

            elif type(v) == dict:
                del v['_full_text']
                del v['_id']

        # fill in missing keys
        for resource in resources:
            if resource.slug not in data:
                data[resource.slug] = []

        r['owner'] = get_owner_name(pin)
        r['data'] = data
        results.append(r)
    return results, failed_searches
Example #9
class IPTask(MatrixTask):
    axes = OD([('client', ['ipv6', 'ipv4', 'localhost']),
               ('server', ['ipv6', 'ipv4'])])

    name_prefix = 'test-latest'

    def __init__(self, *args, **kwargs):
        super(IPTask, self).__init__(*args, **kwargs)
        self.add_tags('nossl', 'nosasl', 'server', 'ipv4-ipv6', 'latest')
        self.add_dependency('debug-compile-nosasl-nossl')
        self.commands.extend([
            func('fetch build', BUILD_NAME=self.depends_on['name']),
            bootstrap(IPV4_ONLY=self.on_off(server='ipv4')),
            run_tests(IPV4_ONLY=self.on_off(server='ipv4'),
                      URI={
                          'ipv6': 'mongodb://[::1]/',
                          'ipv4': 'mongodb://127.0.0.1/',
                          'localhost': 'mongodb://localhost/'
                      }[self.client])
        ])

    def display(self, axis_name):
        return axis_name + '-' + getattr(self, axis_name)

    @property
    def name(self):
        return '-'.join([
            self.name_prefix,
            self.display('server'),
            self.display('client'), 'noauth', 'nosasl', 'nossl'
        ])

    def _check_allowed(self):
        # This would fail by design.
        if self.server == 'ipv4':
            prohibit(self.client == 'ipv6')

        # Default configuration is tested in other variants.
        if self.server == 'ipv6':
            prohibit(self.client == 'localhost')
Example #10
def single_parcel(request, pin=""):
    if not pin:
        return JsonResponse({
            'success': False,
            'help': 'parcel_id required'
        },
                            status=400)

    resources = CKANResource.objects.all()
    failed_searches = []
    data = {}
    geo = {}

    for resource in resources:
        success, data[resource.slug] = get_data(pin, resource)
        if not success:
            failed_searches.append(resource.name)
        if success and resource.has_geo:
            geo = {
                'centroid': {
                    'type':
                    'Point',
                    'coordinates': [
                        data[resource.slug][0][resource.lon_field],
                        data[resource.slug][0][resource.lat_field]
                    ]
                },
                'boundary': {}
            }

    response = OD([
        ('success', True),
        ('help', 'Data for parcel {}.'.format(pin)),
        ('geo', geo),
        ('owner', get_owner_name(pin)),
        ('data', data),
        ('failed_searches', failed_searches),
    ])

    return JsonResponse(response)
Example #11
    def _split_px(self, px_doc):
        """
        Parses metadata keywords from px_doc and inserts those into self object
        Returns the data part
        """
        if isinstance(px_doc, basestring):
            px_doc = open(px_doc, 'U')
        meta, data = px_doc.read().split("DATA=")
        meta = unicode(meta, 'windows_1250')
        data = unicode(data, 'windows_1250')
        nmeta = {}
        for line in _iterate_px_entries(meta.strip()):
            if not line:
                continue
            m = self._subfield_re.match(line)
            if m:
                field, subkey, value = self._get_subfield(m, line)
                if hasattr(self, field):
                    getattr(self, field)[subkey] = value
                else:
                    setattr(self, field, OD([(subkey, value)]))
            else:
                field, value = line.split('=', 1)
                if not field.startswith('NOTE'):
                    try:
                        setattr(self,
                                field.strip().lower(),
                                self._clean_value(value))
                    except UnicodeEncodeError:
                        # Weirdly encoded PX files cause sometimes the
                        # "statements" to be split wrongly, causing non-ascii
                        # characters to appear in the 'field'. To save
                        # the dataset, just ignore such cases here.
                        # See https://github.com/statfi/opendata/issues/3
                        raise PxSyntaxError(
                            "Non-ascii field in PX file. Probably due to weird usage of semicolons."
                        )

                    #TODO: NOTE keywords can be standalone or have subfields...
        return data.strip()[:-1]
Example #12
    def rec_tree_info(self, ijson):
        doc_ids = ijson['doc_id']
        #doc_ids = doc_ids + ['_'.join(map(str, doc_ids))]
        dc_str = '_'.join(map(str, doc_ids))
        doc_ids = doc_ids + [dc_str]
        res_dct = {'consolidated': []}
        for idx, doc in enumerate(doc_ids, 1):
            path = '/var/www/html/WorkSpaceBuilder_DB/39/1/pdata/docs/%s/NGramAnalysis/Main/Lexical_Tree.txt'%(doc)
            con_flg = 0
            if idx == len(doc_ids):
                con_flg = 1
                path = '/var/www/html/WorkSpaceBuilder_DB/39/1/pdata/NGramAnalysis/Main_%s/Lexical_Tree.txt'%(doc)
            node_info_dct = self.node_tree_info_read(doc, con_flg)
            with open(path) as f:
                txt_data = f.readlines()
            check_dct = OD()
            for ln in txt_data:
                m_id, m_info = ln.strip().split('@')
                get_node_info = node_info_dct.get(m_id, {})
                check_dct[m_id] = [0, m_info, len(m_id.split('.')) - 1, get_node_info]
                mid_lst = m_id.split('.')
                if len(mid_lst) > 1:
                    ch_dt_str = '.'.join(mid_lst[:-1])
                    check_dct[ch_dt_str][0] += 1  

            resultant_lst = []
            for m_id, val_tup in check_dct.items():
                hasChild = bool(val_tup[0])
                dt_dct = {'id': m_id, 'info': val_tup[1], 'level_id': val_tup[2],
                          '$$treeLevel': val_tup[2], 'hasChild': hasChild,
                          'node_info': val_tup[3]}
                resultant_lst.append(dt_dct)
            if con_flg:
                res_dct['consolidated'] = resultant_lst
            else:
                res_dct[str(doc)] = resultant_lst
        
        return [{'message':'done', 'data':res_dct}]
Example #13
def HR(layers_to_show=['isochrones'],
       state_options='default',
       vsize=big,
       grid=True):
    """
    LAYERS_TO_SHOW can be dict of subsets' states (eg ss_cb) or a list of subset state names
    STATE_OPTIONS = OD([ attribute, value ]) for the layer state, from
        http://docs.glueviz.org/en/stable/api/glue.viewers.scatter.state.ScatterLayerState.html
    GRID draws background grid
    """
    dc, isos = dclist
    lts = layers_to_show
    so = state_options
    phr = gapp.new_data_viewer(ScatterViewer)
    phr.LABEL = 'HR'
    phr.position = (0, 50)
    phr.viewer_size = vsize

    phr.add_data(isos)
    st = phr.state
    st.x_att = isos.id['log_Teff']
    st.y_att = isos.id['log_L']
    st.flip_x()

    if so == 'default':  # so = OD([('size', 3)])
        so = OD([('cmap_mode', 'Fixed'), ('points_mode', 'markers'),
                 ('size', 3), ('alpha', 1.0)])
    pl.show_layers(st, lts, so, clear=True)

    ax = phr.axes
    ax.set_title('HR')
    ax.set_xlabel('log(T$_{eff}$)')
    ax.set_ylabel('log(L)')
    if grid:
        ax.grid(linestyle='-', linewidth='0.5', color='0.7')
    plt.tight_layout()
    ax.figure.canvas.draw()  # update the plot

    return phr
Example #14
def get_features(dir, read=True, download=True):
    if read:
        if download:
            vgg_net = vis.models.vgg16(pretrained="imagenet", progress=True)
        else:
            ## Load model parameters from path
            vgg_net = vis.models.vgg16()
            vgg_net.load_state_dict(torch.load('./models/vgg16-397923af.pth'))

        jpg_files = ds.images_info(dir)

        ## Set requires to eliminate space taken for grads
        for p in vgg_net.parameters():
            p.requires_grad = False

        ## Net architecture
        print(vgg_net)
        # summary(vgg_net, input_size=(3, 224, 224))
        ## Remove the last classifier layer: Softmax
        print("Removing softmax layer of VGG16 ... ")
        vgg_net.classifier = vgg_net.classifier[:-1]
        print(vgg_net)
        # summary(vgg_net, input_size=(3, 224, 224))

        ## Read images with specified transforms
        print("Reading images ... ", end='')
        images = ds.read_image(dir, normalize=True, resize=224, tensor=True)
        print("done.")
        # print(images.keys())
        ## Get feature map for image tensor through VGG-16
        img_featrs = OD()
        print("Gathering images' features from last conv layer ... ", end='')
        for i, jpg_name in enumerate(images.keys()):
            with torch.no_grad():
                print(i, jpg_name)
                img_featrs[jpg_name] = vgg_net(images[jpg_name].unsqueeze(0))
        print("done.")

        return img_featrs
Example #15
def calculate_tfs_idfs(outputs):
    # treating sentences as a document
    counts_by_sent = OD()
    num_docs_with_term = Counter()
    num_docs = len(outputs)

    for i, extrs in outputs.items():
        counts_by_sent[i] = Counter()
        buffer = []
        # split up extractions
        for ex in extrs:
            # split up args
            for e in ex:
                # split up words
                e = e.replace('[attrib=', '')
                e = e.replace('[enabler=', '')
                e = e.replace(']', '')
                up = e.strip('"')
                words = up.split()
                counts_by_sent[i].update(words)
                for w in words:
                    if w not in buffer:
                        buffer.append(w)
        num_docs_with_term.update(buffer)

    tfs = {
        sent: {
            word: count / sum(counter.values())
            for word, count in counter.items()
        }
        for sent, counter in counts_by_sent.items()
    }

    idfs = {
        term: math.log(num_docs / freq)
        for term, freq in num_docs_with_term.items()
    }

    return tfs, idfs
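As a sanity check of the weighting, the same tf and idf arithmetic on a toy two-"document" corpus (made-up words):

import math
from collections import Counter

counts = Counter(['the', 'cat', 'sat', 'cat'])  # word counts for one sentence-document
tf_cat = counts['cat'] / sum(counts.values())   # 2 / 4 = 0.5
idf_cat = math.log(2 / 1)                       # 'cat' appears in 1 of 2 documents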
Example #16
def add_dimension(source, target, variable, dimension, position, value,
                  new_name):
    if position != 2:
        raise NotImplementedError(
            "Can only insert a new dimension at position 2 for now.\n" +
            "Got: {}".format(position))
    if new_name is None:
        new_name = variable
    ds = xr.open_dataset(source)
    dimensions = list(ds[variable].dims)
    dimensions.insert(1, dimension)
    da = xr.DataArray(
        name=new_name,
        data=[[v] for v in ds[variable].values],
        coords=OD(
            (c, ([value] if c == dimension else ds[variable].coords[c].values))
            for c in dimensions),
        dims=dimensions,
    )
    da.attrs.update(ds.attrs)
    merged = xr.merge([ds[v] for v in ds.data_vars if v != variable] + [da])
    merged.to_netcdf(target, format="NETCDF3_64BIT", unlimited_dims=("time", ))
Example #17
class DNSTask(MatrixTask):
    axes = OD([('auth', [False, True]),
               ('ssl', ['openssl', 'winssl', 'darwinssl'])])

    name_prefix = 'test-dns'

    def __init__(self, *args, **kwargs):
        super(DNSTask, self).__init__(*args, **kwargs)
        sasl = 'sspi' if self.ssl == 'winssl' else 'sasl'
        self.add_dependency('debug-compile-%s-%s' %
                            (sasl, self.display('ssl')))

    @property
    def name(self):
        return self.name_prefix + '-' + '-'.join(
            self.display(axis_name)
            for axis_name in self.axes if getattr(self, axis_name))

    def to_dict(self):
        task = super(MatrixTask, self).to_dict()
        commands = task['commands']
        commands.append(func('fetch build',
                             BUILD_NAME=self.depends_on['name']))

        orchestration = bootstrap(TOPOLOGY='replica_set',
                                  AUTH='auth' if self.auth else 'noauth',
                                  SSL='ssl')

        if self.auth:
            orchestration['vars']['AUTHSOURCE'] = 'thisDB'
            orchestration['vars']['ORCHESTRATION_FILE'] = 'auth-thisdb-ssl'

        commands.append(orchestration)
        commands.append(
            run_tests(SSL='ssl',
                      AUTH=self.display('auth'),
                      DNS='dns-auth' if self.auth else 'on'))

        return task
Example #18
def get_features(images, download_wts=False, save=False):

    if download_wts:
        vgg_net = vis.models.vgg16(pretrained="imagenet", progress=True)
    else:
        ## Load model parameters from path
        vgg_net = vis.models.vgg16()
        vgg_net.load_state_dict(torch.load('./models/vgg16-397923af.pth'))

    for p in vgg_net.parameters():
        p.requires_grad = False

    ## Net architecture
    print(vgg_net)
    # summary(vgg_net, input_size=(3, 224, 224))

    ## Remove the last classifier layer: Softmax
    print("Removing softmax layer of VGG16 ... ")
    vgg_net.classifier = vgg_net.classifier[:-1]
    print(vgg_net)
    # summary(vgg_net, input_size=(3, 224, 224))

    # print(images.keys())

    ## Get feature map for image tensor through VGG-16
    img_featrs = OD()
    vgg_net.eval()
    print("Gathering images' features from last layer of %s ... " %
          type(vgg_net).__name__)
    for i, jpg_name in enumerate(images.keys()):
        with torch.no_grad():
            print(i, jpg_name)
            img_featrs[jpg_name] = vgg_net(images[jpg_name].unsqueeze(0))

    if save:
        print("Saving extracted features ... ", end="")
        torch.save(img_featrs, 'features_' + type(vgg_net).__name__ + '.pkl')
        print("done.")
    return img_featrs
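Since classifier[:-1] only drops the final 1000-way layer, the network now ends at the second 4096-unit fully connected block, so each stored feature should be a (1, 4096) tensor; a quick check, assuming images is the same name-to-tensor mapping used above:

feats = get_features(images, download_wts=False)
first = next(iter(feats))
print(feats[first].shape)  # expected: torch.Size([1, 4096])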
Example #19
 def fircalc_cb1(self):
     cmd = 'read' if self.read else 'write'
     bdata = self.bank_data()
     tdata = Data()
     tdata.add_page('taps',
                    cmds=OD((k, bdata.cmds[k]) for k in ['ntaps', 'nbits']))
     tdata.add('nbank',
               label='bank #',
               wdgt='combo',
               state='readonly',
               text='0',
               value=['0', '1'])
     dlg = Control(data=tdata, parent=self.root, title='%s taps' % cmd)
     dlg.add_buttons_ok_cancel()
     dlg.center()
     dlg.do_modal()
     if not hasattr(dlg, 'kw'):
         return
     ntaps = dlg.kw['ntaps']
     self.nbits = int(dlg.kw['nbits'])
     nbank = int(dlg.kw['nbank'])
     n = ceil(int(ntaps) / 2)
     offset = n if nbank else 0
     dev = self.data.dev
     if self.read:
         self.taps = []
         for i in range(0, n):
             self.qo.put('tap ALT.firii %s %s %d' %
                         (dev['ip_addr'], dev['altname'], i + offset))
     else:
         taps = self.get_taps_fmt(fmt=self.nbits)
         taps[n:] = []
         for i in range(0, n):
             self.qo.put(
                 'tap ALT.firii %s %s %d %s' %
                 (dev['ip_addr'], dev['altname'], i + offset, taps[i]))
     self.pb['maximum'] = self.qo.qsize()
     return True
Example #20
def prep_tsv_all(name,files,ALL):
	"""
		Input: 
		----------
	    	Function: makes a TSV file per sample for 
		----------
		Output: TSV file per sample

	"""

	parts = files.split("/")[6]
	#print(parts)

	
	file_writer = open('/home/shared_data_core/COLON/subclonality/'+ALL+'/pyclone_%s.tsv'% parts.lower(),'w+')
	file_writer.write('mutation_id\tref_counts\tvar_counts\tnormal_cn\tminor_cn\tmajor_cn\tvariant_case\tvariant_freq\tgenotype\n')	# add header to outfile

	global variant_info
	variant_info = OD([
		('mutation_id', str()),		# unique id for every mutation
		('ref_counts', int()),		# number of reads covering the mutation which contain the reference (genome) allele
		('var_counts', int()),		# number of reads covering the mutation which contain the variant allele
		('normal_cn', int()),		# copy number of the cells in the normal population. For autosomal chromosomes this will be 2 and for sex chromosomes it could be either 1 or 2
		('minor_cn', int()),		# minor copy number of the cancer cells. Usually this value will be predicted from WGSS or array data
		('major_cn', str()), 		# major copy number of the cancer cells. Usually this value will be predicted from WGSS or array data
		('variant_case', str(name)),	# patient_name
		('variant_freq', float()),	# 1 - (ref_counts/(ref_counts + var_counts))
		('genotype', str()),		# genotype of the mutation
		('nr', int()), 			# count per mutation
		('chr', str()), 		# chromosome where mutations lies
		('pos', int()),			# position on chromosome
		('REF', str()), 		# reference genome
		('ALT', str()),			# alternative, mutation genome
		('origin', str())		# pt = primary tumor or m = metastases
	])

	return variant_info,file_writer
Example #21
def single(request):
    try:
        pin = request.GET['parcel_id']
    except KeyError:
        return JsonResponse({
            'success': False,
            'help': 'parcel_id required'
        },
                            status=400)

    resources = CKANResource.objects.all()
    failed_searches = []
    data = {}
    geo = {}

    for resource in resources:
        success, data[resource.slug] = get_data(pin, resource)
        if not success:
            failed_searches.append(resource.name)
        if success and resource.has_geo:
            try:
                geo = {
                    'latitude': data[resource.slug][0][resource.lat_field],
                    'longitude': data[resource.slug][0][resource.lon_field]
                }
            except Exception:
                geo = {'latitude': '', 'longitude': ''}

    response = OD([
        ('success', True),
        ('help', 'Data for parcel {}.'.format(pin)),
        ('geo', geo),
        ('owner', get_owner_name(pin)),
        ('results', data),
        ('failed_searches', failed_searches),
    ])

    return JsonResponse(response)
Example #22
def get_ctrl(dev):
    ctrl_buttons = OD([('Reset', reset_cb), ('Write', write_cb)])
    data = Data(buttons=ctrl_buttons, io_cb=dev_io_cb)
    data.add_page('Generator', send=True)
    data.add('freq',
             label='Frequency, MHz',
             wdgt='spin',
             value={
                 'min': 0.1,
                 'max': 12400,
                 'step': 0.1
             },
             text='1451')
    data.add('amp',
             label='Amplitude, dBm',
             wdgt='spin',
             value={
                 'min': -40,
                 'max': -6,
                 'step': 0.5
             },
             text='-40')
    return data
Example #23
 def __init__(self, Prime=0, RunID=0, JL=[]):
     if (Prime < 1) or (Prime > MAXPRIME) or (RunID != 0):
         raise InvalidPrimeThread  # out of range
     PrimeBase.__init__(self, Prime, RunID)
     PrimeBase.THL.acquire()
     if True:  # only for a good block visibility
         # PRIMES still holds the input values
         self._dist = product_Primes(PRIMES)  # distances of start points
         self._size = length_Jumps(PRIMES)  # length of input JumpList
         self._Jumpers = DQ([])  # alternatively use a bytearray
         self._Strikes = set([])  # numbers to be removed
         PRIMES.append(Prime)  # set the next step of primes
         print(PRIMES, flush=True)
         # PRIMES now holds the output values
         self.__maxx = product_Primes(PRIMES)  # size of frame inside
         self.__outL = length_Jumps(PRIMES)  # assumed length of target list
         self._OutList = OD([])
         # self.__OutList = DQ([])               # alternatively use a bytearray
         #
     PrimeBase.THL.release()
     if len(JL) > 0:
         self.AddJumpList(JL)  # get the first jumps and create the strikes
Example #24
 def testInitialCommit(self):
   r = TestRepo('foo', TestClock())
   ref = r['refs/heads/master']
   ref.make_full_tree_commit('Initial Commit', {
     'cool_file': 'whazzap',
     'subdir': {
       'crazy times': 'this is awesome'
     }
   })
   self.assertEqual(list(r.refglob()), [ref])
   self.assertEqual(r.snap(include_committer=True), {
     'refs/heads/master': OD([
       ('b7c705ceddb223c09416b78e87dc8c41e7035a36', [
         # 'line too long' pylint: disable=C0301
         'committer Test User <*****@*****.**> 2014-06-13 00:09:06 +0800',
         '',
         'Initial Commit'
       ])
     ])
   })
   self.assertEqual('whazzap', r.run('cat-file', 'blob', 'master:cool_file'))
   self.assertEqual('this is awesome',
                    r.run('cat-file', 'blob', 'master:subdir/crazy times'))
Example #25
class Base_2D_Grid_2_Lines(unittest.TestCase):
    """
    Base class holding setup and cleanup methods to make a 2D grid with only 2
    emission lines, and using a 2D Gaussian to make the grid.  There are only
    two lines, but one has fluxes set to all 1 and is just for normalisation.
    """
    params = ["p1", "p2"]
    param_range_dict = OD([("p1", (-5, 3)), ("p2", (1.2e6, 15e6))])
    n_gridpts_list = (11, 9)  # Number of gridpoints in each dimension
    interpd_shape = (50, 45)
    lines = ["L1", "L2"]  # Line names
    line_peaks = [8, 5]  # Gridpoint indices from zero

    @classmethod
    def setUpClass(cls):
        """ Make grid and run NebulaBayes to obtain the result object """
        line_peaks_dict = OD([(l, cls.line_peaks) for l in cls.lines])
        cls.DF = build_grid(cls.param_range_dict, line_peaks_dict,
                            cls.n_gridpts_list)
        cls.val_arrs = OD([(p, np.unique(cls.DF[p].values))
                           for p in cls.params])
        cls.DF.loc[:, "L1"] = 1.  # We'll normalise by this line
        cls.grid_file = os.path.join(TEST_DIR, cls.__name__ + "_grid.csv")
        cls.DF.to_csv(cls.grid_file, index=False)

        cls.NB_Model_1 = NB_Model(cls.grid_file,
                                  cls.params,
                                  cls.lines,
                                  interpd_grid_shape=cls.interpd_shape)

    @classmethod
    def tearDownClass(cls):
        """ Remove the output when tests in this class have finished """
        if clean_up:
            os.remove(cls.grid_file)
            if hasattr(cls, "posterior_plot"):
                os.remove(cls.posterior_plot)
Example #26
def render_layer(db, layer, width, css):

    out = css["sym"]["left"]
    try:
        out += css["names"][layer]
    except KeyError:
        out += layer

    if db.is_selected(layer=layer):
        rendered_subelements = OD()
        # TODO
        if db.is_text_layer(layer):
            # in case of flat text element, just print it
            rendered_subelements[layer] = db.get_text_layer(layer)
        else:
            for f in db.get_fnames_in_layer(layer):
                render = render_fname(db, layer, f, 0, css)
                rendered_subelements[f] = render

        totalwidth = 0
        for s in rendered_subelements.values():
            totalwidth += len(s)

        #put it back together
        out += " " + " ".join(rendered_subelements.values()) + " "
        right = css["sym"]["right"]
        space = width - len(out) - len(right)
        if space >= 0:
            out += " " * space + right
        else:
            print "width %d space %d" % (width, space)
            out = out[:width - len(right)] + right
    else:
        # not selected layer
        out += css["sym"]["right"]

    return out
Example #27
def _eqn2stoich(eqn):
    """Convert reaction equation (a str) to stoichiometry (a mapping).
    """
    def _unpack(s):
        unpacked = list(filter(None, s.split(' ')))  # an example of s: ' 2 ATP '
        if len(unpacked) == 1:
            sc_unsigned, spid = 1, unpacked[0]  # sc: stoichcoef
        elif len(unpacked) == 2:
            sc_unsigned, spid = eval(unpacked[0]), unpacked[1]
        else:
            raise ValueError('cannot parse species term: %r' % s)
        return spid, sc_unsigned
    
    # remove annotating species
    # eg, '(ST+)P->(ST+)G1P', where 'ST' (starch) is kept there to better
    # represent the chemistry
    eqn = re.sub(r'\(.*?\)', '', eqn)
    
    # re: '<?': 0 or 1 '<'; '[-|=]': '-' or '=' 
    subs, pros = re.split('<?[-|=]>', eqn)
    stoich = OD()
    
    if subs:
        for sub in subs.split('+'):
            subid, sc_unsigned = _unpack(sub)
            stoich[subid] = -1 * sc_unsigned
            
    if pros:
        for pro in pros.split('+'):
            proid, sc_unsigned = _unpack(pro)
            if proid in stoich:
                stoich[proid] = stoich[proid] + sc_unsigned
            else:
                stoich[proid] = sc_unsigned
        
    return stoich
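A worked example of the parsing above (the reaction string is made up):

stoich = _eqn2stoich('2 ADP <-> ATP + AMP')
assert dict(stoich) == {'ADP': -2, 'ATP': 1, 'AMP': 1}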
Example #28
 def render(self, templatefilename, **kwargs):
     URL = self.absolute_reverse_url
     form = kwargs['form']
     data = OD()
     data['type'] = 'form'
     data['iuid'] = form['_id']
     data['title'] = form['title']
     data['version'] = form.get('version')
     data['description'] = form.get('description')
     data['owner'] = dict(
         email=form['owner'],
         links=dict(api=dict(href=URL('account_api', form['owner'])),
                    display=dict(href=URL('account', form['owner']))))
     data['status'] = form['status']
     data['modified'] = form['modified']
     data['created'] = form['created']
     data['links'] = dict(api=dict(href=URL('form_api', form['_id'])),
                          display=dict(href=URL('form', form['_id'])))
     data['orders'] = dict(
         count=self.get_order_count(form),
         # XXX Add API href when available.
         display=dict(href=URL('form_orders', form['_id'])))
     data['fields'] = form['fields']
     self.write(data)
Example #29
def new_transaction():
    global this_transactions
    # Error handling for incomplete request data
    if request.form['sender_add'] != address_u:
        return '{ "warning": "送信元が異なります。" }', 406  # "the sender address does not match"
    if request.form['recipient_add'] == "" or request.form['amt'] == "":
        return '{ "warning": "データが不足です。" }', 406  # "required data is missing"
    if int(request.form['amt']) != UTXOPoolFn(address_u):
        return '{ "warning": "金額が異なります。" }', 406  # "the amount does not match"
    for i in range(0, len(this_transactions)):
        if this_transactions[i]["sender"] == request.form['sender_add']:
            return '{ "warning": "この送信元はすでに受付中です。" }', 406  # "this sender already has a pending transaction"

    new_transaction = OD([("sender", request.form['sender_add']),
                          ("recipient", request.form['recipient_add']),
                          ("amount", int(request.form['amt']))])
    """
    new_transaction = OD([("sender", "1DCtpKZebwxmtJQZBt1rLZ9AqoAL2BXsKs"),
                       ("recipient", "1DCtpKZebwxmtJQZBt1rLZ9AqoAL2BXsKs"), 
                       ("amount",3 )])
    """
    new_transaction = TxMetaDataFn(new_transaction)

    return jsonify(new_transaction), 201
Example #30
 def load_observable(self, item, datatag, caseid):
     datatag_mapping = {
         'urls': 'url',
         'fqdns': 'fqdn',
         'src_ip': 'ip',
         'dst_ip': 'ip',
         'hashes': 'hash',
         'src_email': 'other',
         'dst_email': 'other',
         #'email_subj' : 'other',
         'ldap_user': '******',
         'ldap_host': 'other'
     }
     self.observable = OD()
     self.observable['tlp'] = self.hivecase['tlp']
     self.observable['tags'] = [
         'autobot', "CASE-{0}".format(caseid),
         datatag.upper(), self.hivecase['tags'][1]
     ]
     self.observable['data'] = item
     self.observable['status'] = 'Ok'
     self.observable['message'] = self.hivecase['title']
     self.observable['dataType'] = datatag_mapping[datatag]
     self.observable['startDate'] = self.hivecase['startDate']