Example #1
def main(argv):
    parser = argparse.ArgumentParser(description='Create or update cfn resource schema')
    parser.add_argument('--update', action='store_true')
    parser.add_argument('--type', metavar='TYPE',
                        help='Restrict parsing resource type properties only to'
                        ' type TYPE. Example: --type AWS::ApiGateway::RestApi')
    parser.add_argument('dest', nargs='?', help='Write resulting schema into FILE'
                        ' instead of just printing it')

    args = parser.parse_args(argv[1:])

    sess = CacheControl(requests.Session(),
                        cache=FileCache('.web_cache'))
    requests.get = sess.get

    stage1 = 'resource-stage1.json'
    if args.update:
        if not args.dest:
            print >> sys.stderr, ('Error: if --update is given, `dest` must be'
                                  ' specified too')
            return 2
        stage1_schema = tools.load(stage1)
        resource_schema = tools.load(args.dest)
        resource_schema['definitions']['resource_template'] = \
            stage1_schema['definitions']['resource_template']
    else:
        resource_schema = tools.load(stage1)

    resource_type_names = tools.get_all_resource_type_names()
    tools.update_all_resource_patterns_by_name(
        resource_schema,
        resource_type_names
    )

    if args.type:
        resource_type_names = [args.type]

    for resource_type_name in resource_type_names:
        print >> sys.stderr, resource_type_name
        resource_properties.set_resource_properties(resource_schema, resource_type_name)

    del resource_schema['definitions']['resource_template']

    all_properties = resource_properties.all_res_properties()
    resource_schema['definitions']['property_types'] = all_properties
    for rpt_name, rpt_schema in all_properties.items():
        print >> sys.stderr, rpt_name
        resource_properties.set_resource_property_type_properties(
            resource_schema,
            rpt_name
        )

    tweak_resource_schema.apply_all_tweaks(resource_schema)

    if args.dest:
        tools.write(resource_schema, args.dest)
    else:
        print tools.print_(resource_schema)

    return 0
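
The tools helpers used throughout these examples are not shown. As orientation only, here is a minimal sketch of what a JSON-backed load/write/print_ trio compatible with Example #1 could look like; the names mirror the calls above, but the actual implementation in the source project may differ.

import json

def load(path):
    # Assumed behaviour: read a JSON document from disk.
    with open(path) as f:
        return json.load(f)

def write(obj, path):
    # Assumed behaviour: write a JSON document with a stable key order.
    with open(path, 'w') as f:
        json.dump(obj, f, indent=2, sort_keys=True)

def print_(obj):
    # Assumed behaviour: return a pretty-printed JSON string.
    return json.dumps(obj, indent=2, sort_keys=True)
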
Example #2
def main(input_dir=settings.data_dir_20ng,
         label_ratio=0.1,
         time=0,
         output_dir=None):
    logger = logging.getLogger(__name__)

    if output_dir is None:
        output_dir = input_dir
    tools.make_sure_path_exists(output_dir)
    deltas = tools.load(os.path.join(input_dir, settings.deltas_file))
    classes = tools.load(os.path.join(input_dir, settings.classes_file))
    nos, hier_tree = generate_hier_info(deltas, classes, input_dir)
    sub_dir = os.path.join(input_dir, str(label_ratio), str(time))
    output_dir = os.path.join(output_dir, str(label_ratio), str(time))
    tools.make_sure_path_exists(output_dir)

    logger.info(logconfig.key_log('Input dir', sub_dir))
    logger.info(logconfig.key_log('Output dir', output_dir))

    cat_hier_file = os.path.join(output_dir, settings.cat_hier_file)
    if not os.path.exists(cat_hier_file):
        shutil.copyfile(os.path.join(input_dir, settings.cat_hier_file),
                        os.path.join(output_dir, settings.cat_hier_file))
    if os.path.exists(os.path.join(output_dir, settings.labeled_svmlight_file)) and \
        os.path.exists(os.path.join(output_dir, settings.dataless_svmlight_file)) and \
            os.path.exists(os.path.join(output_dir, settings.test_svmlight_file)):
        return
    data_managers = load_data_managers(sub_dir)
    generate_svmlight_format(data_managers, nos[-1], output_dir)
Example #3
def main(input_dir=settings.data_dir_20ng,
         label_ratio=0.1,
         time=0,
         sparse_format=False):
    logger = logging.getLogger(__name__)
    logger.info(logconfig.key_log(logconfig.DATA_NAME, input_dir))

    depth_files = []
    for file_name in os.listdir(input_dir):
        if file_name.startswith('depth'):
            depth_files.append(file_name)
    depth_files.sort()

    data = tools.load(os.path.join(input_dir, depth_files[-1]))
    classes = tools.load(os.path.join(input_dir, settings.classes_file))
    train_test_idx = tools.load(
        os.path.join(input_dir, settings.train_test_idx_file))
    train_idx = train_test_idx['train_idx']

    output_dir = os.path.join(input_dir, str(label_ratio), str(time))

    logger.info(
        logconfig.key_log(logconfig.FUNCTION_NAME, 'split_label_unlabel'))
    label_idx, unlabel_idx = split_label_unlabel(data,
                                                 train_idx,
                                                 classes[-1],
                                                 label_ratio,
                                                 output_dir,
                                                 seed=time)

    logger.info(logconfig.key_log(logconfig.FUNCTION_NAME, 'process_dataset'))
    [labeled_data_manager, unlabeled_data_manager, test_data_manager], vocab_info = \
        process_dataset(input_dir, output_dir, sparse_format=sparse_format)
    logger.info(
        logconfig.key_log('VocabularySize', str(len(vocab_info['stoi']))))
Example #4
def run():
    '''
    The launcher of the app
    '''

    tools.load()
    if not settings.ENCRYPTION_KEY:
        SetEncryptionKey()
    if settings.ENCRYPTION_KEY:
        app = PasswordWallet()
        app.mainloop()
def load_data_managers(input_dir, filter_words=True):
    from build_data_managers import DataManager
    labeled_data = tools.load(os.path.join(input_dir, settings.labeled_data_manager_file))
    unlabeled_data = tools.load(os.path.join(input_dir, settings.unlabeled_data_manager_file))
    test_data = tools.load(os.path.join(input_dir, settings.test_data_manager_file))
    if filter_words:
        non_zero_indices = np.nonzero(labeled_data.xit)
        non_zero_columns = sorted(set(non_zero_indices[1]))
        labeled_data.xit = labeled_data.xit[:,non_zero_columns]
        unlabeled_data.xit = unlabeled_data.xit[:,non_zero_columns]
        test_data.xit = test_data.xit[:,non_zero_columns]
    return [labeled_data, unlabeled_data, test_data]
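
The filter_words branch above keeps only the columns that are non-zero in the labeled split and applies the same column selection to the unlabeled and test matrices. A small self-contained NumPy illustration of that selection (toy matrix; the real xit matrices may be sparse, in which case the indexing details differ):

import numpy as np

labeled = np.array([[0, 2, 0, 1],
                    [0, 0, 0, 3]])
non_zero_columns = sorted(set(np.nonzero(labeled)[1]))  # columns 1 and 3
filtered = labeled[:, non_zero_columns]
assert non_zero_columns == [1, 3]
assert filtered.shape == (2, 2)
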
Example #6
    def render(self):

        def handle(tag,*args,**keywords):
            if hasattr(self, tag):
                attr = getattr(self, tag)
                if callable(attr):
                    repl = attr(self, *args, **keywords)
                else:
                    repl = attr
                return fill('<dz:','>', repl, handle)

            if tag in helpers.__dict__ and callable(helpers.__dict__[tag]):
                """call functions in a module or module-like object"""
                helper = helpers.__dict__[tag]
                return fill('<dz:','>', helper(*args, **keywords), handle)

        def set_setting(thing, name):
            if thing=='template':
                self.template = name
            elif thing=='app_title':
                self.app_title = name
            return '<!-- %s set to "%s" -->' % (thing, name)

        def render_snippet(system_snippet, page_snippet):
            return '\n'.join(system_snippet.union(set([page_snippet])))


        DEFAULT_TEMPLATE = os.path.join(system.root,'themes','default','default.html')

        self.content = fill('<dz:set_','>',self.content,set_setting)

        self.js   = render_snippet(system.js, self.js)
        self.css  = render_snippet(system.css, self.css)
        self.head = render_snippet(system.head, self.head)
        self.tail = render_snippet(system.tail, self.tail)

        template_pathname = system.theme_path 
        if template_pathname:
            template_filename = os.path.join(template_pathname, self.template+'.html')
            if not os.path.exists(template_filename):
                if not self.template in ['index','content']:
                    log.logger.warning('template missing (%s)' % (template_filename))
                template_filename = os.path.join(template_pathname, 'default.html')
        self.tpl = template_pathname and tools.load(template_filename) or tools.load(DEFAULT_TEMPLATE)

        content = fill('<dz:','>',self.tpl,handle)
        if self.callback:
            content = fill('{{','}}',content,self.callback)

        return HTMLResponse(content)
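
Both render() variants depend on a fill(prefix, suffix, text, callback) helper that is not included in these examples. The regex-based sketch below only illustrates the assumed contract (the tag name and any whitespace-separated arguments are handed to the callback, and tags the callback declines are left in place); the real helper is most likely more elaborate.

import re

def fill(prefix, suffix, text, callback):
    # Replace prefix + name + suffix tags (e.g. <dz:title> or {{name}}) with
    # whatever the callback returns for that tag name and its arguments.
    pattern = re.compile(
        re.escape(prefix) + r'([A-Za-z_]\w*)([^>}]*)' + re.escape(suffix))

    def substitute(match):
        name, raw_args = match.group(1), match.group(2)
        result = callback(name, *raw_args.split())
        # A callback returning None means "leave the tag untouched".
        return match.group(0) if result is None else str(result)

    return pattern.sub(substitute, text)
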
Example #7
  def gen_mcpar(self,fname):
    """
    This routine generates MC parameters to visualize the impact with respect to
    the original covariance matrix.
    """

    H0=load('data/old.hess')
    H=load('data/'+fname+'.hess')
    ndim=H0.shape[0]
    I=[i for i in range(ndim) if np.isnan(H[0][i])==False]
    nrdim=len(I)

    RH0=np.copy(H0[I,:][:,I])
    RH=np.copy(H[I,:][:,I])

    P0=self.P0[I]
    SAMPLE0 = np.random.multivariate_normal(P0,LA.inv(RH0),10000).T
    SAMPLE  = np.random.multivariate_normal(P0,LA.inv(RH0+RH),10000).T
    names=[self.order[i] for i in I]

    ncols,nrows=3,4
    py.figure(figsize=(ncols*5,nrows*3))
    for i in range(nrdim):
      print i
      ax=py.subplot(nrows,ncols,i+1)
      Range=None
      bins=100
      ax.hist(SAMPLE0[i],histtype='stepfilled',range=Range,color='Yellow',bins=bins)
      ax.hist(SAMPLE[i],histtype='stepfilled',range=Range,color='b',bins=bins)

      pmap={}
      pmap['BLNY']=r'$BLNY$'
      pmap['Nu_c']=r'$N_{u(c)}$'
      pmap['Nd_c']=r'$N_{d(c)}$'
      pmap['au_c']=r'$a_{u(c)}$'
      pmap['ad_c']=r'$a_{d(c)}$'
      pmap['bu_c']=r'$b_{u(c)}$'
      pmap['bd_c']=r'$b_{d(c)}$'
      pmap['Nu_T']=r'$N_{u(T)}$'
      pmap['Nd_T']=r'$N_{d(T)}$'
      pmap['au_T']=r'$a_{u(T)}$'
      pmap['ad_T']=r'$a_{d(T)}$'
      pmap['bu_T']=r'$b_{u(T)}$'
      pmap['bd_T']=r'$b_{d(T)}$'

      ax.text(0.1,0.8,pmap[names[i]],transform=ax.transAxes,size=20)
      #ax.legend(frameon=0,fontsize=20,loc='best')
      #ax.tick_params(axis='both', which='major', labelsize=20)
    py.tight_layout()
    py.savefig('gallery/%s.pdf'%fname)
Example #8
def initialize(options_path: str) -> bool:
    """Initialize all global variables for usage in different modules.

    Uses Munch() to give JavaScript-style attribute access to dicts. Initializes
    empty files if none are found.

    Args:
        options_path: String path that points to the options JSON file.

    Returns:
        True
    """
    global OPT
    global USR
    global GLOC
    global GLD
    global COMMANDS
    global ACHIEVES
    global IDLE

    # Load options
    OPT = tools.load(options_path)

    # Initialize guild and user files if they do not exist yet
    if Path(OPT.users_path).exists() is False:
        print("Making a new users file. I was probably just installed.")
        tools.initialize_empty(OPT.users_path)
    if Path(OPT.guilds_path).exists() is False:
        print("Making a new guilds file. I was probably just installed.")
        tools.initialize_empty(OPT.guilds_path)

    # Load locale, guild and user files
    GLOC = tools.load(OPT.locale_path)
    USR = tools.load(OPT.users_path, default=0)
    for key, user in USR.items():
        for inner_key, value in user.items():
            if inner_key == "name":
                continue
            if isinstance(value, str):
                USR[key][inner_key] = mpf(value)
    GLD = tools.load(OPT.guilds_path, default=0)
    IDLE = tools.load(OPT.idle_path)
    print("Options, locale and user files loaded. I remembered {0} users.".
          format(len(USR)))

    COMMANDS = []
    ACHIEVES = []

    return True
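
The docstring above mentions Munch; a tiny illustration of the attribute-style access it gives, assuming tools.load returns a Munch here (the file names are placeholders):

from munch import Munch

opts = Munch({"users_path": "users.json", "locale_path": "locale.json"})
# Attribute access and key access refer to the same entry.
assert opts.users_path == opts["users_path"]
opts.idle_path = "idle.json"  # new keys can be added the same way
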
Example #9
  def _get_hess_grid(self,fname):
    params=self.order

    # retrieve 
    data=load('data/%s.dchid2dxdx'%fname) 
    H={}
    for p in params:
      H[p]=data[p]['h'][0]
    
    # list of parameter names
    K=params
    npar=len(params)
    L=[]
    L.append('        %s'%self._get_target_par(None,None,All=True))
    for i in range(npar):
      for j in range(i,npar):
        hi=H[K[i]]
        hj=H[K[j]]

        L.append('='*100)
        L.append('h(%s)=%0.2e  h(%s)=%0.2e'%(K[i],hi,K[j],hj))
        L.append('        %s'%self._get_target_par(K[i],K[j]))

        L.append('+%s +%s %s'%(K[i],K[j],self._gen_hess_pars(hi,hj,K[i],K[j])))
        L.append('+%s -%s %s'%(K[i],K[j],self._gen_hess_pars(hi,-hj,K[i],K[j])))
        L.append('-%s +%s %s'%(K[i],K[j],self._gen_hess_pars(-hi,hj,K[i],K[j])))
        L.append('-%s -%s %s'%(K[i],K[j],self._gen_hess_pars(-hi,-hj,K[i],K[j])))

    L=[l+'\n' for l in L]
    checkdir('data')
    F=open('data/%s.hesspar'%fname,'w')
    F.writelines(L)
    F.close()
def get_vocab_info(doc, labels, train_idx, output_path, sparse_format=False):
    if os.path.exists(os.path.join(output_path, settings.vocab_file)):
        vocab_info = tools.load(os.path.join(output_path, settings.vocab_file))
        if len(vocab_info['vocab_dict']) <= settings.max_vocab_size:
            return vocab_info
    tf = Counter()
    data_doc = [doc[i] for i in train_idx]
    leaf_label = labels[-1][train_idx]
    for i, x in enumerate(data_doc):
        for word_tuple in x:
            word, frequency = word_tuple
            if sparse_format or (word not in stop_words
                                 and not word.isnumeric()):
                tf[word] += frequency

    vocab_dict = dict()
    new_tf = Counter()
    for i, v in enumerate(tf.most_common(settings.max_vocab_size)):
        vocab_dict[v[0]] = i
        new_tf[v[0]] = tf[v[0]]
    tf = new_tf
    tf["<DF>"] = len(data_doc)  # to store the number of documents
    vocab_info = {"vocab_dict": vocab_dict, "tf": tf}
    tools.save(os.path.join(output_path, settings.vocab_file), vocab_info)
    return vocab_info
Example #11
def main():

    points, p2, rect, Trv2c, image_shape, annos = load()
    #params
    point_cloud_range = [0, -40, -3, 70.4, 40, 3]
    voxel_size = [0.16, 0.16, 2]
    max_num_points = 35
    max_voxels = 12000

    #process
    point_cloud_range = np.array(point_cloud_range, dtype=np.float32)
    voxel_size = np.array(voxel_size, dtype=np.float32)
    grid_size = (point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size
    # grid_size = np.round(grid_size).astype(np.int32)
    grid_size = np.round(grid_size, 0, grid_size).astype(np.int32)
    # print(grid_size)
    #compute
    for _ in range(10):
        a = time.time()
        voxels, _, _ = points_to_voxel(points, voxel_size, point_cloud_range,
                                       max_num_points, False, max_voxels)
        b = time.time() - a
        print('frame speed: {} ms/id'.format(b * 1000))

    return voxels.reshape(-1, 4), points
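
As a sanity check on the voxel-grid arithmetic above (not part of the original code), the chosen range and voxel size give a 440 x 500 x 3 grid:

import numpy as np

point_cloud_range = np.array([0, -40, -3, 70.4, 40, 3], dtype=np.float32)
voxel_size = np.array([0.16, 0.16, 2], dtype=np.float32)
grid_size = (point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size
assert list(np.round(grid_size).astype(np.int32)) == [440, 500, 3]
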
Example #12
def gen_mc_F2c():
    rnd = load('data/rand.po')
    t = load('data/predictions.po')

    #--gen MCF2c
    nsets = 97
    mcF2c = []
    for k in range(len(rnd)):
        F2c = np.copy(t[0])
        cnt = 0
        for iset in range(1, nsets, 2):
            F2c += rnd[k][cnt] * (t[iset] - t[iset + 1]) / 2
            cnt += 1
        mcF2c.append(F2c)
    mcF2c = np.array(mcF2c)
    save(mcF2c, 'data/mcF2c.po')
Example #13
def main():

    parser = argparse.ArgumentParser(description='Process some networks')
    parser.add_argument('--epochs',
                        '-e',
                        type=int,
                        default=100,
                        help='nb epochs')
    parser.add_argument('--dir_input', default='data/out')
    parser.add_argument('--file_input',
                        default='data/out/coraux_geov2.csv',
                        help='input file')
    parser.add_argument('--save_input_csv', action='store_true')
    parser.add_argument('--do_standardization', '-s', action='store_true')
    parser.add_argument('--do_balance_smote', '-b', action='store_true')
    parser.add_argument('--remove_duplicate', '-d', action='store_true')
    parser.add_argument('--run_multiple_config', '-r', action='store_true')
    parser.add_argument('--min_sample_size', '-n', type=int, default=2000)
    parser.add_argument(
        '--filter_taxon_rank',
        default=None,
        type=str,
        choices=["species", "genus", "order", "family", "class"])
    parser.add_argument('--save_model', action='store_true')
    args = parser.parse_args()

    if (args.file_input):
        data = tools.simpleLoad(args.file_input)
        process(data, args)
    else:
        data = tools.load(args.dir_input)
        for key in data:
            process(data[key], args)
Example #14
    def load(self, projfile=None):
        "Load existing project from pickled file"
        f = projfile or self.projFilePath
        with FileLock(f) as lock:
            d = T.load(f)
            d['log'] = logging.getLogger("Project %s" % d['projName'])
            self.__dict__.update(d)
def main(input_dir='./datasets/rcv1org',
         output_dir=settings.data_dir_20ng,
         split_randomly=True):
    logger = logging.getLogger(__name__)
    logger.info(logconfig.key_log(logconfig.DATA_NAME, input_dir))

    paths = []
    for file_name in os.listdir(input_dir):
        if file_name.endswith('filtered'):
            paths.append(os.path.join(input_dir, file_name))
    paths.sort()

    logger.info(logconfig.key_log(logconfig.FUNCTION_NAME, 'build_class_tree'))
    deltas, classes = build_class_tree(paths, output_dir)

    logger.info(logconfig.key_log(logconfig.FUNCTION_NAME, 'split'))
    if split_randomly:
        data = tools.load(
            os.path.join(output_dir, 'depth%d.txt' % (len(paths))))
        train_idx, test_idx = split_train_test(data, classes[-1],
                                               settings.train_ratio,
                                               output_dir)
    else:
        copyfile(os.path.join(input_dir, 'train_test_idx.npz'),
                 os.path.join(output_dir, 'train_test_idx.npz'))

    logger.info(
        logconfig.key_log(logconfig.FUNCTION_NAME, 'generate_hier_info'))
    generate_hier_info(deltas, classes, output_dir)
Example #16
    def test_TrajCluster(self):
        """TrajCluster test"""
        from Biskit.EnsembleTraj import traj2ensemble

        traj = T.load(T.testRoot() + '/lig_pcr_00/traj.dat')

        traj = traj2ensemble(traj)

        aMask = traj.ref.mask(lambda a: a['name'] in ['CA', 'CB', 'CG'])

        traj = traj.thin(1)

        traj.fit(aMask, verbose=self.local)
        self.tc = TrajCluster(traj, verbose=self.local)

        ## check how many clusters are needed with the given criteria
        n_clusters = self.tc.calcClusterNumber(min_clst=3,
                                               max_clst=15,
                                               rmsLimit=0.7,
                                               aMask=aMask)

        ## cluster
        self.tc.cluster(n_clusters, aMask=aMask)

        if self.local:
            member_frames = self.tc.memberFrames()

            print 'There are %i clusters where the members are:' % n_clusters
            for i in range(n_clusters):
                print 'Cluster %i (%i members): %s' % (
                    i + 1, len(member_frames[i]), member_frames[i])
Example #17
    def test_TrajCluster(self):
        """TrajCluster test"""
        from Biskit.EnsembleTraj import traj2ensemble

        traj = T.load( T.testRoot()+'/lig_pcr_00/traj.dat')

        traj = traj2ensemble( traj )

        aMask = traj.ref.mask( lambda a: a['name'] in ['CA','CB','CG'] )

        traj = traj.thin( 1 )

        traj.fit( aMask, verbose=self.local )
        self.tc = TrajCluster( traj, verbose=self.local )

        ## check how many clusters are needed with the given criteria
        n_clusters = self.tc.calcClusterNumber( min_clst=3, max_clst=15,
                                                rmsLimit=0.7, aMask=aMask )

        ## cluster
        self.tc.cluster( n_clusters, aMask=aMask )

        if self.local:
            member_frames = self.tc.memberFrames()

            print 'There are %i clusters where the members are:'%n_clusters
            for i in range(n_clusters):
                print 'Cluster %i (%i members): %s'%( i+1,
                                                      len(member_frames[i]),
                                                      member_frames[i] )
Example #18
def get_hier_info(input_dir):
    classes = tools.load(os.path.join(input_dir, settings.classes_file))
    hierarchy_file = os.path.join(input_dir, settings.cat_hier_file)
    max_depth = len(classes)

    hier_tree = dict()
    class2no = dict()
    class_set = set()
    hier_tree[0] = 0
    class_set.add("Root")
    class2no["Root"] = 0
    class_cnt = 1
    for depth in range(max_depth):
        for c in classes[depth]:
            if c not in class_set:
                hier_tree[class_cnt] = 0
                class_set.add(c)
                class2no[c] = class_cnt
                class_cnt += 1
    with open(hierarchy_file, "r") as f:
        line = f.readline()
        while line:
            line = line.split()
            hier_tree[int(line[1])] = int(line[0])
            line = f.readline()

    nos = []
    for depth in range(max_depth):
        nos.append([])
        for i in range(len(classes[depth])):
            nos[depth].append(class2no[classes[depth][i]])
    return nos, hier_tree
Example #19
def gen_mc_glue(Q2=2.0):
    rnd = load('data/rand.po')
    t = load('data/hess_glue_%.2f.po' % Q2)

    #--gen MC glue
    nsets = 97
    mcglue = []
    for k in range(len(rnd)):
        g = np.copy(t[0])
        cnt = 0
        for iset in range(1, nsets, 2):
            g += rnd[k][cnt] * (t[iset] - t[iset + 1]) / 2
            cnt += 1
        mcglue.append(g)
    mcglue = np.array(mcglue)
    save(mcglue, 'data/mcglue_%.2f.po' % Q2)
Example #20
def get_vocab_info(doc, train_idx, min_freq, max_vocab_size, n_gram, dir_path):
    if os.path.exists(os.path.join(dir_path, settings.vocab_file)):
        vocab_info = tools.load(os.path.join(dir_path, settings.vocab_file))
        if vocab_info['min_freq'] == min_freq and vocab_info['max_vocab_size'] == max_vocab_size \
            and vocab_info['df'] == len(train_idx) and vocab_info['n_gram'] == n_gram:
            return vocab_info
    tf = Counter()
    data_doc = [doc[i] for i in train_idx]
    for d in data_doc:
        for tf_tuple in d:
            term, frequency = tf_tuple
            tf[term] += frequency
    tf_tuples = sorted(tf.items(), key=lambda x:
                       (-x[1], x[0]))  # sort by frequency, then alphabetically
    stoi = dict()
    itos = dict()
    valid_term = 0
    for term, freq in tf_tuples:
        if freq >= min_freq and valid_term < max_vocab_size:
            stoi[term] = valid_term
            itos[valid_term] = term
            valid_term += 1
        else:
            tf.pop(term)
    vocab_info = {
        "min_freq": min_freq,
        "max_vocab_size": max_vocab_size,
        'n_gram': n_gram,
        "stoi": stoi,
        "itos": itos,
        "tf": tf,
        'df': len(train_idx)
    }
    tools.save(os.path.join(dir_path, settings.vocab_file), vocab_info)
    return vocab_info
Example #21
    def load_storage(self, fname):

        # create storage if not created
        if not os.path.exists(fname):
            #print fname+' is been created !'
            self.storage = {}
        else:
            self.storage = load(fname)
Example #22
async def loadall():
	global authinfo
	global mods
	global localdict
	global localchannel
	global systemchannel
	global commands
	
	authinfo = tools.load('authinfo.json')
	mods = tools.load('mods.json')
	localdict = tools.load('localdict.json')
	commands = tools.load('commands.json')
	
	localchannel = client.get_channel(localdict['localchannel'])
	systemchannel = client.get_channel(localdict['systemchannel'])
	
	await say(systemchannel, 'reloaded configs')
Example #23
def gen_hess_error():
    t = load('data/predictions.po')
    nsets = 97
    err2 = np.zeros(t[0].size)
    for iset in range(1, nsets, 2):
        err2 += (t[iset] - t[iset + 1])**2 / 4.0
    t['hess'] = err2**0.5
    save(t, 'data/hess.po')
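
For orientation, gen_hess_error applies the usual symmetric Hessian prescription: for each plus/minus eigen-direction pair the half-difference of the two predictions is squared and summed. A tiny self-contained check of that formula with made-up numbers:

import numpy as np

# Two fake eigen-direction pairs around a central prediction of 1.0
# (index 0 is the central set, as in the snippet above).
t = {0: np.array([1.0]), 1: np.array([1.2]), 2: np.array([0.9]),
     3: np.array([1.05]), 4: np.array([0.97])}
err2 = np.zeros(t[0].size)
for iset in range(1, 5, 2):
    err2 += (t[iset] - t[iset + 1]) ** 2 / 4.0
# Half-differences are 0.15 and 0.04, so the error is their quadrature sum.
assert np.allclose(err2 ** 0.5, np.hypot(0.15, 0.04))
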
Example #24
def main(argv):
    sess = CacheControl(requests.Session(),
                        cache=FileCache('.web_cache'))
    requests.get = sess.get

    schema = tools.load('schema.json')
    schema['definitions']['Parameter']['properties'] = parse_parameters()
    tools.write(schema, 'schema.json')
Example #25
    def load(self, sysfile=None):
        "Load existing project from pickled file"
        f = sysfile or self.sysFilePath
        if not osp.exists(f): raise BadFile, "File %s not found." % f
        with FileLock(f) as lock:
            d = T.load(f)
            # d['log'] = logging.getLogger("System (%s)" % d['name'])
            self.__dict__.update(d)
Example #26
def pinger_ping(request, pinger_class):
    resp, status = '', 200
    try:
        pinger = load(pinger_class)
        resp = pinger().ping()
    except WrongPinger:
        status = 406
    return HttpResponse(resp, status=status)
Example #27
  def gen_hess_par(self,fname):

    H0=load('data/old.hess')
    H=load('data/'+fname+'.hess')
    ndim=H0.shape[0]
    I=[i for i in range(ndim) if np.isnan(H[0][i])==False]
    nrdim=len(I)
 
    RH0=np.copy(H0[I,:][:,I])
    RH=np.copy(H[I,:][:,I])

    HTOT=RH0+RH
    W,V=LA.eig(HTOT)
    V=np.transpose(V)
    for i in range(W.size): 
      if W[i]<0: continue
      V[i]/=W[i]**0.5

    L=[]

    h='%5s'%''
    for name in self.order: h+='%15s'%name
    L.append(h)
    print h
    

    P0=np.copy(self.P0)
    for i in range(W.size):
      if W[i]<0: continue
      PP=np.copy(P0)
      PP[I]+=V[i]
      PM=np.copy(P0)
      PM[I]-=V[i]
      lp='+%5d'%i
      for p in PP: lp+='%15.5e'%p
      lm='-%5d'%i
      for p in PM: lm+='%15.5e'%p
      L.append(lp)
      L.append(lm)
      print lp
      print lm

    L=[l+'\n' for l in L]
    F=open('data/'+fname+'.eig','w')
    F.writelines(L)
    F.close()
Example #28
    def load(self, replfile=None):
        "Load existing project from pickled file"
        f = replfile or self.replFilePath
        if not osp.exists(f): raise BadFile, "File %s not found." % f
        with FileLock(f) as lock:
            d = T.load(f)
            d['log'] = logging.getLogger("Replica (%s)" % d['name'])
            d['grids'] = None
            if not d.has_key('prod_steps'):
                d['prod_steps'] = d['nvt_prod_steps']
            self.__dict__.update(d)
Example #29
    def loadTraj(self, ftraj):
        """
        Load trajectories from disc.

        @param ftraj: path to trajectory
        @type  ftraj: str

        @return: ensemble trajectory object
        @rtype: EnsembleTraj
        """
        t = T.load(T.absfile(ftraj))
        return traj2ensemble(t)
Example #30
    def loadTraj( self, ftraj ):
        """
        Load trajectories from disc.

        @param ftraj: path to trajectory
        @type  ftraj: str

        @return: ensemble trajectory object
        @rtype: EnsembleTraj
        """
        t = T.load( T.absfile( ftraj ) )
        return traj2ensemble( t )
Example #31
def gen_weights():
    rnd = load('data/rand.po')
    t = load('data/predictions.po')
    mcF2c = load('data/mcF2c.po')

    #--get absolute simulated errors
    t['alpha'] = t[0] * t['relerr']

    #--gen chi2
    chi2 = []
    for F2c in mcF2c:
        exp = np.copy(t[0])
        res = (exp - F2c) / t['alpha']
        chi2.append(np.sum(res**2))
    chi2 = np.array(chi2)
    dchi2 = chi2 - t[0].size

    #--gen weights
    weights = np.exp(-0.5 * dchi2)
    norm = np.sum(weights)
    weights /= norm

    save(weights, 'data/weights.po')
Example #32
def main(argv):
    sess = CacheControl(requests.Session(),
                        cache=FileCache('.web_cache'))
    requests.get = sess.get
    resource_schema = tools.load(argv[1])

    apply_all_tweaks(resource_schema)

    if len(argv) == 3 and argv[2].endswith('json'):
        tools.write(resource_schema, argv[1])
    else:
        print tools.print_(resource_schema)

    return 0
Example #33
    def __init__(self, parent, controller):
        Frame.__init__(self, parent)

        # Create the top menu
        menu = Menu(self)
        controller.configure(menu=menu)

        # Define the file section of the top menu
        file_menu = Menu(menu, tearoff=False)
        menu.add_cascade(label='File', menu=file_menu)

        # Add the options to the file menu
        file_menu.add_command(label='Move password',
                              command=lambda: tools.mv_file())
        file_menu.add_command(label='Make backup',
                              command=lambda: tools.make_bk())
        file_menu.add_command(label='Burn file')
        file_menu.add_command(label='Reload', command=lambda: tools.load())
        file_menu.add_separator()
        file_menu.add_command(label='Preferences')
        file_menu.add_separator()
        file_menu.add_command(label='Exit', command=lambda: secure_exit())

        # Define the tools section of the top menu
        file_menu = Menu(menu, tearoff=False)
        menu.add_cascade(label='Tools', menu=file_menu)

        # Add the options to the tools menu
        file_menu.add_command(label='Generate password')
        file_menu.add_command(label='Test password',
                              command=lambda: TestPassword())
        file_menu.add_command(label='Add password',
                              command=lambda: AddPassword())
        file_menu.add_command(
            label='Edit password',
            command=lambda: controller.show_frame(EditPassword))
        file_menu.add_separator()
        file_menu.add_command(label='Find')
        file_menu.add_separator()
        file_menu.add_command(label='Decrypt/Encrypt')

        # Define the info section of the top menu
        file_menu = Menu(menu, tearoff=False)
        menu.add_cascade(label='Info', menu=file_menu)

        # Add the options to the info menu
        file_menu.add_command(label='Version')
        file_menu.add_command(label='Developer')
        file_menu.add_command(label='Security')
Example #34
def reload_users() -> bool:
    """Forces dictionary reload for users from file.

    Returns:
        True
    """
    global USR
    USR = tools.load(OPT.users_path, default=0)
    for key, user in USR.items():
        for inner_key, value in user.items():
            if inner_key == "name":
                continue
            if isinstance(value, str):
                USR[key][inner_key] = mpf(value)
    return True
Example #35
  def _plot_hess_h(self,fname):
    print 'plotting ',fname

    data=load('data/%s.dchid2dxdx'%fname) 

    ncols,nrows=4,4
    py.figure(figsize=(ncols*5,nrows*3))
    
    # list of parameter names
    K=['BLNY','Nu_c','Nd_c','au_c','ad_c','bu_c','bd_c','Nu_T','Nd_T','au_T','ad_T','bu_T','bd_T']

    cnt=1
    for k in K:
      ax=py.subplot(nrows,ncols,cnt)

      h=data[k]['h']  
      dchi2dxdx=data[k]['dchi2dxdx']  
    
      ax.plot(h,dchi2dxdx,'b-o')
      ax.set_xlabel(r'$h$')
      ax.set_ylabel(r'$\partial \chi^2/\partial^2 p$')
      ax.semilogx()
      ax.set_title(r'${\rm %s}$'%k.replace('_',' '))
      cnt+=1

      I=np.argsort(h)

      #if fname=='clas_p_pim':
      if k=='BLNY':i=I[-1]  
      if k=='Nu_c':i=I[-1]
      if k=='Nd_c':i=I[-1]
      if k=='au_c':i=I[-1]
      if k=='ad_c':i=I[-1]
      if k=='bu_c':i=I[-1]  
      if k=='bd_c':i=I[-1]  
      if k=='Nu_T':i=I[-1]
      if k=='Nd_T':i=I[-1]
      if k=='au_T':i=I[-5]
      if k=='ad_T':i=I[-4]
      if k=='bu_T':i=I[-5]
      if k=='bd_T':i=I[-5]


      ax.axvline(h[i],color='r',ls='--')
    py.tight_layout()
    checkdir('gallery')
    py.savefig('gallery/hess-%s.pdf'%fname)
Example #36
    def test_Pymoler(self):
        """Pymoler test"""
        self.traj = T.load( T.testRoot() + '/lig_pcr_00/traj.dat' )

        self.pm = Pymoler( full=0, verbose=self.local )
        
        mname = self.pm.addMovie( [ self.traj[i] for i in range(0,100,20) ] )

        sel = self.pm.makeSel({'residue':29})
##         self.pm.add('show stick, %s' % sel)
        self.pm.add('show surface, %s' % sel)

        self.pm.add('mplay')
        
        if not self.local:
            self.pm.add('quit')
            
        self.pm.run() ## old style call "pm.show()" also works
    
        self.assert_( self.pm.pid is not None )
Example #37
def benchmarks_impl(i, OpType, nsamples, init_cuda, data, progress):
    # Initialize CUDA context
    device, ctx, stream = init_cuda()
    # Process-specific seed
    np.random.seed(int(time()) + i)
    # Retrieve saved data
    arch = 'sm_' + '_'.join(map(str, device.compute_capability))
    path = mkdir('save/{}/{}/'.format(OpType.id, arch)) + 'data{}.npz'.format(i)
    X, Y = load(path, [('X', OpType.Nparams), ( 'Y', 1)])
    # Do not update/realloc X, Y at each iteration
    step = 200
    bufX, bufY = np.empty((step, X.shape[1])), np.empty((step, Y.shape[1]))
    #Generate data
    nvalid = X.shape[0]    
    progress[i] = min(nsamples,nvalid)        
    while nvalid < nsamples:
        P = generate_valid(OpType, device)
        for params in P:
            #print(params)
            sys.stdout.flush()
            try:
                y = evaluate(OpType, ctx, stream, params)
            except Exception:
                print('Exception for', params)
                continue  # skip parameters that failed to evaluate
            bufX[nvalid % step, :] = params
            bufY[nvalid % step, :] = y
            # Save
            nvalid += 1
            if nvalid % step == 0:
                X = np.vstack((X, bufX))
                Y = np.vstack((Y, bufY))
                np.savez(path, X=X, Y=Y)
            # Update progress
            progress[i] = min(nsamples,nvalid)
            if nvalid > nsamples:
                break
    data[i] = (X, Y)
Example #38
    def test_Trajectory(self):
        """Trajectory test"""
##         f = T.testRoot() + '/lig_pc2_00/pdb/'
##         allfiles = os.listdir( f )
##         pdbs = []
##         for fn in allfiles:
##             try:
##                 if (fn[-7:].upper() == '.PDB.GZ'):
##                     pdbs += [f + fn]
##             except:
##                 pass

##         ref = pdbs[0]
##         traj = Trajectory( pdbs[:3], ref, rmwat=0 )

        ## Loading
        self.traj = T.load(T.testRoot() + '/lig_pcr_00/traj.dat')

        ## sort frames by frameNames
        self.traj.sortFrames()

        ## sort atoms 
        self.traj.sortAtoms()

        ## remove waters
        self.traj = self.traj.compressAtoms(
            N.logical_not( self.traj.ref.maskH2O()) )

        ## get fluctuation on a residue level
        r1 = self.traj.getFluct_local( verbose=self.local )

        ## fit backbone of frames to reference structure
        self.traj.fit( ref=self.traj.ref,
                       mask=self.traj.ref.maskBB(), verbose=self.local )

        self.assertAlmostEqual( N.sum( self.traj.profile('rms') ),
                                58.101235746353879, 2 )
Example #39
    def render(self):

        def handle(tag, *args, **keywords):

            # first try filling tag with page attributes
            if hasattr(self, tag):
                attr = getattr(self, tag)
                if callable(attr):
                    repl = attr(self, *args, **keywords)
                else:
                    repl = attr
                return fill('<dz:','>', repl, handle)

            # if that doesn't work look for an app helper
            elif tag in system.app.helpers:
                helper = system.app.helpers[tag]

            # if that doesn't work look for a system helper
            elif tag in system.helpers:
                helper = system.helpers[tag]

            # if that doesn't work look in the helpers module
            elif tag in helpers.__dict__ and callable(helpers.__dict__[tag]):
                """call functions in a module or module-like object"""
                helper = helpers.__dict__[tag]

            else:
                helper = None

            if helper:
                if callable(helper):
                    repl = helper(*args, **keywords)
                else:
                    repl = helper

                return fill('<dz:','>', repl, handle)

                

        def set_setting(thing, name):
            if thing=='template':
                self.template = name
            elif thing=='app_title':
                self.app_title = name
            return '<!-- %s set to "%s" -->' % (thing, name)

        def render_snippet(system_snippet, page_snippet):
            return '\n'.join(system_snippet | OrderedSet([page_snippet]))

        def render_script_tags(system_scripts, page_scripts):
            scripts = system_scripts | page_scripts
            h = scripts and '\n        <!-- Page Specific Scripts -->\n' or ''
            c = '\n'.join('        <script type="text/javascript" src="{}"></script>'.format(t) for t in scripts)
            t = scripts and '\n\n' or ''
            return h + c + t

        def render_style_sheets(system_style_sheets, page_style_sheets):
            sheets = system_style_sheets | page_style_sheets
            h = sheets and '\n        <!-- Page Specific Styles -->\n' or ''
            c = '\n'.join('        <link rel="stylesheet" type="text/css" href="{}">'.format(t) for t in sheets)
            t = sheets and '\n\n' or ''
            return h + c + t

        DEFAULT_TEMPLATE = os.path.join(system.root,'themes','default','default.html')

        self.theme = self.theme or system.app.theme or user.theme or system.theme
        if self.theme != system.theme:
            system.set_theme(self.theme)

        self.content = fill('<dz:set_','>', self.content, set_setting)

        self.styles = render_style_sheets(system.styles, self.styles)
        self.css    = render_snippet(system.css, self.css)
        self.libs   = render_script_tags(system.libs, self.libs)
        self.js     = render_snippet(system.js, self.js)
        self.head   = render_snippet(system.head, self.head)
        self.tail   = render_snippet(system.tail, self.tail)

        if len(route)>1:
            breadcrumb = link_to(system.app.title,'/'+system.app.name)
        else:
            breadcrumb = ''

        template_pathname = system.theme_path
        if template_pathname:
            template_filename = os.path.join(template_pathname, self.template+'.html')
            if not os.path.exists(template_filename):
                if not self.template in ['index','content']:
                    log.logger.warning('template missing (%s)' % (template_filename))
                template_filename = os.path.join(template_pathname, 'default.html')
        self.tpl = template_pathname and tools.load(template_filename) or tools.load(DEFAULT_TEMPLATE)

        page_header = self.render_header()
        save_content = self.content
        self.content = page_header + self.content
        save_title = self.title
        del self.title
        content = fill('<dz:','>', self.tpl, handle)
        self.title = save_title
        self.content = save_content
        if self.callback:
            content = fill('{{','}}', content, self.callback)

        return HTMLResponse(content)
Example #40
def main():
    U, C, S = load('cornu.dat')
    cornu_plot(C, S)
#!/usr/bin/env python
import sys
import val
import tools

schema = tools.load('schema.json')
template = tools.load(sys.argv[1])
val.val(template, schema)
def plot_length(D):
    x, mags, phase = load('diffraction_%scm.dat' % D)
    combined_plot(x, mags, phase, D)
Example #43
    def load_storage(self, fname):
        if not os.path.exists(fname):
            self.storage = {}
        else:
            self.storage = load(fname)
Example #44
            if plot_fits:
                fig = plt.figure()
                ax = fig.add_subplot(111)
                xx = np.arange(min(x), max(x))
                yy = logistic(popt, xx)
                ax.plot(x, y, xx, yy)
                plt.show()
            
        except RuntimeError:
            offsets[group] = [0]

    return zip(*sorted(offsets.iteritems()))

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print 'Usage:', sys.argv[0], '<trigger scan file> [start_nhit end_nhit]'
        sys.exit(1)

    if len(sys.argv) == 4:
        xf = lambda x: x > float(sys.argv[2]) and x < float(sys.argv[3])
    else:
        xf = lambda x: True

    zf = lambda x: x < 1.0e6

    data = tools.load(sys.argv[1], xfilt=xf, zfilt=zf)

    linear_fit_plot(*make_nhit_vs_adc(data))