Example #1
def open(uri=None, mode='a'):
    """Open a Blaze object via an `uri` (Uniform Resource Identifier).

    Parameters
    ----------
    uri : str
        Specifies the URI for the Blaze object.  It can be a regular file too.

    mode : the open mode (string)
        Specifies the mode in which the object is opened.  The supported
        values are:

          * 'r' for read-only
          * 'w' for emptying the previous underlying data
          * 'a' for allowing read/write on top of existing data

    Returns
    -------
    out : an Array or Table object.

    """
    ARRAY = 1
    TABLE = 2

    if uri is None:
        source = CArraySource()
        structure = ARRAY
    else:
        uri = urlparse(uri)

        if uri.scheme == 'carray':
            path = os.path.join(uri.netloc, uri.path[1:])
            parms = params(storage=path)
            source = CArraySource(params=parms)
            structure = ARRAY

        elif uri.scheme == 'ctable':
            path = os.path.join(uri.netloc, uri.path[1:])
            parms = params(storage=path)
            source = CTableSource(params=parms)
            structure = TABLE

        elif uri.scheme == 'sqlite':
            path = os.path.join(uri.netloc, uri.path[1:])
            parms = params(storage=path or None)
            source = SqliteSource(params=parms)
            structure = TABLE

        else:
            # Default is to treat the URI as a regular path
            parms = params(storage=uri.path)
            source = CArraySource(params=parms)
            structure = ARRAY

    # Don't want a deferred array (yet)
    # return NDArray(source)
    if structure == ARRAY:
        return Array(source)
    elif structure == TABLE:
        return NDTable(source)
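A minimal usage sketch for the open() above, assuming the legacy Blaze params/CArraySource API is importable; the URIs and paths are placeholders:

# Hypothetical calls against the open() defined above.
a = open()                         # anonymous, in-memory carray
b = open('carray:///tmp/demo')     # chunked array persisted under a placeholder path
c = open('/tmp/plain_file')        # no scheme: treated as a regular path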
Example #2
def open(uri=None):

    if uri is None:
        source = CArraySource()
    else:
        uri = urlparse(uri)

        if uri.scheme == 'carray':
            parms = params(storage=uri.netloc)
            source = CArraySource(params=parms)
Example #3
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 conv_stride=1,
                 padding=1,
                 bias=True,
                 activation_func='relu'):
        super().__init__()

        parameter = params().get_params()

        if parameter.prelu:
            activation_func = 'prelu'
        else:
            activation_func = 'relu'
        norm_layer, activation_func = get_layer_info(out_channels,
                                                     activation_func)
        if norm_layer is not None:
            self.conv = nn.Sequential(
                nn.Conv2d(in_channels,
                          out_channels,
                          stride=conv_stride,
                          kernel_size=kernel_size,
                          padding=padding,
                          bias=bias), norm_layer, activation_func)
        else:
            self.conv = nn.Sequential(
                nn.Conv2d(in_channels,
                          out_channels,
                          stride=conv_stride,
                          kernel_size=kernel_size,
                          padding=padding,
                          bias=bias), activation_func)
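get_layer_info() is defined elsewhere in that repository; purely as an assumption, a minimal sketch of what such a helper might return (a batch-norm layer plus a ReLU/PReLU module), so that the nn.Sequential above type-checks:

import torch.nn as nn

def get_layer_info(out_channels, activation_func='relu'):
    # Hypothetical stand-in for the repo's helper, not the original implementation.
    norm_layer = nn.BatchNorm2d(out_channels)
    activation = nn.PReLU() if activation_func == 'prelu' else nn.ReLU(inplace=True)
    return norm_layer, activation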
Example #4
def auto_covariance_pop(spktimes, ind_include, numspikes, dt, lags, tau, tstop,
                        trans):

    import numpy as np
    import params
    reload(params)
    par = params.params()
    N = par.N

    spk = bin_pop_spiketrain(spktimes, dt, 1, tstop, trans, ind_include)
    spk = spk[int(trans / dt):] / float(len(ind_include))

    r = np.sum(spk) / (tstop - trans)
    spk -= r

    ### compute cross-covariance for each time lag
    Nlags = lags.size
    Nt = spk.size
    auto_cov = np.zeros(Nlags, )

    for i in range(0, Nlags):
        nshift = int(lags[i])  # number of bins, and direction, to shift by
        if nshift >= 0:
            auto_cov[i] += np.dot(spk[nshift:Nt:1],
                                  np.conj(spk[0:Nt - nshift:1]))
        else:
            nshift = -nshift
            auto_cov[i] += np.conj(
                np.dot(np.conj(spk[0:Nt - nshift:1]), spk[nshift:Nt:1]))
        ### unbiased estimate of cross-correlation
        auto_cov[i] = auto_cov[i] / (Nt - np.abs(lags[i]))

    return auto_cov
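The lag-shifted dot product and the unbiased (Nt - |lag|) normalisation above can be checked with NumPy alone; a self-contained sketch on synthetic data (not the spike trains used above):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
x -= x.mean()                      # remove the mean, as spk -= r does above

def lagged_cov(x, lag):
    Nt = x.size
    n = abs(int(lag))
    if lag >= 0:
        c = np.dot(x[n:], np.conj(x[:Nt - n]))
    else:
        c = np.conj(np.dot(np.conj(x[:Nt - n]), x[n:]))
    return c / (Nt - n)            # unbiased estimate, as in auto_cov above

print(lagged_cov(x, 0), lagged_cov(x, 5), lagged_cov(x, -5))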
Example #5
    def __init__(self, proxies={'http': 'http://127.0.0.1:8080',
        'https': 'http://127.0.0.1:8080'}):
        """
        Creates an instance of the ZAP api client.

        :Parameters:
           - `proxies`: dictionary of ZAP proxies to use.
           
        Note that all of the other classes in this directory are generated;
        new ones will need to be manually added to this file.
        """
        self.__proxies = proxies
        
        self.acsrf = acsrf(self)
        self.ajaxSpider = ajaxSpider(self)
        self.ascan = ascan(self)
        self.authentication = authentication(self)
        self.autoupdate = autoupdate(self)
        self.brk = brk(self)
        self.context = context(self)
        self.core = core(self)
        self.forcedUser = forcedUser(self)
        self.httpsessions = httpSessions(self)
        self.importLogFiles = importLogFiles(self)
        self.params = params(self)
        self.pnh = pnh(self)
        self.pscan = pscan(self)
        self.script = script(self)
        self.search = search(self)
        self.selenium = selenium(self)
        self.sessionManagement = sessionManagement(self)
        self.spider = spider(self)
        self.users = users(self)
Example #6
 def test_generate_params(self):
     print('=== Testing generate_params() ===')
     param = params.params()
     param.generate_params(config_fname)
     self.configparams_test(param)
     param.generate_params(config_fname)
     
     config = DragonflyConfig(config_fname)
     config.modify_entry('emc', 'output_folder', 'other_data/')
     config.modify_entry('emc', 'log_file', 'other_EMC.log')
     config.modify_entry('emc', 'need_scaling', '1')
     config.modify_entry('emc', 'alpha', '0.5')
     config.modify_entry('emc', 'beta', '0.5')
     config.modify_entry('emc', 'beta_schedule', '1.5 10')
     config.modify_entry('emc', 'gaussian_sigma', '1.')
     param.generate_params(config_fname)
     self.configparams_test(param, default=False)
     
     config.modify_entry('emc', 'output_folder', 'data/')
     config.modify_entry('emc', 'log_file', 'EMC.log')
     config.remove_entry('emc', 'need_scaling')
     config.remove_entry('emc', 'alpha')
     config.remove_entry('emc', 'beta')
     config.remove_entry('emc', 'beta_schedule')
     config.remove_entry('emc', 'gaussian_sigma')
Example #7
    def __init__(self):
        prm = params.params()
        # order_df = orderAnalysis.OA().order_df
        track_df = trackAnalysis.TA().track_df
        track_graph = trackAnalysis.TA().network

        fig = plt.figure(figsize=(15, 15), facecolor='black')
        ax = fig.add_subplot(111)
        ax.set_facecolor("k")
        ax.set_ylim((prm.latMin, prm.latMax))
        ax.set_xlim((prm.lonMin, prm.lonMax))
        ax.set_xticks([])
        ax.set_yticks([])
        ax.scatter(track_df['Lon.'],
                   track_df['Lat.'],
                   color='grey',
                   s=10,
                   alpha=1)

        fig.subplots_adjust(bottom=0)
        fig.subplots_adjust(top=1)
        fig.subplots_adjust(right=1)
        fig.subplots_adjust(left=0)

        self.background = fig
        self.network = track_graph

        track_df_mean = track_df.groupby(['xInd', 'yInd']).mean()
        self.map_df = track_df_mean
Example #8
    def __init__(self):
        param = params()
        self.GetDataProject = param.coreurl + '/sys/dataproject'
        self.GetDataFile = param.coreurl + '/sys/datafile'

        self.Upload = param.coreurl + '/data/upload'
        self.Download = param.coreurl + '/data/download'
        self.GetColumn = param.coreurl + '/data/getcol'
        self.GetFileStatus = param.coreurl + '/data/getstatus'
        self.DeleteFile = param.coreurl + '/data/delete'

        self.GetImg = param.coreurl + '/viz/getimg'
        self.GetDataVizAlgoList = param.coreurl + '/viz/data/getalgo'
        self.DoDataViz = param.coreurl + '/viz/data/do'

        self.GetPreprocessAlgoList = param.coreurl + '/preprocess/getalgo'
        self.DoPreprocess = param.coreurl + '/preprocess/do'
        self.PreviewPreprocess = param.coreurl + '/preprocess/preview'
        self.GetCorrelationAlgoList = param.coreurl + '/correlation/getalgo'
        self.DoCorrelation = param.coreurl + '/correlation/do'
        self.GetAnalyticAlgo = param.coreurl + '/analytic/getalgo'
        self.GetAnalyticAlgoParam = param.coreurl + '/analytic/getparam'
        self.DeleteModel = param.coreurl + '/analytic/delete'
        self.DoModelTrain = param.coreurl + '/analytic/train'
        self.GetModelStatus = param.coreurl + '/analytic/get/status'
        self.GetModelPreview = param.coreurl + '/analytic/preview'
        self.DoModelPredict = param.coreurl + '/analytic/predict'
        self.DoModelTest = param.coreurl + '/analytic/test'
        self.GetModelParameter = param.coreurl + '/analytic/get/param'
        self.GetModelFailReason = param.coreurl + '/analytic/get/fail'
        self.StopTraining = param.coreurl + '/analytic/stop'
Example #9
    def test_generate_params(self):
        print('=== Testing generate_params() ===')
        param = params.params()
        param.generate_params(config_fname)
        self.configparams_test(param)
        param.generate_params(config_fname)

        config = DragonflyConfig(config_fname)
        config.modify_entry('emc', 'output_folder', 'other_data/')
        config.modify_entry('emc', 'log_file', 'other_EMC.log')
        config.modify_entry('emc', 'need_scaling', '1')
        config.modify_entry('emc', 'alpha', '0.5')
        config.modify_entry('emc', 'beta', '0.5')
        config.modify_entry('emc', 'beta_schedule', '1.5 10')
        config.modify_entry('emc', 'gaussian_sigma', '1.')
        param.generate_params(config_fname)
        self.configparams_test(param, default=False)

        config.modify_entry('emc', 'output_folder', 'data/')
        config.modify_entry('emc', 'log_file', 'EMC.log')
        config.remove_entry('emc', 'need_scaling')
        config.remove_entry('emc', 'alpha')
        config.remove_entry('emc', 'beta')
        config.remove_entry('emc', 'beta_schedule')
        config.remove_entry('emc', 'gaussian_sigma')
Example #10
def main(sentences,
         wordfile: str,
         weightfile: str,
         weightpara: float = 1e-3,
         rmpc: int = 1):
    # load word vectors
    (words, We) = data_io.getWordmap(wordfile)
    # load word weights
    word2weight = data_io.getWordWeight(
        weightfile,
        weightpara)  # word2weight['str'] is the weight for the word 'str'
    weight4ind = data_io.getWeight(
        words, word2weight)  # weight4ind[i] is the weight for the i-th word
    # load sentences
    x, m, _ = data_io.sentences2idx(
        sentences, words
    )  # x is the array of word indices, m is the binary mask indicating whether there is a word in that location
    w = data_io.seq2weight(x, m, weight4ind)  # get word weights

    # set parameters (use a new name so the imported `params` module is not shadowed)
    param = params.params()
    param.rmpc = rmpc
    # get SIF embedding
    embedding = SIF_embedding.SIF_embedding(
        We, x, w, param)  # embedding[i,:] is the embedding for sentence i
Example #11
    def __init__(self, proxies={'http': 'http://127.0.0.1:8080',
        'https': 'http://127.0.0.1:8080'}):
        """
        Creates an instance of the ZAP api client.

        :Parameters:
           - `proxies`: dictionary of ZAP proxies to use.
           
        Note that all of the other classes in this directory are generated;
        new ones will need to be manually added to this file.
        """
        self.__proxies = proxies
        
        self.acsrf = acsrf(self)
        self.ajaxSpider = ajaxSpider(self)
        self.ascan = ascan(self)
        self.authentication = authentication(self)
        self.autoupdate = autoupdate(self)
        self.brk = brk(self)
        self.context = context(self)
        self.core = core(self)
        self.forcedUser = forcedUser(self)
        self.httpsessions = httpSessions(self)
        self.importLogFiles = importLogFiles(self)
        self.params = params(self)
        self.pnh = pnh(self)
        self.pscan = pscan(self)
        self.script = script(self)
        self.search = search(self)
        self.selenium = selenium(self)
        self.sessionManagement = sessionManagement(self)
        self.spider = spider(self)
        self.users = users(self)
Example #12
    def __init__(self, out_channels=3):
        super(Up_Stream, self).__init__()

        parameter = params().get_params()
        if parameter.prelu:
            activation_func = 'prelu'
        else:
            activation_func = 'relu'

        input_channel = 512
        norm_layer, activation_func = get_layer_info(input_channel,
                                                     activation_func)

        self.up_16_16_1 = Conv(input_channel,
                               256,
                               activation_func=activation_func)
        self.up_16_16_2 = Conv(768, 512, activation_func=activation_func)
        self.up_16_16_3 = Conv(1024, 512, activation_func=activation_func)

        self.up_16_32 = Up(1024, 256, activation_func=activation_func)
        self.up_32_32_1 = Conv(512, 256, activation_func=activation_func)

        self.up_32_64 = Up(512, 128, activation_func=activation_func)
        self.up_64_64_1 = Conv(256, 128, activation_func=activation_func)

        self.up_64_128 = Up(256, 64, activation_func=activation_func)
        self.up_128_128_1 = Conv(128, 64, activation_func=activation_func)

        self.up_128_256 = Up(128, 32, activation_func=activation_func)
        self.out_conv = Conv(64, out_channels, activation_func='relu')
Example #13
def init():
    '''
    Purpose:
    Initialize the model and parameters.

    Returns:
    logger: logger instance
    params: parameter collection
    vocab: Vocab instance
    model: the model currently in use
    generator: the generator currently in use
    '''

    from logger import logger
    from params import params
    from vocab import Vocab
    from beam import Generator

    # Load the logger and the parameter collection
    logger = logger()
    params = params()

    # Load data from the saved .pt file,
    # including the vocab and the input/output index sequences of the training/validation sets
    data = torch.load(params.temp_pt_file)
    vocab = data['vocab']
    params = data['params']

    if params.rnnsearch:
        from rnnsearch import Model
    else:
        from transformer import Model

    # Print the parameter list
    if params.print_params:
        logger.info('Parameter list: {}'.format(params))

    # Define the model
    model = Model(params, vocab).to(params.device)

    # If requested in the parameters, print the model structure
    if params.print_model:
        logger.info(model)

    # Load the model parameters
    if os.path.exists(params.checkpoint_file):
        model_params = torch.load(params.checkpoint_file,
                                  map_location=params.device)
        model.load_state_dict(model_params)
        logger.info('Loading trained model parameters from {}'.format(params.checkpoint_file))
    else:
        logger.info('Warning: no trained model parameters found; testing with randomly initialized model parameters')

    model.eval()

    # Define the generator
    generator = Generator(params, model)

    return logger, params, vocab, model, generator
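A sketch of how init() is presumably consumed by a decoding script (the checkpoint and .pt paths all come from the params module, so nothing else needs configuring here):

# Hypothetical call site for init().
logger, params, vocab, model, generator = init()
logger.info('Model and generator ready on device {}'.format(params.device))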
Example #14
def maintaining():
    param = params()
    check_time = datetime.now().time()
    if param.maintainBegin < param.maintainEnd:
        return (check_time >= param.maintainBegin
                and check_time <= param.maintainEnd) or param.maintaining
    else:  # crosses midnight
        return (check_time >= param.maintainBegin
                or check_time <= param.maintainEnd) or param.maintaining
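The wrap-around check above (a maintenance window that crosses midnight) can be illustrated with plain datetime.time values; a self-contained sketch with made-up hours:

from datetime import time

# Made-up maintenance window that crosses midnight: 23:30 -> 01:00.
maintain_begin, maintain_end = time(23, 30), time(1, 0)

def in_window(check_time):
    if maintain_begin < maintain_end:
        return maintain_begin <= check_time <= maintain_end
    # crosses midnight: late evening or early morning both qualify
    return check_time >= maintain_begin or check_time <= maintain_end

print(in_window(time(23, 45)))  # True
print(in_window(time(0, 30)))   # True
print(in_window(time(12, 0)))   # False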
Example #15
def tokenValidator(token):
    if token == 'testing':
        return True
    else:
        return jwt.decode(token,
                          params().secretkey,
                          audience='www.inanalysis.com',
                          algorithms=['HS256'])
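For context, a token that passes the validator above could be minted and checked with PyJWT along these lines; the secret and claims are placeholders, not the values held by params():

import jwt  # PyJWT

secret = 'placeholder-secret'      # stands in for params().secretkey
token = jwt.encode({'aud': 'www.inanalysis.com', 'user': 'demo'},
                   secret, algorithm='HS256')
claims = jwt.decode(token, secret,
                    audience='www.inanalysis.com', algorithms=['HS256'])
print(claims['user'])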
Example #16
 def test_generate_output_dirs(self):
     print('=== Testing generate_output_dirs() ===')
     param = params.params()
     param.generate_params(config_fname)
     flist = [recon_folder+b'/data/'+d for d in [b'output', b'weights', b'orientations', b'scale', b'likelihood', b'mutualInfo']]
     [shutil.rmtree(d) for d in flist if os.path.exists(d)]
     param.generate_output_dirs()
     self.assertTrue(np.array([os.path.exists(d) for d in flist]).all())
     param.generate_output_dirs()
Example #17
def linear_response_fun(w, W, phi_r):
    par = params.params()
    N = par.N

    Gamma = g_fun(
        w
    ) * W  # W has already been multiplied by the gain of the rate function
    Delta = np.linalg.inv(np.eye(N) - Gamma)

    return Delta
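The propagator above is simply (I - Gamma)^-1; a small self-contained check of that formula on a toy 2x2 interaction matrix (not the network parameters from params()):

import numpy as np

Gamma = np.array([[0.0, 0.2],
                  [0.1, 0.0]])
Delta = np.linalg.inv(np.eye(2) - Gamma)
# Delta inverts (I - Gamma) up to floating-point error.
print(np.allclose((np.eye(2) - Gamma) @ Delta, np.eye(2)))  # True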
Example #18
    def __init__(self):
        prm = param.params()

        df = pd.read_csv('ORDER_DATA_2.csv', header=None)
        df['Date'] = df[0].apply(lambda x: utils.parseDate(x.split(' ')[0]))
        df['UpTS'] = df.apply(
            lambda x: utils.putTimeinDate(x['Date'], utils.parseDate(x[4])),
            axis=1)
        df['CatchTS'] = df.apply(
            lambda x: utils.putTimeinDate(x['Date'], utils.parseDate(x[8])),
            axis=1)
        df['PickUpTS'] = df.apply(
            lambda x: utils.putTimeinDate(x['Date'], utils.parseDate(x[9])),
            axis=1)
        df['DeliverTS'] = df.apply(
            lambda x: utils.putTimeinDate(x['Date'], utils.parseDate(x[10])),
            axis=1)
        df = df.drop([0, 4, 8, 9, 10], axis=1)
        df.columns = [
            'OrderID', 'pLon', 'pLat', 'dLon', 'dLat', 'DelivererID', 'Date',
            'UpTS', 'CatchTS', 'PickUpTS', 'DeliverTS'
        ]
        df['CatchDelay'] = (df['CatchTS'] -
                            df['UpTS']).apply(lambda x: x.seconds)
        df['PickupTravelTime'] = (df['PickUpTS'] -
                                  df['CatchTS']).apply(lambda x: x.seconds)
        df['DeliverTravelTime'] = (df['DeliverTS'] -
                                   df['PickUpTS']).apply(lambda x: x.seconds)
        df['TotalTravelTime'] = (df['DeliverTS'] -
                                 df['CatchTS']).apply(lambda x: x.seconds)
        df['p-dDistance'] = df.apply(lambda x: utils.lonlatDist(
            x['pLon'], x['pLat'], x['dLon'], x['dLat']),
                                     axis=1)

        df['TimeSlot1'] = df['UpTS'].apply(
            lambda x: utils.getTimeSlot(x, prm.MINS))
        df['TimeSlot2'] = df['CatchTS'].apply(
            lambda x: utils.getTimeSlot(x, prm.MINS))
        df['TimeSlot3'] = df['PickUpTS'].apply(
            lambda x: utils.getTimeSlot(x, prm.MINS))
        df['TimeSlot4'] = df['DeliverTS'].apply(
            lambda x: utils.getTimeSlot(x, prm.MINS))
        df = df[df['PickupTravelTime'] > 59]
        df = df[df['DeliverTravelTime'] > 59]

        df['pxInd'] = df['pLon'].apply(
            lambda x: utils.toInd(x, prm.lonMin, prm.lonTick))
        df['pyInd'] = df['pLat'].apply(
            lambda x: utils.toInd(x, prm.latMin, prm.latTick))
        df['dxInd'] = df['dLon'].apply(
            lambda x: utils.toInd(x, prm.lonMin, prm.lonTick))
        df['dyInd'] = df['dLat'].apply(
            lambda x: utils.toInd(x, prm.latMin, prm.latTick))

        self.order_df = df
Example #19
 def allocate_iterate(self):
     itr = iterate.iterate()
     det = detector.detector()
     dset = dataset.dataset(det)
     param = params.params()
     qmax = det.generate_detectors(config_fname)
     dset.generate_data(config_fname)
     param.generate_params(config_fname)
     dset.generate_blacklist(config_fname)
     itr.generate_iterate(config_fname, qmax, param, det, dset)
     return itr, det, dset, param, qmax
Example #20
def classifiableChecker(data, colType):
    try:
        if colType == 'float':
            return 0
        else:
            if len(set(data)) <= params().classifiableThreshold:
                return 1
            else:
                return 0
    except Exception as e:
        raise Exception(f"[classifiableChecker] {traceback.format_exc()}")
Example #21
 def allocate_iterate(self):
     itr = iterate.iterate()
     det = detector.detector()
     dset = dataset.dataset(det)
     param = params.params()
     qmax = det.generate_detectors(config_fname)
     dset.generate_data(config_fname)
     param.generate_params(config_fname)
     dset.generate_blacklist(config_fname)
     itr.generate_iterate(config_fname, qmax, param, det, dset)
     return itr, det, dset, param, qmax
Example #22
def matl_prop_testing(basepath):

    path_matl = basepath + 'battsimpy/data/Model_v1/Model_Pars/matl_prop.txt'
    path_des = basepath + 'battsimpy/data/Model_v1/Model_Pars/des_prop.txt'

    a = params.params()

    a.get_matl_properties(path_matl)

    print '==============================='
    print a.matl_prop
    print '==============================='
Example #23
def main():
    myparams = params.params()
    mainpath = 'home/francisco/GitHub/SIF/'
    datapath = '/home/francisco/GitHub/cnn-dailymail/finished_files/chunked/'
    wordf = os.path.join(mainpath, 'data/glove.840B.300d.txt')
    weightf = os.path.join(mainpath, 'auxiliary_data/enwiki_vocab_min200.txt')
    wp = 1e-3
    rp = 0
    # Example case
    fl = ['/home/francisco/GitHub/cnn-dailymail/finished_files/chunked/train_000.bin']
    #fl = os.listdir(datapath)
    embed_sentences(datapath, wordf, weightf, wp, myparams, rp, fl)
Example #24
    def __init__(self):
        self.num_input_tokens = None
        self.nb_classes = 32
        self.labels = None
        self.labels_idx2word = None
        self.model = None
        self.vgg16_model = None
        self.expected_frames = None
        self.vgg16_include_top = None
        self.config = None

        self.op = params()
Example #25
def g_fun(w):
    import numpy as np
    import params
    reload(params)
    par = params.params()
    tau = par.tau

    taud = 0.

    g = np.exp(-1j * w * taud) / ((1 + 1j * w * tau) ** 2)  # alpha function

    return g
Example #26
 def test_generate_output_dirs(self):
     print('=== Testing generate_output_dirs() ===')
     param = params.params()
     param.generate_params(config_fname)
     flist = [
         recon_folder + b'/data/' + d for d in [
             b'output', b'weights', b'orientations', b'scale',
             b'likelihood', b'mutualInfo'
         ]
     ]
     [shutil.rmtree(d) for d in flist if os.path.exists(d)]
     param.generate_output_dirs()
     self.assertTrue(np.array([os.path.exists(d) for d in flist]).all())
     param.generate_output_dirs()
Example #27
def generate_vecs(models, document):
    words, weight4ind, rmpc, We = models

    x, m = data_io.sentences2idx(document, words)
    # x is the array of word indices, m is the binary mask indicating whether there is a word in that location
    w = data_io.seq2weight(x, m, weight4ind)  # get word weights

    # set parameters
    param = params.params()
    param.rmpc = rmpc
    # get SIF embedding
    embedding = SIF_embedding.SIF_embedding(
        We, x, w, param)  # embedding[i,:] is the embedding for sentence i
    return embedding
Example #28
def linear_response_1loop(w, W, phi_r):
    '''
    calculate one-loop correction to the propagator around mean-field theory
    :param w: frequency
    :param W: weight matrix, weighted by postsynaptic gain
    :param phi_r: firing rates
    :return: propagator matrix
    '''

    par = params.params()

    b = par.b
    gain = par.gain
    N = par.N

    Tmax = 100
    dt_ccg = 1
    wmax = 1. / dt_ccg
    dw = 1. / Tmax

    w_calc = np.arange(-wmax, wmax, dw) * math.pi
    dw *= math.pi
    Nw = w_calc.size

    g0 = np.dot(W, phi_r) + b
    phi_1 = phi_prime(g0, gain)
    phi_1_diag = np.diag(phi_1)

    phi_2 = phi_prime2(g0, gain)
    phi_2_diag = np.diag(phi_2)

    F1 = linear_response_fun(w, np.dot(phi_1_diag, W), phi_r)
    Fbar = np.dot(g_fun(w) * W, F1)

    Fbar_int = np.zeros((N, N), dtype='complex128')

    for o in range(Nw):
        Fbar1 = np.dot(
            g_fun(w_calc[o]) * W,
            linear_response_fun(w_calc[o], np.dot(phi_1_diag, W), phi_r))
        Fbar2 = np.dot(
            g_fun(w - w_calc[o]) * W,
            linear_response_fun(w - w_calc[o], np.dot(phi_1_diag, W), phi_r))
        Fbar_int += np.dot(Fbar1 * Fbar2, np.dot(phi_1_diag, Fbar)) * dw

    linear_response_1loop = np.dot(np.dot(F1, phi_2_diag / 2.),
                                   Fbar_int) / (2 * math.pi**1)

    return linear_response_1loop
Example #29
def main(words_path, vectors_path, weight_path, fpc_name, test_name):
    # Loading preprocessed words, vectors and weight4ind files.
    print("loading words file...")
    words = pickle.load(open(words_path, 'rb'))
    print("loading vectors file...")
    vectors = pickle.load(open(vectors_path, 'rb'))
    print("loading weight4ind file...")
    weight4ind = pickle.load(open(weight_path, 'rb'))
    rmpc = 1
    param = params.params()
    param.rmpc = rmpc
    fpc_file = fpc_name
    test_dataset = test_name
    print("calculating sentence similarity scores, use fpc file: {}.".format(fpc_file))
    pearson, mse = eval.sim_evaluate_one(vectors, words, weight4ind, sim_algo.weighted_average_sim_rmpc, param, fpc_file, test_dataset)
Example #30
def params_testing(basepath, confname):
    """
    Checks to see that the buildpars method in params will run. Does not do anything
    to ensure the correctness of the data
    """
    # Config data load
    path = basepath + 'battsimpy/' + confname
    cfr = confreader.reader(path)
    RunInput = cfr.conf_data

    Pdat = {'RunInput': RunInput}
    V_init = 4.1

    a = params.params()
    a.buildpars(V_init, Pdat)
Example #31
 def __init__(self,algoInfo,dataCol,fid):
     try:
         self.params=params()
         self.imgId=str(uuid.uuid1())
         self.fid=fid
         self.algoInfo=algoInfo
         self.dataCol=dataCol #{"x":"x_col","y":"y_col","value":"value_col"}
         self.data=self.getData() #{"x":np.array,"y":np.array,"all":pd.datafreame,"value":np.array}
         self.bokeh_fig=self.init_figure()
         self.imgWH=None
         self.mat_plt=None
         self.component=None
         # logging.debug(f'[dataViz] algoInfo: {self.algoInfo}')
     except Exception as e:
         raise Exception(f'[dataViz][{self.algoInfo["algoname"]}]{e}')
Example #32
 def __init__(self, img):
     try:
         self.params = params()
         self.imgId = str(uuid.uuid1())
         self.img = img
         self.algoInfo = {
             "algoname": "CustomModelPreview",
             "lib": "custom",
             "friendlyname": "custom"
         }
         self.bokeh_fig = self.init_figure()
         self.imgWH = None
         self.component = None
     except Exception as e:
         raise Exception(f'[analyticViz][{self.algoInfo["algoname"]}]{e}')
Example #33
def rates_1loop(W):
    """
    inputs: weight matrix
    calculate one-loop correction for steady-state firing rates in fluctuation expansion
    """

    import params
    reload(params)
    from phi import phi_prime
    from phi import phi_prime2

    par = params.params()
    N = par.N
    gain = par.gain
    b = par.b

    phi_r = rates_ss(W)
    Tmax = 100
    dt_ccg = 1.
    wmax = 1. / dt_ccg
    dw = 1. / Tmax

    w = np.arange(-wmax, wmax, dw) * math.pi
    dw = dw * math.pi
    Nw = w.size

    g0 = np.dot(W, phi_r) + b
    phi_1 = phi_prime(g0, gain)
    phi_1 = np.diag(phi_1)

    phi_2 = phi_prime2(g0, gain)
    Fbarsum = np.zeros((N, Nw), dtype=complex)

    for o in range(Nw):  # first compute Fbar over dummy frequency
        Fbar1 = np.dot(
            g_fun(w[o]) * W, linear_response_fun(w[o], np.dot(phi_1, W),
                                                 phi_r))
        Fbarsum[:, o] = np.dot(Fbar1 * Fbar1.conj(),
                               phi_r)  # sum over first inner vertex

    Fbarsum_int = np.sum(Fbarsum,
                         axis=1) * dw  # integrate over dummy frequency

    F1 = linear_response_fun(0., np.dot(phi_1, W), phi_r)

    r_1loop = np.dot(F1, .5 * phi_2 * Fbarsum_int) / (
        (2 * math.pi)**1)  # sum over second inner vertex
    return r_1loop
Example #34
 def __init__(self, in_channels, out_channels):
     super().__init__()
     
     parameter = params().get_params()
     if parameter.prelu:
         activation_func = 'prelu'
     else:
         activation_func = 'relu'
     norm_layer, activation_func = get_layer_info(out_channels, activation_func)
     
     up_layer = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
     self.up = nn.Sequential(
                 up_layer,
                 Conv(in_channels, in_channels//4),
                 norm_layer,
                 activation_func)
Example #35
 def __init__(self, fid, action):
     try:
         self.params = params()
         self.fid = fid
         self.action = action
         fid, self.dataType, self.path, self.numFile, status, actionFile = getFileInfo(
             self.fid)[0]
         self.colType = getColType(self.numFile, self.dataType).get()
         self.df = getDf(self.numFile, self.dataType).get()
         self.data = {}
         for c in self.colType:
             self.data[c['name']] = {
                 'colType': c['type'],
                 'classifiable': c['classifiable'],
                 'do': False
             }
             self.data[c['name']]['data'] = np.asarray(self.df[c['name']])
             #self.data[c['name']]['missingFiltering']=None
             #self.data[c['name']]['outlierFiltering']=None
             #self.data[c['name']]['normalize']=None
             #self.data[c['name']]['stringCleaning']=None
             self.data[c['name']]['do'] = False
             # self.data={"col1":{"type":"int","action":action,"data":data}}
         for c in self.action:
             if c['col'] in self.data:
                 self.data[
                     c['col']]['missingFiltering'] = c['missingFiltering']
                 self.data[
                     c['col']]['outlierFiltering'] = c['outlierFiltering']
                 self.data[c['col']]['normalize'] = c['normalize']
                 self.data[c['col']]['stringCleaning'] = c['stringCleaning']
                 #self.data[c['col']]['data']=np.asarray(self.df[c['col']])
                 self.data[c['col']]['do'] = True
         for col in self.data:
             if 'data' not in self.data[col]:
                 pass
         okCount = 0
         for k, v in self.data.items():
             if 'data' in v:
                 okCount += 1
                 break
         if okCount == 0:
             raise Exception(
                 f"[Preprocess Init] actionCol and fileCol not corresponed at all"
             )
     except Exception as e:
         raise Exception(f"[Preprocess Init]{traceback.format_exc()}")
Example #36
def SIFSentEmbedding(weighttxt,
                     docfile,
                     words,
                     We,
                     weight4ind,
                     weightpara=1e-3,
                     paramm=1):
    # the parameter in the SIF weighting scheme, usually in the range [3e-5, 3e-3]
    # number of principal components to remove in SIF weighting scheme
    sentences = sent_tokenize(docfile)
    x, m = data_io.sentences2idx(sentences, words)
    w = data_io.seq2weight(x, m, weight4ind)  # get word weights
    paramm = params.params()
    paramm = paramm.LC
    embedding = SIF_embedding.SIF_embedding(
        We, x, w, paramm)  # embedding[i,:] is the embedding for sentence i
    return embedding
Example #37
    def __init__(self, proxies=None, apikey=None):
        """
        Creates an instance of the ZAP api client.

        :Parameters:
           - `proxies`: dictionary of ZAP proxies to use.

        Note that all of the other classes in this directory are generated;
        new ones will need to be manually added to this file.
        """
        self.__proxies = proxies or {
            'http': 'http://127.0.0.1:8080',
            'https': 'http://127.0.0.1:8080'
        }
        self.__apikey = apikey

        self.acsrf = acsrf(self)
        self.ajaxSpider = ajaxSpider(self)
        self.ascan = ascan(self)
        self.authentication = authentication(self)
        self.authorization = authorization(self)
        self.autoupdate = autoupdate(self)
        self.brk = brk(self)
        self.context = context(self)
        self.core = core(self)
        self.forcedUser = forcedUser(self)
        self.httpsessions = httpSessions(self)
        self.importLogFiles = importLogFiles(self)
        self.params = params(self)
        self.pnh = pnh(self)
        self.pscan = pscan(self)
        self.reveal = reveal(self)
        self.script = script(self)
        self.search = search(self)
        self.selenium = selenium(self)
        self.sessionManagement = sessionManagement(self)
        self.spider = spider(self)
        self.stats = stats(self)
        self.users = users(self)

        # not very nice, but prevents warnings when accessing the ZAP API via https
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
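A usage sketch for the client above, assuming the standard python-owasp-zap package layout; the API key and target URL are placeholders:

from zapv2 import ZAPv2  # assumed import path for the client above

zap = ZAPv2(apikey='placeholder-api-key',
            proxies={'http': 'http://127.0.0.1:8080',
                     'https': 'http://127.0.0.1:8080'})
scan_id = zap.spider.scan('http://example.com')        # start a spider scan
print(zap.core.alerts(baseurl='http://example.com'))   # read back any alerts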
Example #38
    def __init__(self, proxies={'http': 'http://127.0.0.1:8080',
        'https': 'http://127.0.0.1:8080'}):
        """
        Creates an instance of the ZAP api client.

        :Parameters:
           - `proxies`: dictionary of ZAP proxies to use.
           
        Note that all of the other classes in this directory are generated;
        new ones will need to be manually added to this file.
        """
        self.__proxies = proxies
        
        self.acsrf = acsrf(self)
        self.ascan = ascan(self)
        self.auth = auth(self)
        self.autoupdate = autoupdate(self)
        self.context = context(self)
        self.core = core(self)
        self.params = params(self)
        self.pscan = pscan(self)
        self.search = search(self)
        self.spider = spider(self)
Example #39
def open(uri, mode='a',  eclass=_eclass.manifest):
    """Open a Blaze object via an `uri` (Uniform Resource Identifier).

    Parameters
    ----------
    uri : str
        Specifies the URI for the Blaze object.  It can be a regular file too.
        The URL scheme indicates the storage type:

          * carray: Chunked array
          * ctable: Chunked table
          * sqlite: SQLite table (the URI 'sqlite://' creates in-memory table)

        If no URI scheme is given, carray is assumed.

    mode : the open mode (string)
        Specifies the mode in which the object is opened.  The supported
        values are:

          * 'r' for read-only
          * 'w' for emptying the previous underlying data
          * 'a' for allowing read/write on top of existing data

    Returns
    -------
    out : an Array or Table object.

    """
    ARRAY = 1
    TABLE = 2

    uri = urlparse(uri)
    path = uri.netloc + uri.path
    parms = params(storage=path)

    if uri.scheme == 'carray':
        source = CArraySource(params=parms)
        structure = ARRAY

    elif uri.scheme == 'ctable':
        source = CTableSource(params=parms)
        structure = TABLE

    elif uri.scheme == 'sqlite':
        # Empty path means memory storage
        parms = params(storage=path or None)
        source = SqliteSource(params=parms)
        structure = TABLE

    else:
        # Default is to treat the URI as a regular path
        parms = params(storage=path)
        source = CArraySource(params=parms)
        structure = ARRAY

    # Don't want a deferred array (yet)
    # return NDArray(source)
    if structure == ARRAY:

        if eclass is _eclass.manifest:
            return Array(source)
        elif eclass is _eclass.delayed:
            return NDArray(source)

    elif structure == TABLE:

        if eclass is _eclass.manifest:
            return Table(source)
        elif eclass is _eclass.delayed:
            return NDTable(source)
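A sketch of how the eclass switch above would be exercised, again assuming the legacy Blaze API; the URIs are placeholders:

# Hypothetical calls against the open() defined above.
a = open('carray:///tmp/demo')                             # manifest Array
t = open('sqlite://')                                      # in-memory SQLite-backed Table
d = open('ctable:///tmp/demo_ct', eclass=_eclass.delayed)  # deferred NDTable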
Example #40
def str2bool(v):
    if v is None:
        return False
    if v.lower() in ("yes", "true", "t", "1"):
        return True
    if v.lower() in ("no", "false", "f", "0"):
        return False
    raise ValueError('A type that was supposed to be boolean is not boolean.')

def learner2bool(v):
    if v is None:
        return lasagne.updates.adagrad
    if v.lower() == "adagrad":
        return lasagne.updates.adagrad
    if v.lower() == "adam":
        return lasagne.updates.adam
    raise ValueError('A type that was supposed to be a learner is not.')

random.seed(1)
np.random.seed(1)

params = params()

parser = argparse.ArgumentParser()
parser.add_argument("-LW", help="Lambda for word embeddings (normal training).", type=float)
parser.add_argument("-LC", help="Lambda for composition parameters (normal training).", type=float)
parser.add_argument("-outfile", help="Output file name.")
parser.add_argument("-batchsize", help="Size of batch.", type=int)
parser.add_argument("-dim", help="Size of input.", type=int)
parser.add_argument("-wordfile", help="Word embedding file.")
parser.add_argument("-layersize", help="Size of output layers in models.", type=int)
parser.add_argument("-updatewords", help="Whether to update the word embeddings")
parser.add_argument("-wordstem", help="Nickname of word embeddings used.")
parser.add_argument("-save", help="Whether to pickle the model.")
parser.add_argument("-train", help="Training data file.")
parser.add_argument("-margin", help="Margin in objective function.", type=float)
parser.add_argument("-samplingtype", help="Type of sampling used.")
Example #41
def GetLSTM():
    import sys
    sys.path.append('iclr2016/main')
    sys.path.append('iclr2016/sentiment')
    import cPickle
    import ppdb_utils
    import evaluate
    from lstm_model_sentiment import lstm_model_sentiment
    import params
    import time
    import numpy as np
    import numpy.random
    import random
    import argparse
    import lasagne
    import utils
    def str2bool(v):
        if v is None:
            return False
        if v.lower() in ("yes", "true", "t", "1"):
            return True
        if v.lower() in ("no", "false", "f", "0"):
            return False
        raise ValueError('A type that was supposed to be boolean is not boolean.')
    
    def learner2bool(v):
        if v is None:
            return lasagne.updates.adam
        if v.lower() == "adagrad":
            return lasagne.updates.adagrad
        if v.lower() == "adam":
            return lasagne.updates.adam
        raise ValueError('A type that was supposed to be a learner is not.')

    random.seed(1)
    np.random.seed(1)
    
    params = params.params()
    
    parser = argparse.ArgumentParser()
    parser.add_argument("-LW", help="Lambda for word embeddings (normal training).", type=float)
    parser.add_argument("-LC", help="Lambda for composition parameters (normal training).", type=float)
    parser.add_argument("-outfile", help="Output file name.")
    parser.add_argument("-batchsize", help="Size of batch.", type=int)
    parser.add_argument("-dim", help="Size of input.", type=int)
    parser.add_argument("-memsize", help="Size of classification layer.",
                        type=int)
    parser.add_argument("-wordfile", help="Word embedding file.")
    parser.add_argument("-layersize", help="Size of output layers in models.", type=int)
    parser.add_argument("-updatewords", help="Whether to update the word embeddings")
    parser.add_argument("-wordstem", help="Nickname of word embeddings used.")
    parser.add_argument("-save", help="Whether to pickle the model.")
    parser.add_argument("-traindata", help="Training data file.")
    parser.add_argument("-devdata", help="Training data file.")
    parser.add_argument("-testdata", help="Testing data file.")
    parser.add_argument("-peephole", help="Whether to use peephole connections in LSTM.")
    parser.add_argument("-outgate", help="Whether to use output gate in LSTM.")
    parser.add_argument("-nonlinearity", help="Type of nonlinearity in projection and DAN model.",
                        type=int)
    parser.add_argument("-nntype", help="Type of neural network.")
    parser.add_argument("-evaluate", help="Whether to evaluate the model during training.")
    parser.add_argument("-epochs", help="Number of epochs in training.", type=int)
    parser.add_argument("-regfile", help="Path to model file that we want to regularize towards.")
    parser.add_argument("-minval", help="Min rating possible in scoring.", type=int)
    parser.add_argument("-maxval", help="Max rating possible in scoring.", type=int)
    parser.add_argument("-LRW", help="Lambda for word embeddings (regularization training).", type=float)
    parser.add_argument("-LRC", help="Lambda for composition parameters (regularization training).", type=float)
    parser.add_argument("-traintype", help="Either normal, reg, or rep.")
    parser.add_argument("-clip", help="Threshold for gradient clipping.",type=int)
    parser.add_argument("-eta", help="Learning rate.", type=float)
    parser.add_argument("-learner", help="Either AdaGrad or Adam.")
    parser.add_argument("-task", help="Either sim, ent, or sentiment.")
    parser.add_argument("-numlayers", help="Number of layers in DAN Model.", type=int)
    parser.add_argument("-input", help="Fine with list of sentences to classify.")
    args = parser.parse_args(['-wordstem', 'simlex', '-wordfile', 'iclr2016/data/paragram_sl999_small.txt', '-outfile', 'gpu-lstm-model', '-dim', '300', '-layersize', '300', '-save', 'False', '-nntype', 'lstm_sentiment', '-evaluate', 'True', '-epochs', '10', '-peephole', 'True', '-traintype', 'rep', '-task', 'sentiment', '-updatewords', 'True', '-outgate', 'True', '-batchsize', '25', '-LW', '1e-06', '-LC', '1e-06', '-memsize', '300', '-learner', 'adam', '-eta', '0.001', '-regfile', 'iclr2016/sentiment_2.pickle', '-input', 'iclr2016/input.txt'])
    params.LW = args.LW
    params.LC = args.LC
    params.outfile = args.outfile
    params.batchsize = args.batchsize
    params.hiddensize = args.dim
    params.memsize = args.memsize
    params.wordfile = args.wordfile
    params.nntype = args.nntype
    params.layersize = args.layersize
    params.updatewords = str2bool(args.updatewords)
    params.wordstem = args.wordstem
    params.save = str2bool(args.save)
    params.traindata = args.traindata
    params.devdata = args.devdata
    params.testdata = args.testdata
    params.usepeep = str2bool(args.peephole)
    params.useoutgate = str2bool(args.outgate)
    params.nntype = args.nntype
    params.epochs = args.epochs
    params.traintype = args.traintype
    params.evaluate = str2bool(args.evaluate)
    params.LRW = args.LRW
    params.LRC = args.LRC
    params.learner = learner2bool(args.learner)
    params.task = args.task
    params.numlayers = args.numlayers
    params.input = args.input
    
    if args.eta:
        params.eta = args.eta
    
    params.clip = args.clip
    if args.clip:
        if params.clip == 0:
            params.clip = None
    
    params.regfile = args.regfile
    params.minval = args.minval
    params.maxval = args.maxval
    
    if args.nonlinearity:
        if args.nonlinearity == 1:
            params.nonlinearity = lasagne.nonlinearities.linear
        if args.nonlinearity == 2:
            params.nonlinearity = lasagne.nonlinearities.tanh
        if args.nonlinearity == 3:
            params.nonlinearity = lasagne.nonlinearities.rectify
        if args.nonlinearity == 4:
            params.nonlinearity = lasagne.nonlinearities.sigmoid
 
    (words, We) = ppdb_utils.getWordmap(params.wordfile)
    model = lstm_model_sentiment(We, params)
    import re
    def PredictProbaFn(X):
        preds = []
        seq1 = []
        ct = 0
        for i in X:
            p1 = i.strip()
            p1 = ' '.join(re.split(r'(\W+)', p1))
            X1 = evaluate.getSeq(p1,words)
            seq1.append(X1)
            ct += 1
            if ct % 100 == 0:
                x1,m1 = utils.prepare_data(seq1)
                scores = model.predict_proba(x1,m1)
                if scores.shape[0] > 1:
                    scores = np.squeeze(scores)
                preds.extend(scores.tolist())
                seq1 = []
        if len(seq1) > 0:
            x1,m1 = utils.prepare_data(seq1)
            scores = model.predict_proba(x1,m1)
            if scores.shape[0] > 1:
                scores = np.squeeze(scores)
            preds.extend(scores.tolist())
        preds = np.array(preds).reshape(-1, 1)
        return np.hstack((1 - preds, preds))
    return PredictProbaFn
Example #42
 def test_free_params(self):
     print('=== Testing free_params() ===')
     param = params.params()
     param.generate_params(config_fname)
     param.free_params()
     param.free_params()
Example #43
def UP_FFNI(driver):

    # Uses the FFNI method for UP

    # ----------------------  Setup  ---------------------------
    methd = 'FFNI'
    method = 4

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    node,w = params.params(method, nodes, inpt, stvars)    # Do I need to transpose these matrices?
    #[quadpts] = params(method, nodes, inpt, stvars)

    # ----------------------  Model  ---------------------------

    # Create full factorial experiment from individual nodes and weights
    j = fullfact(nodes)
    pts = shape(j)[0]
    x=zeros((pts,inpt))
    wj=zeros((pts,inpt))
    for y in range(pts):
        for i in range(inpt):
            x[y][i] = node[i][j[y][i]]
            wj[y][i] = w[i][j[y][i]]
    weight = prod(wj, 1)

    if krig == 1:
        load("dmodel")
        G_s = predictor(x, dmodel)
    else:
#        G_s = zeros((pts, otpt))
#        for i in range(pts):
#            print 'Running simulation',i+1,'of',pts
#            G_s[i] = run_model(driver, x[i])
            # G_s[i] = modelica.RunModelica(x[i], modelname, properties)
        G_s = run_list(driver, x)

    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    covar_m = zeros((otpt,otpt))
    for k in range(otpt):
        G_mean[k] = sum(weight * G_s[:, k])
    for k in range(otpt):
        for j in range(otpt):
            covar_m[k, j] = sum(weight * (G_s[:, k] - G_mean[k]) * (G_s[:, j] - G_mean[j]))
            covar_m[j, k] = covar_m[k, j]
        G_skew[k] = sum(weight * (G_s[:, k] - G_mean[k]) ** 3) / covar_m[k, k] ** 1.5
        G_kurt[k] = sum(weight * (G_s[:, k] - G_mean[k]) ** 4) / covar_m[k, k] ** 2

    CovarianceMatrix = covar_m.transpose()
    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': G_skew, 'Kurtosis': G_kurt}

    # ----------------------  Analyze  ---------------------------

    # Calculate the PCC for the FFNI method
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0,otpt):
        PCC[k],dtype[k],Inv1[k],m1[k],m2[k],a1[k],a2[k],alph[k],beta[k],lo[k],hi[k] = pearscdf.pearscdf(limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]), Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] != None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(dtype[k],limstate[k],Moments['Mean'][k],Moments['Variance'][k],numbins)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'Plotting': Plotting, 'PCC': PCC}

    return Results
Example #44
        self.sut.deleteBridgeInterface('br1', self.config.SUT_LAN4)
        self.sut.deleteBridgeInterface('br1', self.config.SUT_LAN6)
        self.sut.deleteBridge('br1')

        # Ensure all the interfaces are down
        self.sut.down(self.config.SUT_LAN1)
        self.sut.down(self.config.SUT_LAN2)
        self.sut.down(self.config.SUT_LAN3)
        self.sut.down(self.config.SUT_LAN4)
        self.sut.down(self.config.SUT_LAN5)
        self.sut.down(self.config.SUT_LAN6)
        self.sut.down(self.config.SUT_OPTICAL3)


if __name__ == '__main__':
    args = params.params()
    CONFIG = params.readConfig(args.config, fourPorts=False)
    SUT = sut.SUT(hostname=CONFIG.hostname, key=CONFIG.key,
                  mgmt=CONFIG.SUT_MGMT)
    SUT.cleanSystem()
    TRAFFIC = traffic.Traffic()

    if args.xml:
        testRunner = xmlrunner.XMLTestRunner(output='test-reports',
                                             verbosity=args.verbose)
    else:
        testRunner = unittest2.TextTestRunner(failfast=args.failfast,
                                              verbosity=args.verbose)
    if args.vlanfiltering:
        VLAN_FILTERING = True
Example #45
def loadtxt(filetxt, storage):
    """ Convert txt file into Blaze native format """
    Array(np.loadtxt(filetxt), params=params(storage=storage))
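A hedged usage sketch; the text file and storage directory are placeholders, and note that the resulting Array is persisted to `storage` rather than returned:

loadtxt('measurements.txt', storage='measurements.blz')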
Example #46
def UP_UDR(driver):
# Uses the UDR method for UP

    methd = 'UDR'
    method = 5

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    node,weight = params.params(method, nodes, inpt, stvars)

# ----------------------  Model  ---------------------------
    # set_printoptions(precision=4)
    # set_printoptions(suppress=True)

    x = kron(mu, ones((inpt * nodes[0], 1)))
    for ii in range(0,inpt):
        k = ii * nodes[ii]
        l = (ii+1) * nodes[ii]
        x[k:l, ii] = node[ii,:]

#    G_mu = run_model(driver, mu)
    values = [mu]

    G_s = zeros((inpt,max(nodes),otpt))
    for k,inputname in enumerate(driver._json_tree['Configurations']['Configuration']['PCCInputArguments']['StochasticInputs']['InputDistributions']):
        if krig == 1:
            load('dmodel')
            for j in  range(0,nodes[k]):
                var = k * nodes[k] + j
                X = x[var, :]
                G_s[k, j] = predictor(X, dmodel)
        else:
            for j in  range(0,nodes[k]):
                var = k * nodes[k] + j
                X = x[var, :]
#                print 'Running simulation on node',j,'of input',inputname['Name']
#                G_s[k, j] = run_model(driver, X)
                values.append(X)

    out = iter(run_list(driver, values))
    G_mu = out.next()
    for k,inputname in enumerate(driver._json_tree['Configurations']['Configuration']['PCCInputArguments']['StochasticInputs']['InputDistributions']):
        for j in  range(0,nodes[k]):
            G_s[k, j] = out.next()

    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    G_sigma = zeros(otpt)
    covar_m = zeros((otpt,otpt))
    gs = zeros(otpt)
    gk = zeros(otpt)
    moms = []
    for l in range(0,otpt):
        moms.append(newmoment(inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]))
        G_mean[l] = moment(1, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
    for l in range(0,otpt):
        moms.append(newmoment(inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]))
        G_sigma[l] = moment(2, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
        gs[l] = moment(3, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
        G_skew[l] = moment(3, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]) / G_sigma[l] ** 1.5
        gk[l] = moment(4, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
        G_kurt[l] = moment(4, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]) / G_sigma[l] ** 2
        for j in range(l,otpt):
            covar_m[l, j] = moment2(1, inpt, nodes[0], weight, G_s[:, :, l], l, G_s[:, :, j], j, G_mu, G_mean)
            covar_m[j, l] = covar_m[l, j]

    CovarianceMatrix = covar_m.transpose()
    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': G_skew, 'Kurtosis': G_kurt}

    # ----------------------  Analyze  ---------------------------

    # Calculate the PCC for the FFNI method
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0,otpt):
        PCC[k],dtype[k],Inv1[k],m1[k],m2[k],a1[k],a2[k],alph[k],beta[k],lo[k],hi[k] = pearscdf.pearscdf(limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]), Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] != None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(dtype[k],limstate[k],Moments['Mean'][k],Moments['Variance'][k],numbins)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'Plotting': Plotting, 'PCC': PCC}

    return Results