Example #1
    def __init__(self, samp0_is_population=None, samp1_is_population=None):
        self.samp0 = Sample(title='samp0', is_population=samp0_is_population)
        """The first sample, is usually the population, or the pre-test sample"""

        self.samp1 = Sample(title='samp1', is_population=samp1_is_population)
        """The second sample, the post-test sample."""

        # Parameters
        self.alpha = 0.05
        """Requested significance level"""

        self.dir = None  # see StatTool.xxx_TEST constants
        """Directionality of the test."""

        # Expected difference
        self.expected_difference = 0.0
        """The expected difference between the two sample means. 
        
        For two tailed test, usually we write the hypothesis as μ1 != μ2.
        This can be rewritten as μ1 - μ2 != 0. And actually the general 
        expression is μ1 - μ2 != expected_difference.
        """

        # Description
        self.treatment_title = "treatment"
        """The name of the treatment"""

        self.results_title = "results"
        """The name of the dependent variable"""
Example #2
    def create_deck(self):
        '''Lesson4-1
        '''
        # Initialize counter variables
        i = 0
        j = 0

        # Initialize the deck list
        DECK = []

        try:
            # Get the list of suits
            Suits = Sample().get_suits()
            # Get the list of ranks
            Ranks = Sample().get_ranks()

            # Build the 4 x 13 cards
            while i < 4:
                while j < 13:
                    Sample().set_trumpcard(Ranks[j], Suits[i])
                    DECK.append([Ranks[j], Suits[i]])
                    j += 1
                i += 1
                j = 0

            # Register the deck
            Sample().set_deck(DECK)

            # Store it on the instance
            self.deck = DECK

        except ValueError:
            # Abort on invalid card data (requires "import sys" at module level)
            sys.exit()
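
A quick sanity check for create_deck, assuming the enclosing class (called CardGame here, a hypothetical name) can be instantiated directly:

# Hypothetical usage: a full deck is 4 suits x 13 ranks = 52 cards.
game = CardGame()
game.create_deck()
assert len(game.deck) == 52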
Example #3
    def initWithoutDBScan(self):

        sample = Sample(self.buffer.iloc[0].values, 0)
        sample.set_timestamp(1)

        mc = MicroCluster(1, self.lamb, self.pMicroCluster.N + 1)

        maxEpsilon = 0

        for sampleNumber in range(0, len(self.buffer)):
            sample = Sample(self.buffer.iloc[sampleNumber].values,
                            sampleNumber)
            sample.set_timestamp(sampleNumber + 1)
            mc.insert_sample(sample)

            if mc.radius > maxEpsilon:
                maxEpsilon = mc.radius
                # print 'New max: {}'.format(mc.radius)

        self.pMicroCluster.insert(mc)

        if isinstance(self.epsilon, str) and self.epsilon == 'auto':
            # Use the largest micro-cluster radius observed during initialization
            self.epsilon = maxEpsilon
Example #4
    def initialDBScanSciLearn(self):

        db = DBSCAN(eps=8, min_samples=self.minPts,
                    algorithm='brute').fit(self.buffer)
        clusters = db.labels_
        self.buffer['clusters'] = clusters

        clusterNumber = np.unique(clusters)

        for clusterId in clusterNumber:

            if (clusterId != -1):

                cl = self.buffer[self.buffer['clusters'] == clusterId]
                cl = cl.drop('clusters', axis=1)

                sample = Sample(cl.iloc[0].tolist())

                mc = MicroCluster(sample, self.currentTimestamp, self.lamb)

                # Insert the remaining samples (the first one seeded the cluster)
                for sampleNumber in range(1, len(cl)):
                    sample = Sample(cl.iloc[sampleNumber].tolist())
                    mc.insertSample(sample, self.currentTimestamp)

                self.pMicroCluster.insert(mc)
예제 #5
0
    def apply_action(self, action):
        """Apply the action to the grid.

        If left is applied, the occupied state index decreases by 1,
        unless the agent is already at 0, in which case the state does
        not change.

        If right is applied, the occupied state index increases by 1,
        unless the agent is already at num_states-1, in which case the
        state does not change.

        The reward function is determined by the reward location specified when
        constructing the domain.

        If failure_probability is > 0 then there is a chance for the
        left and right actions to fail. If the left action fails, the
        agent moves right; similarly, if the right action fails, the
        agent moves left.

        Parameters
        ----------
        action: int
            Action index. Must be in range [0, num_actions())

        Returns
        -------
        sample.Sample
            The sample for the applied action.

        Raises
        ------
        ValueError
            If the action index is outside of the range [0, num_actions())

        """
        if action < 0 or action >= self.num_actions():
            raise ValueError('Action index outside of bounds [0, %d)' %
                             self.num_actions())

        new_location = self.next_location(self._state[0], action)

        # in the case of failing action
        if new_location == self._state[0] or random() > self.transition_probabilities[new_location]:
            return Sample(self._state.copy(), action, 0., self._state.copy())

        next_state = np.array([new_location])

        if self.reward_location == new_location:
            reward = 100.
            absorb = True
            sample = Sample(self._state.copy(), action, reward, next_state.copy(), absorb)
            self.reset(self.initial_state)
        else:
            absorb = False
            reward = 0.
            sample = Sample(self._state.copy(), action, reward, next_state.copy(), absorb)
            self._state = next_state

        return sample
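
A short rollout sketch for this domain; GridDomain, its constructor, and the attribute access on the returned Sample are assumptions not shown in the snippet:

import numpy as np

# Hypothetical episode loop over the grid domain above.
domain = GridDomain()
for _ in range(20):
    action = np.random.randint(domain.num_actions())
    sample = domain.apply_action(action)
    if sample.absorb:  # assumed attribute: True once the reward location is hit
        break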
Example #6
def computeSNVTreeError(snvMatrix, cMatrix, lafMatrix, realTree):
    sampleNum = snvMatrix.shape[1]

    cObjMatrix = np.empty(cMatrix.shape, dtype=object)
    for row in range(0, cMatrix.shape[0]):
        for col in range(0, cMatrix.shape[1]):
            currentC = cMatrix[row][col]
            # Empty vector to stop initialization of allele combinations
            cObj = C([2, int(currentC)], [])
            dummyCMu = DummyCMu()
            dummyCMu.c = cObj
            cObjMatrix[row][col] = dummyCMu

    [chromosomes, positions, variantIndices] = obtainSomaticVariantIndices()
    #print variantIndices

    #Compute the distance pairwise between samples
    distanceMatrix = np.empty([sampleNum, sampleNum], dtype=float)

    for sample1 in range(0, sampleNum):
        for sample2 in range(0, sampleNum):

            #Make the sample objects. These now need somatic variants and a CMu
            sample1Obj = Sample(None, None)
            sample1Obj.bestCMu = cObjMatrix[:, sample1]
            #the dummy c mu is actually a list of dummy c mu's, so we need to make one for each c
            #dummyCMu = DummyCMu()
            #dummyCMu.c = cObjMatrix[:,sample1]
            #sample1Obj.bestCMu = dummyCMu
            sample1Obj.somaticVariants = snvMatrix[:, sample1]
            sample1Obj.somaticVariantsInd = variantIndices
            sample1Obj.measurements = LAF(lafMatrix[:, sample1], chromosomes,
                                          positions, positions)
            sample2Obj = Sample(None, None)
            #dummyCMu = DummyCMu()
            #dummyCMu.c = cObjMatrix[:,sample2]
            sample2Obj.bestCMu = cObjMatrix[:, sample2]
            sample2Obj.somaticVariants = snvMatrix[:, sample2]
            sample2Obj.somaticVariantsInd = variantIndices
            sample2Obj.measurements = LAF(lafMatrix[:, sample2], chromosomes,
                                          positions, positions)
            #The distance can be computed for the entire column at once using the FST
            [messages, dist] = SomaticVariantDistance().computeDistanceBetweenSomaticVariants(
                sample1Obj, sample2Obj, sample1, sample2)

            distanceMatrix[sample1, sample2] = dist

    #Compute the MST
    fullGraph = generateInitialTree(distanceMatrix, realTree.vertices)
    mst = computeMST(fullGraph, realTree.vertices)
    simulationErrorHandler = SimulationErrorHandler()
    treeScore = simulationErrorHandler.computeTreeError([mst], realTree)
    return treeScore
Example #7
def initWellKnownSamples():
    global WATER, SSDDIL, BLEACH
    WATER = Sample("Water", WATERLOC, -1, None, 50000)
    SSDDIL = Sample("SSDDil", SSDDILLOC, -1, None, 50000)
    BLEACH = Sample("RNase-Away",
                    BLEACHLOC,
                    -1,
                    None,
                    50000,
                    mixLC=LCBleachMix)
Example #8
def computeATreeError(aMatrix, lafMatrix, afMatrix, realTree):
    sampleNum = aMatrix.shape[1]

    aObjMatrix = np.empty(aMatrix.shape, dtype=object)
    #Convert the a matrix to an actual allele matrix
    for row in range(0, aMatrix.shape[0]):
        for col in range(0, aMatrix.shape[1]):
            allele = aMatrix[row][col]
            ACount = allele.count('A')
            BCount = allele.count('B')

            alleleObj = Alleles(ACount, BCount)
            aObjMatrix[row][col] = alleleObj

    #Compute the distance pairwise between samples
    distanceMatrix = np.empty([sampleNum, sampleNum], dtype=float)
    [chromosomes, positions, segmentation,
     chromosomeArms] = parseReferenceFile()
    for sample1 in range(0, sampleNum):
        for sample2 in range(0, sampleNum):
            #make a dummy sample object for the FST function
            sample1Obj = Sample(None, None)
            sample1Obj.measurements = LAF(lafMatrix[:, sample1], chromosomes,
                                          positions, positions)
            sample1Obj.measurements.segmentation = segmentation
            sample1Obj.afMeasurements = afMatrix[:, sample1]
            sample2Obj = Sample(None, None)
            sample2Obj.measurements = LAF(lafMatrix[:, sample2], chromosomes,
                                          positions, positions)
            sample2Obj.measurements.segmentation = segmentation
            sample2Obj.afMeasurements = afMatrix[:, sample2]

            #The distance can be computed for the entire column at once using the FST
            [messages, dist] = FST().computeAlleleDistance(
                aObjMatrix[:, sample1], aObjMatrix[:, sample2],
                sample1Obj, sample2Obj)
            distanceMatrix[sample1, sample2] = dist
    #print distanceMatrix
    #exit()
    #Compute the MST
    fullGraph = generateInitialTree(distanceMatrix, realTree.vertices)
    mst = computeMST(fullGraph, realTree.vertices)
    simulationErrorHandler = SimulationErrorHandler()
    treeScore = simulationErrorHandler.computeTreeError([mst], realTree)
    return treeScore
Example #9
def load_sample_rgbd(img_dirname, img_name, d_dirname, d_name, segm_dirname,
                     segm_name):
    """
    Load image and segmented image into Sample object.
    Returns sample object.

    img_dirname: string
        image directory
    img_name: string
        image filename
    d_dirname: string
        depth image directory
    d_name: string
        depth image filename
    segm_dirname: string
        segmented image directory
    segm_name: string
        segmented filename
    """
    image = load_image_rgb(img_dirname, img_name)
    depth = load_image_grayscale(d_dirname, d_name)
    segmented_image = load_image_rgb(segm_dirname, segm_name)
    name = img_name.split('.')[0]

    img_rgbd = np.zeros((image.shape[0], image.shape[1], 4), dtype='uint8')
    img_rgbd[:, :, 0:3] = image
    depth = depth[:image.shape[0], :image.shape[1]]
    img_rgbd[:depth.shape[0], :depth.shape[1], 3] = depth

    return Sample(name, img_rgbd, segmented_image)
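
A hedged usage sketch for load_sample_rgbd with hypothetical directories and file names; the three images are expected to come from the same scene:

# Hypothetical paths; load_image_rgb/load_image_grayscale resolve dirname + name.
sample = load_sample_rgbd('data/rgb/', 'scene01.png',
                          'data/depth/', 'scene01.png',
                          'data/segm/', 'scene01.png')
# The wrapped array is 4-channel: RGB in channels 0-2, depth in channel 3.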
Example #10
    def load(self, filename):
        """
        Load the Metaphor Dataset from a CSV file.

        :param filename: name of the dataset file
        :returns samples: list containing Sample objects
        """
        samples = []
        raw_samples = csv.DictReader(codecs.open(filename, 'r', 'latin-1'))

        for sample in raw_samples:
            if "a" in sample["sentence_id"] or len(sample["sentence_txt"].split()) <= 1:
                continue
            sentence = sample["sentence_txt"].replace("M_", "").replace("L_", "").lower().split()
            self.discourse[sample["txt_id"]][int(sample["sentence_id"])] = sentence
            sample = Sample(sentence=sample["sentence_txt"], text_id=sample["txt_id"],
                            sent_id=sample["sentence_id"])
            samples.append(sample)

        for sample in samples:
            discourse, focus_position = self.get_discourse(sample.sentence, sample.text_id, sample.sent_id)
            sample.update_discourse(discourse, focus_position)

        if self.sort_data != 0:
            samples = sorted(samples, key=lambda x: x.max_length)
        return samples
Example #11
 def slice(self):
     unique = np.unique(self.label)
     lut = np.zeros(np.max(unique) + 1, dtype=int)  # np.int was removed in NumPy 1.24
     for idx, value in enumerate(unique):
         lut[value] = idx
     self.label = lut[self.label]
     with tqdm(total=self.height * self.width,
               desc="slicing ",
               ncols=utils.LENGTH,
               ascii=utils.TQDM_ASCII) as pbar:
         for i in range(self.height):
             for j in range(self.width):
                 tmpLabel = self.label[i, j] - 1
                 tmpPatch = self.getPatch(i, j)
                 tmpIndex = i * self.width + j
                 if (tmpLabel >= 0):
                     self.allSamples.append(
                         Sample(
                             tmpPatch,
                             utils.convertToOneHot(tmpLabel,
                                                   self.numClasses),
                             tmpLabel, tmpIndex))
                     self.numEachClasses[tmpLabel] += 1
                 pbar.update()
     self.allSamples.sort(key=lambda s: s.trueLabel)
Example #12
def read_list(args):
    # Builds a list of Sample objects (one per line of the input list) and the
    # list of group names encountered.
    samples = []
    group_names = []
    sc = False
    with open(args.input, 'r') as list_of_files:
        for line in list_of_files:
            try:
                line = line.split('\t')
                file_name = line[0]
                sample_name = line[1]
                variants = []
                group = line[2].strip()
                if group not in group_names:
                    group_names.append(group)
                if file_name.split('.')[-1] == "vcf":
                    variants = read_vcf(file_name)
                else:
                    if not sc:
                        args.sep = check_sep(args)
                        sc = True
                    variants = read_tfile(file_name, args.sep)
                new_sample = Sample(sample_name, variants, group)
                samples.append(new_sample)
            except IndexError:
                print("Error! Badly formatted input list.")
    return samples, group_names
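
For reference, read_list expects each line of args.input to carry three tab-separated fields: file name, sample name, and group. A hypothetical listing (<TAB> marks a literal tab; names are placeholders):

tumor_a.vcf<TAB>patient_1<TAB>tumor
tumor_b.vcf<TAB>patient_2<TAB>tumor
counts.tsv<TAB>control_1<TAB>normal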
Example #13
def checkOutput(outdir, normal=None, prnt=True):
    # Checks for output log file and reads if present
    first = True
    done = {}
    log = os.path.join(outdir, "mutectLog.txt")
    if prnt:
        print("\tChecking for previous output...")
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    if os.path.isfile(log):
        with open(log, "r") as f:
            for line in f:
                if not first and line.strip():
                    line = line.strip().split("\t")
                    if len(line) == 5:
                        if line[0] not in done:
                            # Initialize new sample entry
                            done[line[0]] = Sample()
                        done[line[0]].update(line[0], line[1], line[2],
                                             line[3], line[4])
                else:
                    # Skip the header line
                    first = False
    else:
        with open(log, "w") as f:
            # Initialize log file and record normal file
            f.write("Sample\tName\tStep\tStatus\tOutput\n")
            if normal:
                f.write("N\t{}\tnormal\tcomplete\t{}\n".format(
                    getFileName(normal), normal))
    return log, done
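
For reference, the mutectLog.txt file read and written above is tab-separated with five columns; a hypothetical file (the header and the normal row follow the code, sample values are placeholders; <TAB> marks a literal tab):

Sample<TAB>Name<TAB>Step<TAB>Status<TAB>Output
N<TAB>normal1<TAB>normal<TAB>complete<TAB>/data/normal1.bam
S1<TAB>tumor1<TAB>mutect<TAB>complete<TAB>/out/S1.vcf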
Example #14
    def test_can_fit_model(self):
        """ This test check ability of fitting model in PER to random vector. """
        state_shape = (4, )
        action_space = 2

        model = PrioritizedExperienceReplayTests._create_model(
            state_shape, action_space)
        PER = PrioritizedExperienceReplay(maxlen=1,
                                          model=model,
                                          key_scaling=10,
                                          gamma=1)
        model_wrapper = ModelWrapper(
            model=model, optimizer=K.optimizers.Adam(learning_rate=0.01))
        model_wrapper.compile()

        sample = Sample(action=np.random.randint(0, action_space),
                        state=np.random.rand(state_shape[0]),
                        reward=10,
                        next_state=None)
        PER.add(samples=[sample])

        history_of_loss = []
        fit_vector = np.zeros((action_space, ))
        fit_vector[sample.action] = sample.reward
        for _ in range(100):
            model_wrapper.fit(sample.state, fit_vector)
            history_of_loss.append(PER._loss_calculate(sample=sample))

        for idx, loss in enumerate(history_of_loss[:-1]):
            self.assertGreater(loss, history_of_loss[idx + 1])
Example #15
def get_one_thread_samples(thread, max_n_words, max_n_agents, n_prev_sents, pad=True, test=False):
    samples = []
    sents = []
    agents_in_ctx = set([])

    for i, sent in enumerate(thread):
        time = sent[0]
        spk_id = sent[1]
        adr_id = sent[2]
        label = sent[-1]

        context = get_context(i, sents, n_prev_sents, label, test)
        responses = limit_sent_length(sent[3:-1], max_n_words)

        original_sent = get_original_sent(responses, label)
        sents.append((time, spk_id, adr_id, original_sent))

        agents_in_ctx.add(spk_id)

        # Judge whether this utterance yields a sample
        if is_sample(context, spk_id, adr_id, agents_in_ctx):
            sample = Sample(context=context, spk_id=spk_id, adr_id=adr_id, responses=responses, label=label,
                            n_agents_in_ctx=len(agents_in_ctx), max_n_agents=max_n_agents, max_n_words=max_n_words,
                            pad=pad, test=test)
            if test:
                samples.append(sample)
            else:
                # The num of the agents in the training samples is n_agents > 1
                # -1 means that the addressee does not appear in the limited context
                if sample.true_adr > -1:
                    samples.append(sample)

    return samples
Example #16
def read_data(prefix='training'):
    '''
    @param prefix: data csv file name [training, validation, test]
    returns a dict mapping sample ids to Sample objects
    '''
    if prefix=='training':
        prefix='train'
    elif prefix=='validation':
        prefix='dev'
    data_dict = pd.read_csv(os.path.join(DATA_CSV_DIR, prefix + '.csv'),
                            encoding='utf-8')
    sample_dict = {}
    for sid,content,question,tok_rngs in zip(data_dict["story_id"], \
                                            data_dict["story_text"], \
                                            data_dict["question"], \
                                            data_dict["answer_token_ranges"]):
        labels = get_labels(content, tok_rngs)
        if sum(labels) == 0:  # sub-task: at least one answer required
            print("  skipped: ", sid, ":: ", question)
            continue
        sample_id = get_hash(sid, question)
        if sample_id not in sample_dict:
            sample_dict[sample_id] = Sample(sample_id, content, question, labels)
        else:
            sample_dict[sample_id].labels += labels
            print("Repeated!!:: ", sid, ":: ", question)
    return sample_dict
Example #17
def get_data(data_file, output_analytes=False):
    """
    A function to load and organize all of the data
    from one experiment.
    Args:
        -data_file (str): the path of the file containing all the multiplex data and the map.
    Returns:
        -experiment (dic): data dictionary populated with Sample objects containing the data
    """

    ### Start by getting the map from the "map sheet" at the end of the data file.
    data_xl = xlrd.open_workbook(data_file)
    map_sheet = data_xl.sheet_by_index(-1)
    experiment = extract_map_from_sheet(map_sheet)

    # Store all the analytes present on this experiment.
    analytes_set = set()

    ##the experiment dictionary now contains the sample ID of each sample as
    ##specified in the excel file. Now we have what we need to go through and
    ##replace each str sample ID with populated Sample objects
    for g in list(experiment):  ##top level is ctrl, experimental groups
        group = experiment[g]
        for t in list(group):  ##next level is trials/animals
            trial = group[t]
            for s in list(trial):
                sample_id = trial[s]
                ##now replace the ID with a populated Sample object
                trial[s] = Sample(s, trial, sample_id, data_xl)
                analytes_set.update(set(trial[s].analyte_names))
    if output_analytes:
        return experiment, analytes_set
    else:
        return experiment
Example #18
def test(model, optimizer, test_iterator, device, BATCH_SIZE):
    model.eval()
    test_loss = 0
    kld_loss = 0
    rcl_loss = 0
    kl_per_lt = {
        'Latent_Dimension': [],
        'KL_Divergence': [],
        'Latent_Mean': [],
        'Latent_Variance': []
    }
    with torch.no_grad():
        for i, (x, y) in enumerate(test_iterator):
            model.to(device)
            sm = Sample(x, y, BATCH_SIZE, device)
            x, y = sm.generate_x_y()
            x = x.view(-1, 3, 28, 28)

            reconstructed_x, z_mu, z_var, _ = model(x, y)
            blur = calc_blur(reconstructed_x)

            for ii in range(z_mu.size()[-1]):
                _, _, kl_per_lt_temp = calculate_loss(
                    x, reconstructed_x, z_mu[:, ii], z_var[:, ii])
                kl_per_lt['Latent_Dimension'].append(ii)
                kl_per_lt['Latent_Mean'].append(z_mu[:, ii])
                kl_per_lt['Latent_Variance'].append(z_var[:, ii])
            loss, rcl, kld = calculate_loss(x, reconstructed_x, z_mu, z_var)
            test_loss += loss.item()
            rcl_loss += rcl.item()
            kld_loss += kld.item()
    return test_loss, rcl_loss, kld_loss, kl_per_lt, blur.data.item()
Example #19
def read_data(split):
    filename = ''
    if split == 'training':
        filename = 'train_v1.1.json'
    elif split == 'validation':
        filename = 'dev_v1.1.json'
    else:
        filename = 'test_public_v1.1.json'

    filename = os.path.join(DATA_JSON_DIR, filename)
    data = []
    for line in open(filename, 'r'):
        data.append(json.loads(line))

    for ms_sample in data:
        question = ms_sample['query']
        qid = str(ms_sample["query_id"])
        sid = get_hash(qid, question)
        labels = np.zeros(len(ms_sample["passages"]), dtype=int)
        sents = []
        for i, passage in enumerate(ms_sample["passages"]):
            sents.append(nltk.word_tokenize(passage["passage_text"]))
            labels[i] = passage.get("is_selected", 0)

        sample = Sample(sid, sents, question, labels)
        yield sample
Example #20
    def get_sample_meta(self, samples_dict):
        """
        Gets Sample object meta information.

        :type samples_dict: dict
        :param samples_dict: a dictionary to parse. See one of the test
        json files for formatting information (the format will likely
        change soon)
        :return: a list of Sample objects, each with its name and
        paired/unpaired path
        """
        samples = []

        for sample_input in samples_dict:
            sample_dir_paths = sample_input['_embedded']['sample_files']
            sample_name = sample_input['name']

            for sample_file_path in sample_dir_paths:
                sample_path = sample_file_path['_links']['self']['href']

            sample_resource = self.make_irida_request(sample_path)
            paths = sample_resource['links']
            paired_path = ""
            unpaired_path = ""

            for link in paths:
                if link['rel'] == "sample/sequenceFiles/pairs":
                    paired_path = link['href']
                elif link['rel'] == "sample/sequenceFiles/unpaired":
                    unpaired_path = link['href']

            samples.append(Sample(sample_name, paired_path, unpaired_path))

        return samples
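
The shape expected for samples_dict can be read off the lookups above; a minimal hedged sketch with placeholder values:

# Placeholder structure inferred from the field accesses in get_sample_meta.
samples_dict = [{
    'name': 'sample1',
    '_embedded': {
        'sample_files': [
            {'_links': {'self': {'href': '/api/samples/1/files'}}},
        ],
    },
}]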
Example #21
    def test_find_next_pixel(self):
        Nx = 3
        Ny = 3
        Jc = np.zeros((Nx, Ny))
        sample = Sample(Jc)
        sample.boolean_matrix[0, 1] = False
        sample.boolean_matrix[0, 0] = False
        nxt = find_next_pixel(0, 0)
        self.assertEqual(nxt, (1, 0))
        sample.boolean_matrix[nxt] = False
        nxt = find_next_pixel(nxt)
        self.assertEqual(nxt, (2, 0))

        sample.boolean_matrix[nxt] = False
        nxt = find_next_pixel(nxt)
        self.assertEqual(nxt, (2, 1))

        sample.boolean_matrix[nxt] = False
        nxt = find_next_pixel(nxt)
        self.assertEqual(nxt, (2, 2))

        sample.boolean_matrix[nxt] = False
        nxt = find_next_pixel(nxt)
        self.assertEqual(nxt, (1, 2))

        sample.boolean_matrix[nxt] = False
        nxt = find_next_pixel(nxt)
        self.assertEqual(nxt, (0, 2))

        sample.boolean_matrix[nxt] = False
        nxt = find_next_pixel(nxt)
        self.assertEqual(nxt, ())
Example #22
File: domains.py  Project: SHi-ON/Erwin
    def step(self, action):
        if action < 0 or action >= self.num_actions:
            raise IndexError('Action index outside of bounds [0, {})'.format(
                self.num_actions))

        candidate_transitions_condition = (self.mdp[COL_STATE_FROM] == self.state_.item()) \
                                          & (self.mdp[COL_ACTION] == action)
        candidate_transitions = self.mdp.loc[candidate_transitions_condition]
        if len(candidate_transitions) == 0:
            raise KeyError(
                'Not able to find any candidates for state-action pair ({}, {})'
                .format(self.state_, action))
        elif len(candidate_transitions) != 1:
            candidate_transitions = candidate_transitions.sample(
                weights=COL_PROBABILITY)

        transition_reward = candidate_transitions[COL_REWARD].values.reshape(1, )
        # int, not np.int: the np.int alias was removed in NumPy 1.24
        transition_next_state = candidate_transitions[
            COL_STATE_TO].values.reshape(1, ).astype(int)

        sample = Sample(self.state_, action, transition_reward,
                        transition_next_state)

        self.state_ = transition_next_state

        return sample
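
A brief stepping sketch; MDPDomain and its construction are assumptions, not part of the snippet:

# Hypothetical usage of step() over a tabular MDP.
domain = MDPDomain()
for _ in range(5):
    sample = domain.step(0)  # Sample wraps (state, action, reward, next_state)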
Example #23
    def __init__(self, notes_queue, *a, **k):
        super(Rompler, self).__init__(*a, **k)

        self._notes_queue = notes_queue
        self._note = None

        sample = Sample(*self._read_sample())
        if len(sample.data) <= 0:
            raise EmptySampleException
        self._sample = sample
        self._data_type = sample.data_type

        # Subtracting the last padding zero for interpolation (see the Sample class)
        self._max_position = len(sample.data) - 2
        self._current_position = 0.0

        self._zeros = np.zeros(BUFFER_SIZE, dtype=self._data_type)

        self._playback_speed = 1.0

        self._player = Player(
            generate_data_callback=self._generate_next_buffer,
            sample_width=sample.sample_width,
            number_of_channels=sample.number_of_channels,
            sample_rate=sample.sample_rate,
        )

        self._gain = 1

        self.lfo = LFO(0.5, sample.sample_rate)
        self.stop = Event()
Example #24
    def addDiscoverySamples(self, srList, startValList, minValList, maxValList,
                            colorList):
        """
        Add a sample to be used for discovery fits
        """
        self.hasDiscovery = True
        self.parentTopLvl.hasDiscovery = True

        if not self.variableName == "cuts":
            raise TypeError("Discovery sample can only be added "
                            "to a cuts channel")

        for (iSR, sr) in enumerate(srList):
            sigSample = Sample("DiscoveryMode_%s" % sr, colorList[iSR])
            sigSample.setNormFactor("mu_%s" % sr, startValList[iSR],
                                    minValList[iSR], maxValList[iSR])
            sigSample.setDiscovery()
            sigSample.clearSystematics()

            self.addSample(sigSample)
            self.parentTopLvl.setSignalSample(sigSample)

            histoName = "h%sNom_%s_obs_%s" % (
                sigSample.name, sr, self.variableName.replace("/", ""))
            self.getSample("DiscoveryMode_%s" % sr).setHistoName(histoName)

            configMgr.hists[histoName] = TH1F(histoName, histoName,
                                              len(srList), 0.0,
                                              float(len(srList)))
            configMgr.hists[histoName].SetBinContent(iSR + 1,
                                                     startValList[iSR])

        return
Example #25
def sample_test():
    """
    Testing sample.py ...
    """

    samples = []

    for i in range(5, 11):
        samples.append(Sample(2**i))

    for i in range(10**4):

        random_number = random.random()

        for sample in samples:
            sample.sample_inc(random_number)

    print()

    for sample in samples:
        print(sample.maximum_size, ':', sample.nth(0.5))

        assert close(sample.nth(0.5), 0.5, 0.2)

    print()
Example #26
def read_data(split):
    filename = ''
    if split == 'training':
        filename = 'train-v1.1.json'
    else:
        filename = 'dev-v1.1.json'

    filename = os.path.join(DATA_JSON_DIR, filename)
    raw_data = json.load(open(filename, 'r'))
    raw_data = raw_data["data"]

    data = []
    for article in raw_data:
        for paragraph in article["paragraphs"]:
            content = paragraph["context"]
            for qas in paragraph["qas"]:
                qid = qas["id"]
                question = qas["question"]
                char_rngs = []
                for ans in qas["answers"]:
                    char_rngs.append(ans["answer_start"])
                labels = get_labels(content, char_rngs)
                sid = get_hash(qid, question)
                sample = Sample(sid, content, question, labels)

                yield sample
Example #27
    def set_simulation(self):
        # Load input files
        load_det = pd.read_csv("curve_load.csv", header=None)

        # Generate samples
        sample = Sample(load_det)

        # Build the grid
        grid = DSS(os.path.join(os.getcwd(), "ieee34.dss"))

        for n in range(self.number):
            # Compile the grid
            grid.compile_dss()

            # Attach the load curve to the buses
            get_daily_load(sample, grid)

            # Attach the PV generation curve to the buses
            get_daily_pv(sample, grid)

            # Attach the EV charging curve
            get_daily_ev(sample, grid)

            grid.dssText.Command = "New Monitor.M1_power   element=line.L32   terminal=1   mode=1   ppolar=no"
            grid.dssText.Command = "New Monitor.M1_voltage   element=line.L32   terminal=1   mode=0"
            grid.solve_dss("daily")
            grid.set_active_bus("860")
            print(grid.get_bus_vmagangle())

            grid.dssText.Command = "Plot Monitor object=M1_power Channels=(1,3,5)"
            grid.dssText.Command = "Plot Monitor object=M1_voltage Channels=(1,3,5)"
            grid.get_circuit_result()
Example #28
 def test_amazon_resume(self):
     print(datetime.now())
     print("Instantiating Sample")
     sample = Sample(
         "/Users/cptullio/Predicao-de-Links/PredLig/src/data/amazon_resume.txt",
         20, (0.5, 0.5))
     print(datetime.now())
     print("Configuring Attributes or Features")
     sample.set_attributes_list({
         "preferential_attachment": {},
         "common_neighbors": {},
         "sum_of_neighbors": {}
     })
     print(datetime.now())
     print("Retrieving the Sample")
     sample.get_sample()
     print(datetime.now())
     print("Classifying the data")
     table = sample.set_classification_dataset()
     print(datetime.now())
     print("Making Link Prediction")
     predictor = LinkPrediction(dataset=table, folds_number=2)
     print(datetime.now())
     print("Applying Classifier")
     print(predictor.apply_classifier())
Example #29
    def test1(self):
        'Neutrons from above'

        rq = lambda Q: np.exp(-Q * Q / 25)

        s = Sample('sample', 5, 5, rq)

        from mcni import neutron_buffer, neutron
        N = 8
        nb = neutron_buffer(N)
        t = 1.
        for i in range(N):
            vi = np.array((0, -(i + 1) * 100, (i + 1) * 100))
            ri = -vi * t
            nb[i] = neutron(r=ri, v=vi, s=(0, 0), time=0., prob=1.)

        nb2 = s.process(nb)
        for i, (n, n2) in enumerate(zip(nb, nb2)):
            # print "input:", n
            # print "output:", n2
            vf = np.array((0, (i + 1) * 100, (i + 1) * 100))
            np.allclose(vf, n2.state.velocity)
            np.allclose([0, 0, 0], n2.state.position)
            np.isclose(n2.time, 1.)
            vy = vf[1]
            Q = np.abs(vy * 2) * conv.V2K
            np.isclose(rq(Q), n2.probability)
        return
Example #30
 def test_calc_speed(self):
     sample = Sample()
     self.assertEqual(sample.calc_speed(distance=10.0, elapsed_time=2.0),
                      5.0)
     self.assertEqual(sample.calc_speed(distance=10, elapsed_time=2), 5.0)
     self.assertEqual(sample.calc_speed(distance=10, elapsed_time=4), 2.5)
     self.assertEqual(sample.calc_speed(distance=0, elapsed_time=2), 0.0)