    def __init__(self, prob=.5, size=20):

        self.n = size
        self.p = prob

        Distribution.__init__(self, self.calculate_mean(),
                              self.calculate_stdev())
def beliefUpdate(pomdp, belief, action, observation):
    """
    Compute the belief update; the time complexity of this function
    is quadratic in the number of states of the POMDP.
        :param pomdp: partiallyObservableMarkovDecisionProcess
        :param belief: initial belief distribution
        :param action: performed action ID
        :param observation: received observation ID
        :return: a distribution that represents the updated belief
    """
    # build custom pdf
    pdf = dict()
    for s1 in belief.distr.keys():
        for s2, prob in pomdp.transitionFunction[(s1, action)].items():
            if observation in pomdp.observationFunction[s2]:
                weight = belief.distr[s1] * prob * \
                    pomdp.observationFunction[s2][observation]
                pdf[s2] = pdf.get(s2, 0.0) + weight
    Distribution.normalize(pdf)
    return Distribution(belief.support,
                        lambda e, domain: Distribution.customPdf(e, pdf))
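
A minimal usage sketch, assuming transitionFunction maps (state, action) to {next_state: prob} dicts and observationFunction maps states to {observation: prob} dicts, as the loop above requires; the action and observation names are hypothetical, and the pomdp constructor is the one used in the main test further down:

    pomdp = cs.tigerPomdp()  # as in the main test below
    prior = Distribution(set(pomdp.states.keys()), Distribution.uniformPdf)
    posterior = beliefUpdate(pomdp, prior,
                             pomdp.inv_actions['listen'],        # hypothetical action name
                             pomdp.inv_observations['growl_left'])  # hypothetical observation name
    print(posterior.distr)  # renormalized belief over states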
Example #3
    def __init__(self, env, node, resource, properties):
        """
        Creates a link and automatically assign a uniqueid to the link
        It requires a simpy environment where to operate.
        It also require a simpy resource to operate correctly and
        reserve the channel for a message.

        :param env: Simpy environment
        :param node: Node which the link is refered to
        :param resource: unitary resource used to lock the link
        :param properties: properties of the link in the graphml that
            needs to be evaluated
        """
        self._id = Link.__link_counter
        Link.__link_counter += 1
        self._env = env
        self._node = node
        self._res = resource
        self._delay = None
        if Link.DELAY in properties:
            self._delay = Distribution(json.loads(properties[Link.DELAY]))
        if Link.POLICY_FUNCTION in properties:
            self._policy_function = PolicyFunction(properties[Link.POLICY_FUNCTION])
        else:
            self._policy_function = PolicyFunction(PolicyFunction.PASS_EVERYTHING)
        self._mrai = 30.0
        if Link.MRAI in properties:
            self._mrai = float(properties[Link.MRAI])
        self._mrai_active = False
        self._jitter = Distribution({"distribution": "unif", "min": 0,
                                     "max": self._mrai * 0.25, "int": 0.01})
def _convolution_in_point(t_val,
                          f,
                          g,
                          n_integral=100,
                          inverse_time=None,
                          return_log=False):
    '''
    Evaluates int_tau f(t+tau)g(tau) dtau, or int_tau f(t-tau)g(tau) dtau
    if inverse_time is True.
    '''
    if inverse_time is None:
        raise Exception("Inverse time argument must be set!")

    # determine integration boundaries:
    if inverse_time:
        ## tau>g.xmin and t-tau<f.xmax
        tau_min = max(t_val - f.xmax, g.xmin)
        ## tau<g.xmax and t-tau>f.xmin
        tau_max = min(t_val - f.xmin, g.xmax)
    else:
        ## tau>g.xmin and t+tau>f.xmin
        tau_min = max(f.xmin - t_val, g.xmin)
        ## tau<g.xmax and t+tau<f.xmax
        tau_max = min(f.xmax - t_val, g.xmax)
        #print(tau_min, tau_max)

    if tau_max <= tau_min + ttconf.TINY_NUMBER:
        if return_log:
            return ttconf.BIG_NUMBER
        else:
            return 0.0  #  functions do not overlap

    else:
        # create the tau-grid for the interpolation object in the overlap region
        if inverse_time:
            tau = np.unique(
                np.concatenate((g.x, t_val - f.x, [tau_min, tau_max])))
        else:
            tau = np.unique(
                np.concatenate((g.x, f.x - t_val, [tau_min, tau_max])))
        tau = tau[(tau > tau_min - ttconf.TINY_NUMBER)
                  & (tau < tau_max + ttconf.TINY_NUMBER)]
        if len(tau) < 10:
            tau = np.linspace(tau_min, tau_max, 10)

        if inverse_time:  # add negative logarithms
            fg = f(t_val - tau) + g(tau)
        else:
            fg = f(t_val + tau) + g(tau)

        # create the interpolation object on this grid
        FG = Distribution(tau, fg, is_log=True, kind='linear')
        #integrate the interpolation object, return log, make neg_log
        #print('FG:',FG.xmin, FG.xmax, FG(FG.xmin), FG(FG.xmax))
        res = -FG.integrate(
            a=FG.xmin, b=FG.xmax, n=n_integral, return_log=True)

        if return_log:
            return res
        else:
            return np.exp(-res)
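
A usage sketch, assuming the surrounding module's Distribution class (the same negative-log interpolation object built for FG above, exposing the x/xmin/xmax attributes this function reads) is in scope; the parabolic neg-log densities are illustrative:

    import numpy as np

    x = np.linspace(0.0, 1.0, 51)
    f = Distribution(x, (x - 0.3)**2, is_log=True, kind='linear')  # negative-log densities
    g = Distribution(x, (x - 0.5)**2, is_log=True, kind='linear')
    # negative log of int f(t - tau) g(tau) dtau evaluated at t = 0.8
    neg_log = _convolution_in_point(0.8, f, g, inverse_time=True, return_log=True)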
    def __init__(self, value):
        """Parses the settings in the value parameter.

        Arguments:
            value {str|dict} -- If a string, the path of a JSON-encoded file containing the settings. If a dict, the settings themselves.
        """

        if type(value) is str:
            # Treat the value as a file locator.
            with open(value, 'r') as settingsFile:
                data = json.load(settingsFile)
        else:
            data = value

        self.topology_type = TopologyType[data['type']]
        if self.topology_type == TopologyType.STATIC_UNIFORM_DELAY:
            self.network_delay = Distribution(data['networkDelay'])
            self.static_file = data['file']
            self.static_graph = None
        else:
            self.number_of_miners = data['numberOfMiners']

            if self.topology_type in (TopologyType.GEOMETRIC_UNIFORM_DELAY,
                                      TopologyType.LOBSTER_UNIFORM_DELAY):
                # Graphs with uniform delays for message transmission.
                self.network_delay = Distribution(data['networkDelay'])

                if self.topology_type == TopologyType.GEOMETRIC_UNIFORM_DELAY:
                    self.radius = data['radius']
                elif self.topology_type == TopologyType.LOBSTER_UNIFORM_DELAY:
                    self.p1 = data['p1']
                    self.p2 = data['p2']
            else:
                raise NotImplementedError(
                    "Selected topology type is not implemented.")
    def generate_customers(self, generation_date):
        print('Generating customers')
        start_time = time()

        for group_name in user_groups_info:
            group_info = user_groups_info[group_name]
            size = group_info['size']
            customer_type = group_info['customer_type']
            if customer_type == 'individual':
                age_distribution = Distribution(info=distributions_info['age'][group_info['ages']])
                gender_distribution = distribution_from_list(distributions_info['gender'][group_info['gender']])

            for i in range(size):
                if customer_type == 'individual':
                    age = int(age_distribution.get_value(return_array=False))
                    gender = gender_distribution.get_value(return_array=False)
                    customer = random_individual(generation_date, age, gender)
                else:
                    customer = random_organization(generation_date)

                customer.cluster_id = group_info['cluster_id']
                c = SimulatedCustomer(customer, group_info['agreements'], self.main_session, self.system, verbose=True)
                c.generate_hierarchy(generation_date)
                self.customers.append(c)

        self.main_session.commit()
        end_time = time()
        print('Customer generation done in %f seconds' % (end_time - start_time))
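
The group configuration consumed above would look roughly like this; the keys are the ones the code reads, while the values are illustrative:

    user_groups_info = {
        'students': {
            'size': 500,
            'customer_type': 'individual',
            'ages': 'young',        # key into distributions_info['age']
            'gender': 'balanced',   # key into distributions_info['gender']
            'cluster_id': 1,
            'agreements': [],
        },
    }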
Example #7
 def __init__(self):
     super(PlayerManager, self).__init__()
     self.first_name_dist = Distribution(self.create_frequency_dist("FirstName"))
     self.last_name_dist = Distribution(self.create_frequency_dist("LastName"))
     self.state_dist = Distribution(self.create_frequency_dist("State"))
     self.position_dist = Distribution(self.create_frequency_dist("Position"))
     self.talent_dist = Distribution(self.create_frequency_dist("ProfilePoint"))
Example #8
def build_tree(columns, target_column, rows, score_type):
    """Recursively build a decision tree by finding the best column, value and operation to split on"""

    if not rows:
        return Node(distribution=Distribution({}))

    best_partition = {}
    score = Distribution(value_counts(rows, target_column)).score(score_type)

    for column in [column for column in columns if column != target_column]:
        column_value_set = {row[column] for row in rows} - {''}
        assert (
            # all values have the same operations, or no values
            len({operations_for(value)
                 for value in column_value_set}) <= 1)
        if len(column_value_set) == 0:
            continue

        for operation in operations_for(next(iter(column_value_set))):
            for pivot in column_value_set:

                positive_rows, negative_rows = partition(
                    rows, column, operation, pivot)

                score_gain = (
                    score - (len(positive_rows) / len(rows)) *
                    Distribution(value_counts(
                        positive_rows, target_column)).score(score_type) -
                    (len(negative_rows) / len(rows)) *
                    Distribution(value_counts(
                        negative_rows, target_column)).score(score_type))

                if score_gain > best_partition.get('score_gain', 0.0):
                    best_partition = {
                        'score_gain': score_gain,
                        'column': column,
                        'operation': operation,
                        'pivot': pivot,
                        'positive_rows': positive_rows,
                        'negative_rows': negative_rows,
                    }

    if best_partition.get('score_gain', 0) > 0:
        return Node(
            column=best_partition['column'],
            operation=best_partition['operation'],
            pivot=best_partition['pivot'],
            positive_branch=build_tree(columns=columns,
                                       target_column=target_column,
                                       rows=best_partition['positive_rows'],
                                       score_type=score_type),
            negative_branch=build_tree(columns=columns,
                                       target_column=target_column,
                                       rows=best_partition['negative_rows'],
                                       score_type=score_type),
        )
    else:
        return Node(
            distribution=Distribution(value_counts(rows, target_column)))
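
A small usage sketch, assuming rows are dicts keyed by column name (as row[column] implies) and that the module's helpers (value_counts, operations_for, partition, Node) are in scope; 'gini' as a score_type value is a guess:

    rows = [
        {'color': 'red', 'size': '3', 'label': 'apple'},
        {'color': 'yellow', 'size': '7', 'label': 'banana'},
        {'color': 'red', 'size': '4', 'label': 'apple'},
    ]
    tree = build_tree(columns=['color', 'size', 'label'],
                      target_column='label', rows=rows, score_type='gini')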
Example #9
 def show_distribution(self):
     try:
         filename = f'{self.fraud_target.username}_dataframe.csv'
         dist = Distribution(filename)
         dist.get_distribution()
     except AttributeError:
         print('No dataframe .csv found. Could not retrieve distribution.')
     except Exception:
         print('An error occurred while building the distribution.')
Example #10
def main():
    dist1 = Distribution(id=0, vals=[2], probs=[1])
    dist2 = Distribution(id=1, vals=[5], probs=[1])
    dist3 = Distribution(id=2, vals=[2, 8], probs=[0.5, 0.5])

    env = Environment(total_bandwidth=10,
                      distribution_list=[dist1, dist2, dist3],
                      mu_list=[1, 2, 3], lambda_list=[3, 2, 1],
                      num_of_each_type_distribution_list=[300, 300, 300])

    action_dim = 2
    state_dim = 6
    rpm = DQNPytorchReplayMemory(MEMORY_SIZE)  # DQN experience replay buffer

    # build the agent following the PARL framework
    model = DQNPtorchModel(state_dim=state_dim, act_dim=action_dim)
    algorithm = DQNPytorchAlg(model,
                              act_dim=action_dim,
                              gamma=GAMMA,
                              lr=LEARNING_RATE)
    agent = DQNPytorchAgent(
        algorithm,
        obs_dim=state_dim,
        act_dim=action_dim,
        e_greed=0.1,  # probability of choosing a random action, for exploration
        e_greed_decrement=1e-6)  # gradually reduce exploration as training converges

    # load a saved model
    # save_path = './dqn_model.ckpt'
    # agent.restore(save_path)

    # pre-fill the replay buffer so early training has enough sample diversity
    while len(rpm) < MEMORY_WARMUP_SIZE:
        run_episode(env, agent, rpm)

    max_episode = 2000

    # start training
    episode = 0
    while episode < max_episode:  # train for max_episode episodes; test runs are not counted
        # train part
        for i in range(0, 50):
            total_reward = run_episode(env, agent, rpm)
            episode += 1

        # test part
        eval_reward, num_accept = evaluate(env, agent)  # set render=True to visualize
        print(
            f'episode{episode}: evaluate reward {eval_reward}, num of accept: {num_accept}'
        )

    # training finished; save the model
    save_path = './dqn_pytorch_model.ckpt'
    agent.save(save_path)
Example #11
    def initialize(self, config):
        self.run_number = config.run_number
        self.application = config.get_param(Cluster.APPLICATION)
        self.size = Distribution(config.get_param(Cluster.SIZE))
        self.offer = Distribution(config.get_param(Cluster.OFFER))
        self.resource = config.get_param(Cluster.RESOURCE)
        self.resource_scale = int(config.get_param(Cluster.RESOURCE_SCALE))
        Configuration().host = config.get_param(Cluster.ENDPOINT)
        self.api = swagger_client.DeploymentsApi()
        self.nodes_api = swagger_client.NodesApi()

        self.index = 0
Example #12
def train(target_distribution: Distribution) -> None:
    """Training Normalizing Flow"""

    target_distribution.save_distribution()

    normalizing_flow = NormalizingFlow(K=NORMALIZING_FLOW_LAYER_NUM)

    z_0, log_q_0 = normalizing_flow.get_placeholder()
    z_k, log_q_k = normalizing_flow.forward(z_0, log_q_0)
    loss = normalizing_flow.calc_loss(z_k, log_q_k, target_distribution)
    trainer = normalizing_flow.get_trainer(loss)
    logger.info('Calculation graph constructed')

    loss_values = []

    with tf.Session() as sess:
        logger.info('Session Start')
        sess.run(tf.global_variables_initializer())
        logger.info('All variables initialized')
        logger.info(f'Training Start (number of iterations: {ITERATION})')

        for iteration in range(ITERATION + 1):
            z_0_batch = NormalDistribution.sample(BATCH_SIZE)
            log_q_0_batch = np.log(NormalDistribution.calc_prob(z_0_batch))
            _, loss_value = sess.run([trainer, loss], {
                z_0: z_0_batch,
                log_q_0: log_q_0_batch
            })
            loss_values.append(loss_value)

            if iteration % 100 == 0:
                iteration_digits = len(str(ITERATION))
                logger.info(
                    f'Iteration:  {iteration:<{iteration_digits}}  Loss:  {loss_value}'
                )

            if iteration % SAVE_FIGURE_INTERVAL == 0:
                z_0_batch_for_visualize = NormalDistribution.sample(
                    NUMBER_OF_SAMPLES_FOR_VISUALIZE)
                log_q_0_batch_for_visualize = np.log(
                    NormalDistribution.calc_prob(z_0_batch_for_visualize))
                z_k_value = sess.run(
                    z_k, {
                        z_0: z_0_batch_for_visualize,
                        log_q_0: log_q_0_batch_for_visualize
                    })
                save_result(z_k_value, iteration, target_distribution.__name__)
                save_loss_values(loss_values, target_distribution.__name__)

        logger.info('Training Finished')

    logger.info('Session Closed')
Example #13
def decode_feature(sheet, preference_map="OrientationPreference", axis_bounds=(0.0, 1.0), cyclic=True, weighted_average=True, cropfn=lambda x: x):
    """
    Estimate the value of a feature from the current activity pattern on a sheet.

    The specified preference_map should be measured before this
    function is called.

    If weighted_average is False, the feature value returned is the
    value of the preference_map at the maximally active location.

    If weighted_average is True, the feature value is estimated by
    weighting the preference_map by the current activity level, and
    averaging the result across all units in the sheet.  The
    axis_bounds specify the allowable range of the feature values in
    the preference_map.  If cyclic is true, a vector average is used;
    otherwise an arithmetic weighted average is used.

    For instance, if preference_map is OrientationPreference (a cyclic
    quantity), then the result will be the vector average of the
    activated orientations.  For an orientation map this value should
    be an estimate of the orientation present on the input.

    If desired, a cropfn can be supplied that will narrow the analysis
    down to a specific region of the array; this function will be
    applied to the preference_map and to the activity array before
    decoding.  Examples:

    Decode whole area:

       decode_feature(topo.sim["V1"])

    Decode left half only:

       r,c = topo.sim["V1"].activity.shape
       lefthalf  = lambda x: x[:, 0:c//2]
       righthalf = lambda x: x[:, c//2:]

       decode_feature(topo.sim["V1"], cropfn=lefthalf)

    """

    d = Distribution(axis_bounds, cyclic)

    if preference_map not in sheet.views.maps:
        param.Parameterized.warning(preference_map + " should be measured before calling decode_feature.")
    else:
        v = sheet.views.maps[preference_map]
        for (p, a) in zip(cropfn(v.view()[0]).ravel(),
                          cropfn(sheet.activity).ravel()):
            d.add({p: a})

    res = DSF_WeightedAverage()(d) if weighted_average else DSF_MaxValue()(d)
    return res['']['preference']
Example #14
 def __show_boundings_for_scenario(self, scenario):
     print("\nUniform Distribution Percentages")
     print(
         Distribution.percentage_distribution(UniformDistribution,
                                              len(scenario.dataList)))
     self.__show_bounding_for_uniform_bounded_scenario(scenario)
     self.__show_bounding_for_uniform_extremity_scenario(scenario)
     print("\nBinomial Distribution Percentages")
     print(
         Distribution.percentage_distribution(BinomialDistribution,
                                              len(scenario.dataList)))
     self.__show_bounding_for_binomial_bounded_scenario(scenario)
     self.__show_bounding_for_binomial_extremity_scenario(scenario)
Example #15
    def __init__(self, config, channel, x, y):
        """
        Constructor.
        :param config: the set of configs loaded by the simulator
        :param channel: the channel to which frames are sent
        :param x: x position
        :param y: y position
        """
        Module.__init__(self)

        #Number of slots in the contention window
        self.window_slots_count = config.get_param(Node.WINDOW_SIZE)
        #Duration in seconds of the channel listening period
        self.listening_duration = config.get_param(Node.LISTENING_TIME)
        #Duration in seconds of each slot
        self.slot_duration = self.listening_duration

        # load configuration parameters
        self.datarate = config.get_param(Node.DATARATE)
        self.queue_size = config.get_param(Node.QUEUE)
        self.interarrival = Distribution(config.get_param(Node.INTERARRIVAL))
        self.size = Distribution(config.get_param(Node.SIZE))

        self.proc_time = Distribution(config.get_param(Node.PROC_TIME))
        self.maxsize = config.get_param(Node.MAXSIZE)
        # queue of packets to be sent
        self.queue = []


        # current state
        self.state = None
        self.switch_state(Node.IDLE)

        # save position
        self.x = x
        self.y = y
        # save channel
        self.channel = channel

        #Number of packets being received
        self.packets_in_air = 0

        #Number of window slots we still have to wait before transmitting
        self.slot_countdown = 0

        #First packet in the current sequence of receiving packets
        self.rx_sequence_first_packet = None

        #Hook to events in the queue for future manipulation
        self.end_listenting_event_hook = None
        self.end_slot_event_hook = None
Example #16
def test_norm():
    passed_count = 0

    print("Test 1: letter frequencies in English text")
    temp_dict = letters_unscaled.copy()
    d = Distribution(temp_dict)
    d.normalize()
    passed = True
    for key in d.d:
        if not math.isclose(letters[key], d.d[key]):
            passed = False
            print("Probability of", key, d.d[key],
                  "does not match expected probability", letters[key])
    if passed:
        print("Test PASSED")
        passed_count += 1
    else:
        print("Test FAILED")
    print("Test 2: drawing from an urn")
    temp_dict = rgb_unscaled.copy()
    d = Distribution(temp_dict)
    d.normalize()
    passed = True
    for key in rgb:
        if not math.isclose(rgb[key], d.d[key]):
            passed = False
            print("Probability of", key, d.d[key],
                  "does not match expected probability", rgb[key])
    if passed:
        print("Test PASSED")
        passed_count += 1
    else:
        print("Test FAILED")
    return passed_count
Example #17
    def test_binomial_expansion_distribution_percent(self):
        instance = BinomialDistribution
        distribution = Distribution.percentage_distribution(instance, 1)
        self.assertEqual(distribution[0], 1.0 / 2)
        self.assertEqual(distribution[1], 1.0 / 2)

        distribution = Distribution.percentage_distribution(instance, 2)
        self.assertEqual(distribution[0], 1.0 / 4)
        self.assertEqual(distribution[1], 2.0 / 4)
        self.assertEqual(distribution[2], 1.0 / 4)

        distribution = Distribution.percentage_distribution(instance, 3)
        self.assertEqual(distribution[0], 1.0 / 8)
        self.assertEqual(distribution[1], 3.0 / 8)
        self.assertEqual(distribution[2], 3.0 / 8)
        self.assertEqual(distribution[3], 1.0 / 8)
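
The expected values in these assertions are row n of Pascal's triangle scaled by 2**-n; a standard-library cross-check (Python 3.8+ for math.comb), independent of the Distribution class:

    import math

    def binomial_percentages(n):
        # C(n, k) / 2**n for k = 0..n
        return [math.comb(n, k) / 2**n for k in range(n + 1)]

    assert binomial_percentages(3) == [1/8, 3/8, 3/8, 1/8]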
Example #18
    def __init__(self, prob=.5, size=20):
        # TODO: store the probability of the distribution in an instance variable p
        self.p = prob

        # TODO: store the size of the distribution in an instance variable n
        self.n = size

        # TODO: Now that you know p and n, you can calculate the mean and standard deviation
        #       Use the calculate_mean() and calculate_stdev() methods to calculate the
        #       distribution mean and standard deviation
        self.mean = self.calculate_mean()

        self.stdev = self.calculate_stdev()
        #       Then use the init function from the Distribution class to initialize the
        #       mean and the standard deviation of the distribution
        Distribution.__init__(self, self.mean, self.stdev)
Example #19
class IndependentEstimate(Estimate):
    def __init__(self, *params):
        self.distribution = Distribution(*params)

    def __str__(self):
        return 'IndependentEstimate(' + str(self.distribution) + ')'

    def __repr__(self):
        #return "Est-Dist" + str(self.distribution)
        #return 'IndependentEstimate({0.distribution})'.format(self)
        return 'IndependentEstimate(' + str(self.distribution) + ')'

    def run(self, n):
        return self.distribution.run(n)

    def mean(self, n=0):
        return self.distribution.mean

    def std(self, n=0):
        return self.distribution.stdev

    def buildDependent(self, operation, others):
        if len(others) == 1:
            return dp.DependentEstimate(operation, self, *others)
        else:
            return dp.DependentEstimate(operation, *([self] + others))
def get_incident_duration(random_state=None):
    shape = 0.9689235428381716
    loc = -2.005873343967834
    scale = 30.310979782335075
    duration_dist = Distribution(stats.lognorm(shape, loc, scale),
                                 random_state=random_state)
    return duration_dist
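
For intuition, the frozen scipy distribution wrapped here can be sampled directly; the sampling API of the surrounding Distribution wrapper is not shown above, so this bypasses it:

    from scipy import stats

    lognorm = stats.lognorm(0.9689235428381716, -2.005873343967834, 30.310979782335075)
    print(lognorm.rvs(size=5, random_state=42))  # five simulated incident durations
    print(lognorm.mean())                        # expected duration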
Example #21
    def __init__(self):
        with open("properties.json") as fp:
            config = json.load(fp)

        # self.q = Queue.Queue()
        # # current completion time of a queue
        policy = config["server"]["allocationPolicy"]
        self.allocation_policy = AllocationPolicy.get_policy(policy)
        self.stat = Stats()

        # TODO : Use this code if we want to use multiple queues
        self.write_server = config["server"]["writeServer"]
        self.read_server = config["server"]["readServer"]
        self.no_of_read_response_required = config["server"][
            "noOfReadResponse"]
        self.no_of_write_response_required = config["server"][
            "noOfWriteResponse"]

        self.server_queues = []
        self.completion_time = []

        for i in range(0, config["server"]["numberOfServers"]):
            self.server_queues.append(Queue.PriorityQueue())
            self.completion_time.append(0)

        self.dist = Distribution.get_distribution(
            config["request"]["distribution"], rate=1)
def get_incident_interarrival(random_state=None):
    alpha = 0.7949678079328055  # interarrival based on monday - friday gamma dist
    loc = 0
    scale = 294.3450468550495
    interarrival_dist = Distribution(stats.gamma(alpha, loc, scale),
                                     random_state=random_state)
    return interarrival_dist
Example #23
    def __init__(self, fname):
        """
        Arguments:
            fname {str} -- Filename to load settings from.
        """

        with open(fname, 'r') as settingsFile:
            data = json.load(settingsFile)

        # Load settings.
        self.thread_workers = data['threadWorkers']
        self.number_of_executions = data['numberOfExecutions']
        self.topology_selection = TopologySelection[data['topologySelection']]
        self.termination_condition = TerminationCondition[
            data['terminationCondition']]
        self.termination_value = data['terminationValue']
        self.miner_power_distribution = Distribution(data['minerPower'])
        self.top_miner_power = None

        # Percent power share of the top N miners, currently drawn from https://btc.com/stats/pool?pool_mode=week on July 11, 2018.
        # Each element of the list is a float between 0 and 100; the list must sum to < 100.
        if 'topMinerPower' in data:
            self.top_miner_power = data['topMinerPower']

        self.target_termination_ticks = -1

        # Parameterize in JSON later?
        self.allow_termination_cooldown = True
        self.hard_limit_ticks = 1000  # Should this be a function of the number of miners?

        # Load the other settings objects.
        self.topology = TopologySettings(data['topology'])
        self.protocol = ProtocolSettings(data['protocol'])
Example #24
def normal_hypothesis(distribution: dist.Distribution, size: int):
    sample = distribution.create_sample(size)
    characteristics = Characteristics(sample)
    hypothesis = Hypothesis()
    hyp_distribution = dist.NormalDistribution(characteristics.mean(),
                                               characteristics.variance())
    hypothesis.check_hypothesis(sample, hyp_distribution)
Example #25
def uniform_hypothesis(distribution: dist.Distribution, size: int):
    sample = distribution.create_sample(size)
    characteristics = Characteristics(sample)
    hypothesis = Hypothesis()
    hyp_distribution = dist.UniformDistribution(characteristics.min(),
                                                characteristics.max())
    hypothesis.check_hypothesis(sample, hyp_distribution)
Example #26
 def test_a_values(test_items1, test_items2, symmetric_difference, table_size=DEFAULT_TABLE_SIZE, a_value=0):
     # Test Random IBLT with pre generated hash decider
     test_set_size = len(test_items1)
     size = int(test_set_size * table_size)
     key = randint(1, 1000)
     hash_decider = Distribution.create_randomly_generated_sequence(size=1000, minimum=3,
                                                                    maximum=12, a_value=a_value,
                                                                    seed_value=key)
     start_time = time()
     bloom1, x, y = RIBLT.generate_table(test_items1, seed_key=key, table_size=size, max_hashes=12,
                                         hash_decider=hash_decider)
     end_time = time()
     table_creation_time = end_time - start_time
     start_time = time()
     bloom2, a, b = RIBLT.generate_table(test_items2, seed_key=key, table_size=size, max_hashes=12,
                                         hash_decider=hash_decider)
     end_time = time()
     table_creation_time = (end_time - start_time + table_creation_time) / 2
     start_time = time()
     result = RIBLT.compare_tables(bloom1, bloom2, key, max_hashes=12, hash_decider=hash_decider)
     end_time = time()
     comparison_time = end_time - start_time
     print(result[2])
     average_table_size = (asizeof.asizeof(bloom1) + asizeof.asizeof(bloom2)) / 2
     name = "RALOHA IBLT| a=%s" % a_value
     BloomTest.write_to_file(name, test_set_size, table_size, symmetric_difference,
                             average_table_size, table_creation_time, comparison_time, result[2])
    def calc(self, group_id, partition):
        dis_res = []
        for group in partition:
            now = None
            for u in group:
                foo = copy.deepcopy(self.nodes[u])
                for edge in edges:
                    if edge.v == u and group_id[edge.u] >= group_id[u]:
                        foo = foo.add(
                            Distribution.from_mu_sigma(edge.w, 0.01,
                                                       edge.w * 2))

                foo = foo.div(NUM_TEAM)
                if now is None:
                    now = foo
                else:
                    now = now.max(foo)

            if len(dis_res) > 0:
                now = now.add(dis_res[-1])

            dis_res.append(now)

        if (dis_res[-1].mu < self.best_mu):
            print(partition)
            print(dis_res[-1].mu)
            self.best_mu = dis_res[-1].mu
            self.res = (copy.deepcopy(partition), dis_res)
Example #28
def laplace_hypothesis(distribution: dist.Distribution, size: int):
    sample = distribution.create_sample(size)
    characteristics = Characteristics(sample)
    hypothesis = Hypothesis()
    hyp_distribution = dist.LaplaceDistribution(
        characteristics.mean(),
        characteristics.variance() / (2**0.5))
    hypothesis.check_hypothesis(sample, hyp_distribution)
Example #29
def main():
    # create environment
    dist1 = Distribution(id=0, vals=[2], probs=[1])
    dist2 = Distribution(id=1, vals=[5], probs=[1])
    dist3 = Distribution(id=2, vals=[2,8], probs=[0.5,0.5])

    env = Environment(total_bandwidth=10,
                      distribution_list=[dist1, dist2, dist3],
                      mu_list=[1, 2, 3], lambda_list=[3, 2, 1],
                      num_of_each_type_distribution_list=[300, 300, 300])
    # env = gym.make('CartPole-v0')
    # env = env.unwrapped # Cancel the minimum score limit
    # obs_dim = env.observation_space.shape[0]
    # act_dim = env.action_space.n
    obs_dim = 6
    act_dim = 2
    logger.info('obs_dim {}, act_dim {}'.format(obs_dim, act_dim))

    # build the agent following the PARL framework
    model = Model(act_dim=act_dim)
    alg = PolicyGradient(model, lr=LEARNING_RATE)
    agent = Agent(alg, obs_dim=obs_dim, act_dim=act_dim)

    # load a saved model if one exists
    if os.path.exists('./policy_grad_model.ckpt'):
        agent.restore('./policy_grad_model.ckpt')
        # run_episode(env, agent, train_or_test='test', render=True)
        # exit()

    for i in range(1000):
        obs_list, action_list, reward_list = run_episode(env, agent)
        if i % 10 == 0:
            logger.info("Episode {}, Reward Sum {}.".format(
                i, sum(reward_list)))

        batch_obs = np.array(obs_list)
        batch_action = np.array(action_list)
        batch_reward = calc_reward_to_go(reward_list, gamma=0.9)

        agent.learn(batch_obs, batch_action, batch_reward)
        if (i + 1) % 100 == 0:
            total_reward = evaluate(env, agent, render=True)
            logger.info('Test reward: {}'.format(total_reward))

    # save the parameters to ./policy_grad_model.ckpt
    agent.save('./policy_grad_model.ckpt')
Example #30
 def get(self):
     key = self.get_argument('key')
     try:
         dist = Distribution(key).get_dist()
     except KeyError:
         return self.finish({
             "status_code":404,
             "data":[],
             "error_message": "Could not find distribution in Forget Table"
         })
     return self.finish({
         "status_code": 200,
         "data": [{
             "bin": key,
             "probability": value
         } for key, value in dist.items()]
     })
Example #31
def main(argv):
    pattern_sequence_path = argv[0]
    long_sequence_path = argv[1]

    pattern_sequence = []
    long_sequence = []

    # load defined pattern sequence
    with open(pattern_sequence_path) as f:
        for line in f:
            pattern_sequence.append(float(line.strip()))

    # load long sequence
    with open(long_sequence_path) as f:
        for line in f:
            long_sequence.append(float(line.strip()))

    pattern_seq_dist = Distribution.of(pattern_sequence)
    pattern_seq_len = len(pattern_sequence)

    subsequences = sliding(long_sequence, window_size=pattern_seq_len)
    scored_subsequences = []  # (subsequence, score) pairs

    for subsequence in subsequences:
        # skip if length of the subsequence isn't equal to pattern sequence's
        if len(subsequence) != pattern_seq_len:
            continue

        subsequence_dist = Distribution.of(subsequence)

        # it is not necessary to compute similarity score for a subsequence with
        # a distribution that is very different from pattern sequence's.
        if not pattern_seq_dist.interval_overlap_with(subsequence_dist):
            continue

        if Distribution.overlap_proportion(pattern_seq_dist, subsequence_dist) < 0.5:
            continue

        scored_subsequences.append((subsequence, similarity_score(pattern_sequence, subsequence)))

    sorted_subsequences = sorted(scored_subsequences, key=lambda pair: pair[1], reverse=True)

    # show top three subsequences
    top_subsequences = sorted_subsequences[:3]
    for subsequence, _ in top_subsequences:
        print(subsequence)
Example #32
def test_prob():
    unif_set = {1, 5, 7, 8}
    rgb_set = {"red", "green"}
    passed_count = 0

    print(
        "Test 1: Uniform distribution on integers 1-10.\nTest set: {1, 5, 7, 8}\nExpected result: 0.4"
    )
    d = Distribution(unif_dist)
    result = d.prob(unif_set)
    print("Result:", result)
    if math.isclose(0.4, result):
        print("Test PASSED")
        passed_count += 1
    else:
        print("Test FAILED")

    print(
        "Test 2: Drawing from an urn with 3 red, 7 green, 8 blue balls.\nTest set: {\"red\", \"green\"}\nExpected result: 10/18"
    )
    d = Distribution(rgb)
    result = d.prob(rgb_set)
    print("Result:", result)
    if math.isclose(10 / 18, result):
        print("Test PASSED")
        passed_count += 1
    else:
        print("Test FAILED")

    print(
        "Test 3: Benford's law on digits 1-9.\nTest set: {2, 8, 9}\nExpected result: 0.273"
    )
    d = Distribution(benford)
    result = d.prob(benford_set_a)
    print("Result:", result)
    if math.isclose(0.273, result):
        print("Test PASSED")
        passed_count += 1
    else:
        print("Test FAILED")
    '''
    if setbonus:
        print("Test 4: Union of two sets using Benford's law.\nTest sets: {2, 8, 9}, {1, 2, 7, 8}\nExpected result: .632")
        result = probfunc(benford, benford_set_a, benford_set_b)
        print("Result:", result)
        if math.isclose(0.632, result):
            print("Test PASSED")
        else:
            print("Test FAILED")
    '''
    return passed_count
Example #33
 def __count_in(cls, scenario):
     dist = Distribution.percentage_distribution(
         scenario.distribution_profile, len(scenario.data_list()))
     count_in = distribution.count_ins(scenario.distribution_bounding, dist,
                                       scenario.confidence_interval)
     if count_in is None:
         return 0
     else:
         return count_in
Example #34
def plot_distribution(n_clicks, files_content, nodes_content):
    """Plots the distribution"""
    if files_content is not None and nodes_content is not None:
        filestring = stringify_contents(files_content)
        nodestring = stringify_contents(nodes_content)
        print(nodestring)
        distribution = Distribution.from_strings(nodestring, filestring)
        fig = distribution.get_plotly(output_file=False)
        component = dcc.Graph(id="distribution_chart", figure=fig)
        return component
Example #35
 def get(self):
     key = self.get_argument("key")
     try:
         dist = Distribution(key).get_dist()
     except KeyError:
         return self.finish(
             {"status_code": 404, "data": [], "error_message": "Could not find distribution in Forget Table"}
         )
     return self.finish(
         {"status_code": 200, "data": [{"bin": key, "probability": value} for key, value in dist.items()]}
     )
Example #36
 def test_simple(self):
     """Test simple cases"""
     # One file one node
     nodes = [('node', 100)]
     files = [('file', 100)]
     dist = Distribution(nodes, files)
     self.assertEqual(len(dist.placed_files), 1)
     self.assertEqual(dist.placed_files[0][0], ('file', 100))
     # One node two files that fit perfectly
     nodes = [('node', 100)]
     files = [('file_1', 25), ('file_2', 75)]
     dist = Distribution(nodes, files)
     self.assertIn(('file_1', 25), dist.placed_files[0])
     self.assertIn(('file_2', 75), dist.placed_files[0])
     # One node two files one of which doesn't fit
     nodes = [('node', 100)]
     files = [('file_1', 26), ('file_2', 75)]
     dist = Distribution(nodes, files)
     self.assertIn(('file_1', 26), dist.null_files)
     self.assertIn(('file_2', 75), dist.placed_files[0])
     self.assertIn('node', dist.node_names)
     summary = dist.summary()
     self.assertIn("file_1 NULL", summary)
Example #37
    def test_parsing(self):
        """Test parsing functionality"""
        nodetext = 'node_1 10\nnode_2 100'
        parsed = parse_string(nodetext)
        self.assertEqual(parsed[0], ('node_1', 10))
        self.assertEqual(parsed[1], ('node_2', 100))
        longtext = generate_text(100, 'nodes', upper=100, lower=10)
        parsed = parse_string(longtext)
        nnames, sizes = list(zip(*parsed))
        sizes = np.array(sizes)
        # Check text generator
        self.assertTrue((sizes >= 10).all())
        self.assertTrue((sizes <= 100).all())
        self.assertTrue(all([name.startswith('nodes') for name in nnames]))

        # Check invocation from strings
        dist = Distribution.from_strings(longtext, longtext)

        with tempfile.ScratchDir('.'):
            with open('files.txt', 'w') as f:
                f.write(longtext)
            with open('nodes.txt', 'w') as f:
                f.write(longtext)
            dist = Distribution.from_filenames('nodes.txt', 'files.txt')
Example #40
 def __init__(self, parmM,parmG):
     Distribution.__init__(self)
     self.parmM = parmM
     self.parmG = parmG
Example #41
 def test_plot(self):
     nodes = [('node', 100)]
     files = [('file_1', 25), ('file_2', 75)]
     dist = Distribution(nodes, files)
     plt = dist.plot(show=False)
     dist.get_plotly(output_file=None)
Example #42
class PlayerManager(models.Manager):
    def __init__(self):
        super(PlayerManager, self).__init__()
        self.first_name_dist = Distribution(self.create_frequency_dist("FirstName"))
        self.last_name_dist = Distribution(self.create_frequency_dist("LastName"))
        self.state_dist = Distribution(self.create_frequency_dist("State"))
        self.position_dist = Distribution(self.create_frequency_dist("Position"))
        self.talent_dist = Distribution(self.create_frequency_dist("ProfilePoint"))

    @classmethod
    def create_frequency_dist(cls, modelName):
        records = None
        if modelName == "FirstName":
            records = FirstName.objects.all()
        elif modelName == "LastName":
            records = LastName.objects.all()
        elif modelName == "State":
            records = State.objects.all()
        elif modelName == "Position":
            records = Position.objects.all()
        elif modelName == "ProfilePoint":
            # TODO: Get profile points through TalentProfile
            records = ProfilePoint.objects.all()

        frequency_dist = []
        for row in records:
            frequency_dist.append((row.id, row.frequency))

        return frequency_dist

    def create_player(self):
        first_name_id = self.first_name_dist.generate_value()
        last_name_id = self.last_name_dist.generate_value()
        state_id = self.state_dist.generate_value()
        position_id = self.position_dist.generate_value()
        profile_point_id = self.talent_dist.generate_value()

        first_name = FirstName.objects.get(id=first_name_id)
        last_name = LastName.objects.get(id=last_name_id)
        state = State.objects.get(id=state_id)
        position = Position.objects.get(id=position_id)
        profile_point = ProfilePoint.objects.get(id=profile_point_id)
        grade = profile_point.grade

        player = self.create(first_name=first_name,
                             last_name=last_name,
                             state=state,
                             position=position,
                             grade=grade)

        player.save()
        return player


    def create_pitch_weights(self, player):
        pitch_records = Pitch.objects.all().order_by('id')
        pitches_count = pitch_records.count()        
        mu = 1.0 / pitches_count
        sigma = (2.0 / 3.0) * mu        
        weights = []
        sum_weights = 0
        
        for _ in range(pitches_count):
            w = random.normalvariate(mu, sigma)
            w = max(w, 0.0)
            weights.append(w)
            sum_weights += w

        # Normalize weights before creating records
        for i in range(len(weights)):
            weights[i] /= sum_weights

        j = 0
        for pitch in pitch_records:
            pw = PitchWeight(player=player, pitch=pitch, weight=weights[j])
            pw.save()
            j += 1
Example #43
# Argument parsing
desc = """
Program that takes two filename inputs corresponding to lists
of files and nodes and distributes the files onto the nodes
such that the absolute loads are evenly distributed
"""

arg_parser = argparse.ArgumentParser(description=desc)
arg_parser.add_argument('-f', '--files', help='File containing file list',
                        required=True)
arg_parser.add_argument('-n', '--nodes', help='File containing node list',
                        required=True)
arg_parser.add_argument('-o', '--output',
                        help='output file, optional, default is stdout')
arg_parser.add_argument('-p', '--plot', action='store_true',
                        help='plotting flag, plots nodes/files on bar chart')
arg_parser.add_argument('-pl', '--plotly', action='store_true',
                        help='plotting flag, plots nodes/files on bar chart '
                             'in plotly')
args = arg_parser.parse_args()

# Main body
if __name__ == "__main__":
    dist = Distribution.from_filenames(args.nodes, args.files)
    dist.summary(args.output)

    if args.plot:
        ply = dist.plot()
    if args.plotly:
        plt = dist.get_plotly()
        for (s1, o1), a in it.product(S, pomdp.actions.values()):
            for (s2, o2) in S:
                if pomdp.inv_states[s2] in pomdp.transitionFunction[(pomdp.inv_states[s1], pomdp.inv_actions[a])] and pomdp.inv_observations[o2] in pomdp.observationFunction[pomdp.inv_states[s2]]:
                    if ((s1, o1), a) not in T:
                        T[((s1, o1), a)] = dict()
                    T[((s1, o1), a)][(s2, o2)] = pomdp.transitionFunction[(pomdp.inv_states[s1], pomdp.inv_actions[a])][pomdp.inv_states[s2]] * pomdp.observationFunction[pomdp.inv_states[s2]][pomdp.inv_observations[o2]]
        print('S:', S, '\nA:', list(pomdp.actions.values()), '\nT:', T)

        MarkovDecisionProcess.__init__(self, S, list(pomdp.actions.values()), T)

# >>> main test

if __name__ == '__main__':
    pomdp = cs.tigerPomdp()
    print(pomdp)
    prior = Distribution(set(pomdp.states.keys()),
                         lambda el, dom: Distribution.restrictedUniformPdf(
                             el, dom, {pomdp.inv_states['tl'], pomdp.inv_states['tr']}))
    # generate explicit MDP
    emdp = ExplicitMarkovDecisionProcess(pomdp, prior)
    print(emdp)
    # export to DOT
    e2d.export2dot(emdp, 'emdp', 'emdp.dot', [], [], [], [])

# >>> authorship information

__author__ = "Marco Tinacci"
__copyright__ = "Copyright 2016"
__credits__ = ["Marco Tinacci"]
__license__ = "ASF"
__version__ = "2.0"
__maintainer__ = "Marco Tinacci"
__email__ = "*****@*****.**"
 def share_in(self, n, week_names, month_names):
     distribution = Distribution()
     distribution.set_week(self._share_in_(self.taches_list.get_tache_for_week(), n, week_names))
     distribution.set_month(self._share_in_(self.taches_list.get_tache_for_month(), n, month_names))
     distribution.create_whole()
     return distribution
Example #46
        'MyAppVersion': sw_version,
        'MyAppPublisher': sw_publisher,
        'MyAppURL': sw_url,
        'MyAppExeName': '%s.exe' % sw_name,
        'distributedname': distributedname,
        'SetupIconFile': sw_logoico,
        'source_exe':  os.sep.join([os.getcwd(), 'dist', '%s.exe' % sw_name]),
        'source_folder': os.sep.join([os.getcwd(), 'dist', '*'])
    }
    setup_iss_content = auto_iss(templateDef_iss, nameSpace)
    with open(setup_iss_file, 'w') as f:
        f.write(setup_iss_content)

    # package the project into an executable with py2exe
    if os.name == "nt":
        dist = Distribution()
        dist.vs2008 = None
        dist.setup(
                name=sw_name,
                version=sw_version,
                description=u"Application based on PyQt4",
                script=os.sep.join([os.getcwd(), 'QMain.py']),
                target_name='QMarkdowner',
                icon=sw_logoico)

        dist.add_modules('PyQt4')
        dist.bin_excludes += ["libzmq.dll"]
        dist.includes += []
        # dist.data_files += matplotlibdata_files
        dist.data_files += get_py2exe_datafiles(os.sep.join([os.getcwd(), 'utildialog', 'utildialogskin']), 'utildialogskin')
        dist.data_files += [('phonon_backend', [
Example #47
 def __init__(self, listOFparms):
     Distribution.__init__(self)
Example #48
 def fromJSON(jsonObj):
   return DistrResult(
     Distribution.fromJSON(jsonObj['distribution']),
     jsonObj['time']
   )
Example #49
 def test_large(self):
     """Test some big cases to get an idea of scaling"""
     bigfiles = generate_text(1000000, 'files', upper=100)
     bignodes = generate_text(1000, 'nodes')
     # Takes around 1 min now
     dist = Distribution.from_strings(bignodes, bigfiles)
if __name__ == '__main__':
   random.seed(1)
   height = 4
   width = 4
   initPos = (1,1)
   concreteInitPos = (initPos,(0,0),(1,2),(2,1),(3,3))
   iterations = 10
   # partially observable model
   pomdp = robotPomdp(height, width)
   # initial states
   init = {pomdp.inv_states[s] for s in pomdp.states.values() if s[0] == initPos}
   #init = filter(lambda s: s[0] == initPos, pomdp.states.values())
   # initial belief
   prior = Distribution(set(pomdp.states.keys()),\
      lambda el,dom: Distribution.restrictedUniformPdf(el,dom,init))
   #prior = Distribution(set(pomdp.states.keys()), Distribution.uniformPdf)
   # prior belief updated to the first observation
   #prior = beliefUpdateObservation(pomdp,prior,pomdp.inv_observations[(False,False,False)])
   
   # generate explicit MDP
   print('emdp construction')
   emdp = ExplicitMarkovDecisionProcess(pomdp, prior)
   print('end (emdp empty)')

   # observation classes
   collision = [o for o in pomdp.observations.values() if o[0]]
   notCollision = [o for o in pomdp.observations.values() if not o[0]]
   sensing = [o for o in pomdp.observations.values() if any(o)]
   anyObs = list(pomdp.observations.values())
   sensAndNotCollide = [o for o in pomdp.observations.values() if (o in sensing and o in notCollision)]
Example #51
    p = subprocess.Popen(
        ['cd','svn_co','&&','svn','info'], 
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        shell = True,
        )
    (child_stdin, child_stdout) = (p.stdin, p.stdout)
    svn_version = 'r' + (''.join(child_stdout.readlines())).split("\n")[6].split(":")[1].strip()
    time_str = 'b' + time.strftime("%Y%m%d", time.localtime(int(time.time()))).decode('UTF8')
    json_str = json.dumps({"local_name": local_name, "version_name": version_name, "svn_version": svn_version, "time_str": time_str}, indent=3)
    full_name = "-".join([local_name, version_name, svn_version, time_str])
    write_file("js/ver.json", json_str)
    issstr = backiss(full_name)
    write_file("QSoftKeyer-setup-v2.iss", issstr)
    if os.name == "nt":
        dist = Distribution()
        dist.vs2008 = None
        dist.setup(name=u"QSoftKeyer", version='1.0.0',
                   description=u"Application based on PyQt4",
                   script="svn_co/QSoftKeyer.py", target_name="QSoftKeyer",
                   icon=os.sep.join([os.getcwd(), 'skin', 'images', 'logo3.ico']))

        dist.add_modules('PyQt4')
        dist.bin_excludes += ["libzmq.dll"]
        dist.includes += []
        # dist.data_files += matplotlibdata_files
        dist.data_files += get_py2exe_datafiles(os.sep.join([os.getcwd(), 'utildialog', 'utildialogskin']), 'utildialogskin')
        dist.data_files += [('phonon_backend', [
                'C:\Python27\Lib\site-packages\PyQt4\plugins\phonon_backend\phonon_ds94.dll'
                ]),
            ('imageplugins', [
Example #52
 def __init__(self, parmA,parmB):
     Distribution.__init__(self)
     self.parmA = parmA
     self.parmB = parmB
Example #53
 def __init__(self, Map=None):
     Distribution.__init__(self)
     # map is a python dictionary {time: count, ...}
     self.map = Map if Map is not None else {}