Example #1
    def setUp(self):
        self.tb = gr.top_block()

        random.seed(0) # make repeatable
        N = 10000
        self._noise = [get_n_cplx() for i in xrange(N)]
        self._bits = [get_cplx() for i in xrange(N)]
Example #2
def load_segmented_blizzard_metadata():
    with open(DATA_PATH+'/prompts.gui') as prompts_file:
        lines = [l[:-1] for l in prompts_file]

    filepaths = [DATA_PATH + '/wavn/' + fname + '.wav' for fname in lines[::3]]
    transcripts = lines[1::3]

    # Clean up the transcripts
    for i in xrange(len(transcripts)):
        t = transcripts[i]
        t = t.replace('@ ', '')
        t = t.replace('# ', '')
        t = t.replace('| ', '')
        t = t.lower()
        transcripts[i] = t

    # We use '*' as a null padding character
    charmap = {'*': 0}
    inv_charmap = ['*']
    for t in transcripts:
        for char in t:
            if char not in charmap:
                charmap[char] = len(charmap)
                inv_charmap.append(char)

    all_data = zip(filepaths, transcripts)
    random.seed(123)
    random.shuffle(all_data)
    train_data = all_data[2*BATCH_SIZE:]
    test_data  = all_data[:2*BATCH_SIZE]

    return charmap, inv_charmap, train_data, test_data
Example #3
def stream_random_bytes(n):
    """Streams n random bytes generated with given seed, at given chunk size per packet."""
    n = min(n, 100 * 1024) # set 100KB limit

    params = CaseInsensitiveDict(request.args.items())
    if 'seed' in params:
        random.seed(int(params['seed']))

    if 'chunk_size' in params:
        chunk_size = max(1, int(params['chunk_size']))
    else:
        chunk_size = 10 * 1024

    def generate_bytes():
        chunks = bytearray()

        for i in xrange(n):
            chunks.append(random.randint(0, 255))
            if len(chunks) == chunk_size:
                yield(bytes(chunks))
                chunks = bytearray()

        if chunks:
            yield(bytes(chunks))

    headers = {'Content-Type': 'application/octet-stream'}

    return Response(generate_bytes(), headers=headers)
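
The docstring above only promises that the stream is generated from the given seed; the seed is what makes the chunked output reproducible. Below is a minimal standalone sketch of the same chunking logic without the Flask/Response plumbing; the function name and cap are illustrative, not part of the original handler.

import random

def seeded_chunks(n, seed, chunk_size=10 * 1024):
    # Same idea as generate_bytes() above: a fixed seed yields an identical byte stream.
    random.seed(seed)
    chunk = bytearray()
    for _ in range(min(n, 100 * 1024)):  # keep the 100KB cap from the handler
        chunk.append(random.randint(0, 255))
        if len(chunk) == chunk_size:
            yield bytes(chunk)
            chunk = bytearray()
    if chunk:
        yield bytes(chunk)

# Seeding makes the stream repeatable across calls.
assert list(seeded_chunks(1024, seed=7)) == list(seeded_chunks(1024, seed=7))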
Example #4
    def checkEvent(self):
        random.seed()

        event_value = random.uniform(0, 1)
        encounter_chance = 0.03 + BASE_ENEMY_ENCOUNTER_CHANCE * self.danger 
        
        if self.player.hiding: 
            h_event_value = random.uniform(0, 1)
            h_encounter_chance = 0.03 + BASE_HIDE_ENCOUNTER_CHANCE * self.hide_danger 
            if h_event_value <= h_encounter_chance:
                encounter_chance = 1
            else:
                encounter_chance = -1
        
        if STORY_MODE and self.map.stevendorf and not self.map.boss_fight:
            self.map.boss_fight = True
            self.current_enemy = self.enemy_factory.generateEnemy(self.level, boss=self.map.stevendorf or self.encounter_sdorf, dorfweap=14)
            self.runEvent()

        elif event_value <= encounter_chance and not self.invuln_turns:
            # spawn an enemy TODO generator
            self.current_enemy = self.enemy_factory.get_next_enemy()
            self.runEvent()
            self.current_enemy = None

        if self.invuln_turns: self.invuln_turns -= 1
        self.player.hiding = False
Example #5
    def __init__(self):
        global ARGS, CONFIG, CONFIG_DIR
        parser = argparse.ArgumentParser(description=(
            "osquery python integration testing."
        ))
        parser.add_argument(
            "--config", metavar="FILE", default=None,
            help="Use special options from a config."
        )
        parser.add_argument(
            "--verbose", default=False, action="store_true",
            help="Run daemons and extensions with --verbose"
        )

        # Directory structure options
        parser.add_argument(
            "--build", metavar="PATH", default=".",
            help="Path to osquery build (./build/<sys>/)."
        )
        ARGS = parser.parse_args()

        if not os.path.exists(ARGS.build):
            print("Cannot find --build: %s" % ARGS.build)
            print("You must first run: make")
            exit(1)

        # Write config
        random.seed(time.time())

        utils.reset_dir(CONFIG_DIR)
        CONFIG = read_config(ARGS.config) if ARGS.config else DEFAULT_CONFIG
Example #6
def test_random_addition_and_slicing():
    seed = random.randrange(10000)
    print seed
    random.seed(seed)
    st = "abc"
    curr = LiteralStringNode(st)
    last = None
    all = []
    for i in range(1000):
        a = (chr(random.randrange(ord('a'), ord('z') + 1)) *
                random.randrange(500))
        last = curr
        all.append(curr)
        c = random.choice([0, 1, 2])
        if c == 0:
            curr = curr + LiteralStringNode(a)
            st = st + a
        elif c == 1:
            curr = LiteralStringNode(a) + curr
            st = a + st
        else:
            if len(st) < 10:
                continue
            # get a significant portion of the string
            #import pdb; pdb.set_trace()
            start = random.randrange(len(st) // 3)
            stop = random.randrange(len(st) // 3 * 2, len(st))
            curr = getslice_one(curr, start, stop)
            st = st[start: stop]
        assert curr.flatten_string() == st
    curr = curr.rebalance()
    assert curr.flatten_string() == st
Example #7
    def setup(self, bottom, top):
        # config
        params = eval(self.param_str)
        self.data_root_dir = params['data_root_dir']
        self.mean = np.array((104.00699, 116.66877, 122.67892), dtype=np.float32)
        self.random = params.get('randomize', True)
        self.seed = params.get('seed', None)

        # two tops: data and label
        if len(top) != 2:
            raise Exception("Need to define two tops: data, and label.")
        # data layers have no bottoms
        if len(bottom) != 0:
            raise Exception("Do not define a bottom.")

        # load indices for images and labels
        split_f  = '{}/train_pairs.txt'.format(self.data_root_dir)
        self.indices = open(split_f, 'r').read().splitlines()
        self.idx = 0

        # make eval deterministic
        #if 'train' not in self.split:
        #    self.random = False

        # randomization: seed and pick
        if self.random:
            random.seed(self.seed)
            self.idx = random.randint(0, len(self.indices)-1)
Example #8
    def test_translational_alignment(self):
        """ Test the translational alignment in 2D routine """
        random.seed()
        name=self.get_input_file_name("1z5s-projection-2.spi")
        srw = IMP.em2d.SpiderImageReaderWriter()
        image=IMP.em2d.Image()
        image.read(name,srw)
        translated=IMP.em2d.Image()
        # random translation
        trans=IMP.algebra.Vector2D(random.random()*10,random.random()*10)
        transformation = IMP.algebra.Transformation2D(trans)
        IMP.em2d.get_transformed(image.get_data(),translated.get_data(),
                                 transformation)
        fn_translated = self.get_input_file_name("translated.spi")
#        translated.write(fn_translated,srw)
        result=IMP.em2d.get_translational_alignment(
                image.get_data(),translated.get_data(),True)
        fn_aligned = self.get_input_file_name("trans_aligned.spi")
 #       translated.write(fn_aligned,srw)
        # -1 to get the translation applied to the reference.
        # Result contains the translation required to align the second matrix
        determined_trans= (-1)*result[0].get_translation()
        # Tolerate 1 pixel error
        self.assertAlmostEqual(abs(determined_trans[0]-trans[0]),0, delta=1,
                msg="1st coordinate is incorrect: Applied %f Determined %f" \
                    % (trans[0], determined_trans[0]))
        self.assertAlmostEqual(abs(determined_trans[1]-trans[1]),0, delta=1,
                msg="2nd coordinate is incorrect: Applied %f Determined %f" \
                    % (trans[1], determined_trans[1]))
Example #9
    def test_complete_alignment(self):
        """ Test the complete alignment in 2D routine (new) """
        random.seed()
        name=self.get_input_file_name("1z5s-projection-2.spi")
        srw = IMP.em2d.SpiderImageReaderWriter()
        image=IMP.em2d.Image()
        image.read(name,srw)
        transformed=IMP.em2d.Image()

        rot=IMP.algebra.Rotation2D(random.random()*2*pi)
        trans=IMP.algebra.Vector2D(random.random()*10,random.random()*10)

        T=IMP.algebra.Transformation2D(rot,trans)
        IMP.em2d.get_transformed(image.get_data(),transformed.get_data(),T)
        fn_transformed = self.get_input_file_name("transformed.spi")
#       transformed.write(fn_transformed,srw)

        result=IMP.em2d.get_complete_alignment(image.get_data(),
                                         transformed.get_data(),True)
        fn_aligned = self.get_input_file_name("aligned_complete.spi")
#       transformed.write(fn_aligned,srw)
        cross_correlation_coefficient = result.second
        # Tolerate 1 pixel error
        self.assertAlmostEqual(cross_correlation_coefficient,1, delta=0.03,
              msg="Error in the complete aligment routine,"
                  "ccc %f less than 0.97" % (cross_correlation_coefficient))
Example #10
def get_token(request):
    user_id = authenticated_userid(request)
    discussion_id = request.context.get_discussion_id()
    if not user_id:
        raise HTTPUnauthorized()
    req_permissions = request.GET.getall('permission') or [
        P_READ, P_READ_PUBLIC_CIF]
    random_seed = request.GET.get('seed', None)
    if random_seed:
        # We need some determinism
        import random
        random.seed(random_seed)
        random_str = ''.join([chr(random.randint(0, 255)) for i in range(8)])  # bytes 0-255; randint is inclusive at both ends
        random.seed(urandom(8))
    else:
        random_str = urandom(8)
    if isinstance(req_permissions, list):
        req_permissions = set(req_permissions)
    else:
        req_permissions = set((req_permissions,))
    permissions = set(get_permissions(user_id, discussion_id))
    if not req_permissions:
        req_permissions = permissions
    else:
        if P_READ in permissions:
            permissions.add(P_READ_PUBLIC_CIF)
        if P_SYSADMIN not in permissions:
            req_permissions = list(req_permissions.intersection(permissions))
    req_permissions = list(req_permissions)
    data = [str(user_id), str(discussion_id)]
    data.extend([str(x) for (x,) in Permission.db.query(Permission.id).filter(
        Permission.name.in_(req_permissions)).all()])
    data = ','.join(data) + '.' + base64.urlsafe_b64encode(random_str)
    return Response(body=data_token(data), content_type="text/text")
Example #11
    def test_rotational_alignment(self):
        """ Test the rotational alignment in 2D routine (new) """
        random.seed()
        name=self.get_input_file_name("1z5s-projection-2.spi")
        srw = IMP.em2d.SpiderImageReaderWriter()
        image=IMP.em2d.Image()
        image.read(name,srw)
        rotated=IMP.em2d.Image()
        # random rotation
        angle=random.random()*2*pi
        rot=IMP.algebra.Rotation2D(angle)
        transformation = IMP.algebra.Transformation2D(rot)
        IMP.em2d.get_transformed(image.get_data(),rotated.get_data(),
                                 transformation)

        fn_rotated = self.get_input_file_name("rotated.spi")
#        rotated.write(fn_rotated,srw)
        result=IMP.em2d.get_rotational_alignment(
                image.get_data(),rotated.get_data(),True)
        fn_aligned = self.get_input_file_name("rot_aligned.spi")
 #       rotated.write(fn_aligned,srw)
        determined_angle=result[0].get_rotation().get_angle()
        # approximately 6 degrees tolerance, 0.1 rad.
        x = angle+determined_angle
        modulo = (abs(x % (2*pi)) < 0.1) or (abs(x % (2*pi)-2*pi) < 0.1)
        self.assertEqual(modulo,True,msg="Angles applied %f and "
          "determined %f are different, difference %f" % (angle
                                                      ,determined_angle,x))
Example #12
    def _generate_task_id_part(cls, task_id, length):
        # NOTE(stpierre): the first part of the random name is a
        # subset of the task ID
        task_id_part = task_id.replace("-", "")[0:length]

        if len(task_id_part) < length:
            LOG.debug("Task ID %(task_id)s cannot be included in a random "
                      "name because it is too short. Format: %(format)s" %
                      {"task_id": task_id,
                       "format": cls.RESOURCE_NAME_FORMAT})
        elif any(char not in cls.RESOURCE_NAME_ALLOWED_CHARACTERS
                 for char in task_id_part):
            LOG.debug("Task ID %(task_id)s cannot be included in a random "
                      "name because it includes disallowed characters. "
                      "Allowed characters are: %(chars)s" %
                      {"task_id": task_id,
                       "chars": cls.RESOURCE_NAME_ALLOWED_CHARACTERS})
        else:
            return task_id_part

        # NOTE(stpierre): either the task UUID is shorter than the
        # task portion; or the portion of the task ID that we
        # would use contains only characters in
        # resource_name_allowed_characters.
        try:
            # NOTE(stpierre): seed pRNG with task ID so that all random
            # names with the same task ID have the same task ID part
            random.seed(task_id)
            return cls._generate_random_part(length)
        finally:
            random.seed()
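
The NOTE comments above describe the pattern in use: seed the module-level generator with the task ID inside the try block so the random part is identical for every name derived from that task, then reseed from OS entropy in finally so later random calls are not predictable. A minimal standalone sketch of that seed/derive/reseed pattern, with hypothetical names not taken from the original module:

import random
import string

def deterministic_suffix(key, length=8):
    # Same key -> same suffix; the finally clause restores unpredictability
    # for any other user of the global random module.
    try:
        random.seed(key)
        return "".join(random.choice(string.ascii_lowercase) for _ in range(length))
    finally:
        random.seed()

assert deterministic_suffix("task-1234") == deterministic_suffix("task-1234")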
Example #13
def main(scrape_file, gen_file, min_wait=1.0, max_wait=3.5, **kwargs):
	# seed for waiting
	random.seed()

	if gen_file == scrape_file:
		raise IOError("HEY! Don't use the same file for two things!!!")

	try:
		with open(scrape_file, 'r') as to_scrape:
			if (gen_file):
				genfile = open(gen_file[0], 'w+')
			site_counter = kwargs['site_counter'] # loop break, default=1
			result_number = 1 # counter for filtered set
			for site in to_scrape:
				url = site.rsplit(',')[1].strip()
				url_num = site.rsplit(',')[0].strip()
				# --skip option takes effect here
				if int(url_num) == int(site_counter):
					# get the result, None = failure
					site_result = scrape(url)
					if site_result:
						# record the results
						report_results(url, site_result, gen_file,
							result_number, kwargs['writeout'],
							kwargs['logfile'])
						result_number += 1
					if site_counter >= int(kwargs['site_counter']) +\
					 int(kwargs['max_sites'] - 1):
						break
					# don't want to DoS...
					sleep(random.uniform(min_wait, max_wait))
					site_counter += 1
			
	except IOError, e:
		raise IOError("File " + e.filename + " does not exist!")
Example #14
def lorem(randseed=None, count=1, method=None):
    u"""
    Creates Lorem Ipsum text.

    Usage format:

        {% lorem [randseed] [count] [method] %}

    ``randseed`` is any hashable object used to initialize the random number generator.
    If ``randseed`` is not given, the common "Lorem ipsum dolor sit..." text is used.

    ``count`` is a number of paragraphs or sentences to generate (default is 1).

    ``method`` is either ``p`` for HTML paragraphs enclosed in ``<p>`` tags, or ``b`` for
    plain-text paragraph blocks (default is ``b``).

    Notice: This filter is a rewrite of the ``lorem`` filter from the ``webdesign`` module in the
    default Django package ``django.contrib.webdesign``. The original ``lorem`` filter does not give
    stable random text, so its generated paragraphs change on every page refresh. We stabilize the
    generated text by setting a fixed randseed before generating the paragraphs.
    """

    state = random.getstate()
    random.seed(randseed)
    res = paragraphs(count, common=(randseed is None))
    random.setstate(state)

    if method == u'p':
        res = [u'<p>{}</p>'.format(p) for p in res]
    return u'\n'.join(res)
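
The docstring explains the trick: snapshot the global generator with ``random.getstate()``, seed it so the generated text is the same on every render, then restore the snapshot with ``random.setstate()`` so the caller's random stream is untouched. A minimal sketch of that save/seed/restore pattern with illustrative names:

import random

def stable_sample(randseed, population, k=3):
    # Save the global RNG state, seed for a repeatable pick, then restore it
    # so surrounding code sees an undisturbed random stream.
    state = random.getstate()
    random.seed(randseed)
    try:
        return random.sample(population, k)
    finally:
        random.setstate(state)

words = ["lorem", "ipsum", "dolor", "sit", "amet"]
assert stable_sample(1, words) == stable_sample(1, words)

random.seed(99)
first = random.random()
random.seed(99)
stable_sample(1, words)            # does not disturb the global stream
assert random.random() == first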
Example #15
def main():
    """Main function to initialize databases to analysize Yelp data."""
    import random

    # ------------ Save Yelp Data as Pandas DataFrames to pickle ------------
    # Save all Yelp restaurant data in Arizona (Phoenix area)
    #restaurant_data = read_yelp('business',state=['AZ'],open=[True],categories='restaurants')
    #review_data = read_yelp('review',business_id=restaurant_data.business_id.unique())
    restaurant_data = pd.read_pickle('../data/pandas/business.pkl')
    review_data = pd.read_pickle('../data/pandas/review.pkl')
    result = save2pickle(restaurant_data,review_data)
    result = py2mysql(restaurant_data,review_data)

    # Save information for mexican restaurants only
    restaurant_data = restaurant_data[restaurant_data['categories'].map(lambda x: 'mexican' in [cat.lower() for cat in x])]
    review_data = review_data[review_data['business_id'].isin(restaurant_data.business_id.unique())]
    result = save2pickle(restaurant_data,review_data,append_string='_mexican')
    result = py2mysql(restaurant_data,review_data,append_string='_mexican')

    # Segment some data for training
    random.seed(1234)
    trainids = random.sample(restaurant_data.business_id,20)
    restaurant_data = restaurant_data[restaurant_data['business_id'].isin(trainids)]
    review_data = review_data[review_data['business_id'].isin(trainids)]
    result = save2pickle(restaurant_data,review_data,append_string='_mexican_train')

    # Make database of individual sentences from review data
    sentences = process_text.reviews_to_sentences(review_data)
    sentences = process_text.add_training_label(sentences,review_data)
    sentences.to_pickle('../data/pandas/sentences_mexican.pkl')
    result = sentence2mysql(sentences,review_data,append_string='_mexican')
Example #16
    def __init__(self, env):
        super(LearningAgent, self).__init__(env)  # sets self.env = env, state = None, next_waypoint = None, and a default color
        self.color = 'red'  # override color
        self.planner = RoutePlanner(self.env, self)  # simple route planner to get next_waypoint
        # TODO: Initialize any additional variables here
        # Initialize a random seed:
        random.seed(0)
        # the set of valid actions:
        self.valid_actions = [None, 'forward', 'left', 'right']
        # the state of the agent
        self.state = None
        # store the previous state, action and reward.
        self.prev_state = None
        self.prev_action = None
        self.prev_reward = None
        # the Q-table as a dict:
        # Q-table initialization to 0:
        self.q_table = dict()
        for light in ['green','red']:
            for oncoming in [None, 'forward', 'left', 'right']:
                for left in [None, 'forward', 'left', 'right']:
                    for right in [None, 'forward', 'left', 'right']:
                        for waypoint in ['forward', 'left', 'right']:
                            self.q_table[State({'light':light,'oncoming':oncoming,'left':left,'right':right},waypoint)] = {None:0,'forward':0,'left':0,'right':0}

        # parameters:
        self.gamma = 0.1
        self.learning_rate = 0.5
        self.epsilon = 0.1
Example #17
def main():
	


	random.seed(RANDOM_SEED)
	for arrival_rate in [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09]:

		host_current_slot = 1 
		host_num_backoffs = 0 
		

		env = simpy.Environment()
		Packet_Delay = StatObject()
		Server_Idle_Periods = StatObject()
		
		Host1 = server_queue(env, arrival_rate, Packet_Delay, Server_Idle_Periods, host_current_slot, host_num_backoffs, 1)
		Host2 = server_queue(env, arrival_rate, Packet_Delay, Server_Idle_Periods, host_current_slot, host_num_backoffs, 1)
		Host3 = server_queue(env, arrival_rate, Packet_Delay, Server_Idle_Periods, host_current_slot, host_num_backoffs, 1)
		Host4 = server_queue(env, arrival_rate, Packet_Delay, Server_Idle_Periods, host_current_slot, host_num_backoffs, 1)
		Host5 = server_queue(env, arrival_rate, Packet_Delay, Server_Idle_Periods, host_current_slot, host_num_backoffs, 1)
		Host6 = server_queue(env, arrival_rate, Packet_Delay, Server_Idle_Periods, host_current_slot, host_num_backoffs, 1)
		Host7 = server_queue(env, arrival_rate, Packet_Delay, Server_Idle_Periods, host_current_slot, host_num_backoffs, 1)
		Host8 = server_queue(env, arrival_rate, Packet_Delay, Server_Idle_Periods, host_current_slot, host_num_backoffs, 1)
		Host9 = server_queue(env, arrival_rate, Packet_Delay, Server_Idle_Periods, host_current_slot, host_num_backoffs, 1)
		Host10 = server_queue(env, arrival_rate, Packet_Delay, Server_Idle_Periods, host_current_slot, host_num_backoffs, 1)

		result = store_result()

		list_of_host = [Host1,Host2,Host3,Host4,Host5,Host6,Host7,Host8,Host9,Host10]
		sim = simulation(env,list_of_host, result)
		env.process(sim.run_process(env, list_of_host, result))		
		env.run(until=SIM_TIME)

		print ("Lambda Value: %f" % arrival_rate)
		print ("Throughput: %f" % result.get_throughput())
Example #18
  def testIrisES(self):
    random.seed(42)

    iris = datasets.load_iris()
    x_train, x_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        test_size=0.2,
                                                        random_state=42)

    x_train, x_val, y_train, y_val = train_test_split(
        x_train, y_train, test_size=0.2)
    val_monitor = learn.monitors.ValidationMonitor(x_val, y_val,
                                                   early_stopping_rounds=100)

    # classifier without early stopping - overfitting
    classifier1 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                                n_classes=3,
                                                steps=1000)
    classifier1.fit(x_train, y_train)
    accuracy_score(y_test, classifier1.predict(x_test))

    # classifier with early stopping - improved accuracy on testing set
    classifier2 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                                n_classes=3,
                                                steps=1000)

    classifier2.fit(x_train, y_train, monitors=[val_monitor])
    accuracy_score(y_test, classifier2.predict(x_test))
Example #19
 def setup(self, data):
     'parse initial input and setup starting game state'
     for line in data.split('\n'):
         line = line.strip().lower()
         if len(line) > 0:
             tokens = line.split()
             key = tokens[0]
             if key == 'cols':
                 self.cols = int(tokens[1])
             elif key == 'rows':
                 self.rows = int(tokens[1])
             elif key == 'player_seed':
                 random.seed(int(tokens[1]))
             elif key == 'turntime':
                 self.turntime = int(tokens[1])
             elif key == 'loadtime':
                 self.loadtime = int(tokens[1])
             elif key == 'viewradius2':
                 self.viewradius2 = int(tokens[1])
             elif key == 'attackradius2':
                 self.attackradius2 = int(tokens[1])
             elif key == 'spawnradius2':
                 self.spawnradius2 = int(tokens[1])
             elif key == 'turns':
                 self.turns = int(tokens[1])
     self.map = [[LAND for col in range(self.cols)]
                 for row in range(self.rows)]
Example #20
def random_fill_cells(seed, fill_percent):
    global cells
    random.seed(seed)

    for x in range(G_WIDTH):
        for y in range(G_HEIGHT):
            cells[x][y] = 1 if random.random() < fill_percent else 0
Example #21
  def setUp(self) :
    random.seed(123)

    self.ids_str = iter([('1', '2'), ('2', '3'), ('4', '5'), ('6', '7'), ('8','9')])

    self.records = iter([({'name': 'Margret', 'age': '32'}, {'name': 'Marga', 'age': '33'}), \
                         ({'name': 'Marga', 'age': '33'}, {'name': 'Maria', 'age': '19'}), \
                         ({'name': 'Maria', 'age': '19'}, {'name': 'Monica', 'age': '39'}), \
                         ({'name': 'Monica', 'age': '39'}, {'name': 'Mira', 'age': '47'}), \
                         ({'name': 'Mira', 'age': '47'}, {'name': 'Mona', 'age': '9'}),
                        ])

    self.normalizedAffineGapDistance = dedupe.affinegap.normalizedAffineGapDistance
    self.data_model = {}
    self.data_model['fields'] = dedupe.core.OrderedDict()
    v = {}
    v.update({'Has Missing': False, 'type': 'String', 'comparator': self.normalizedAffineGapDistance, \
              'weight': -1.0302742719650269})
    self.data_model['fields']['name'] = v
    self.data_model['bias'] = 4.76

    score_dtype = [('pairs', 'S1', 2), ('score', 'f4', 1)]
    self.desired_scored_pairs = numpy.array([(['1', '2'], 0.96), (['2', '3'], 0.96), \
                                             (['4', '5'], 0.78), (['6', '7'], 0.72), \
                                             (['8', '9'], 0.84)], dtype=score_dtype)
Example #22
    def xtest_005_interp_random_vals(self):
        MAX_TAPS = 9
        MAX_INTERP = 7
        INPUT_LEN = 9

        random.seed(0)    # we want reproducibility

        for ntaps in xrange(1, MAX_TAPS + 1):
            for interp in xrange(1, MAX_INTERP+1):
                for ilen in xrange(ntaps, ntaps + INPUT_LEN):
                    src_data = random_floats(ilen)
                    taps = random_floats(ntaps)
                    expected_result = reference_interp_filter(src_data, interp, taps)

                    tb = gr.top_block()
                    src = gr.vector_source_f(src_data)
                    op = gr.rational_resampler_base_fff(interp, 1, taps)
                    dst = gr.vector_sink_f()
                    tb.connect(src, op, dst)
                    tb.run()
                    tb = None
                    result_data = dst.data()
                    L1 = len(result_data)
                    L2 = len(expected_result)
                    L = min(L1, L2)
                    #if True or abs(L1-L2) > 1:
                    if False:
                        sys.stderr.write('delta = %2d: ntaps = %d interp = %d ilen = %d\n' % (L2 - L1, ntaps, interp, ilen))
                        #sys.stderr.write('  len(result_data) = %d  len(expected_result) = %d\n' %
                        #                 (len(result_data), len(expected_result)))
                    #self.assertEqual(expected_result[0:L], result_data[0:L])
                    # FIXME check first ntaps+1 answers
                    self.assertEqual(expected_result[ntaps+1:L], result_data[ntaps+1:L])
Example #23
def testPrimes(mag=12):
  import random
  random.seed()
  minNum = 10**((mag - 1) // 2)  # integer division so randint() gets int bounds
  maxNum = 10**(mag // 2)
  num1, num2, numProd = (random.randint(minNum, maxNum) for n in range(3))
  num1 *= numProd
  num2 *= numProd
  factors = list(genPrimeFactors(num1))
  factorStr = ' '.join('%d' % f for f in factors)
  if num1 == prod(factors):
    # it worked!
    print('Prime factors of %d are %s' % (num1, factorStr))
    divisorStr = ' '.join('%d' % d for d in sorted(getDivisors(num1)))
    print('Divisors of %d are %s' % (num1, divisorStr))
  else:
    print('Failure. Incorrectly reports prime factors of %d are %s'
      % (num1, factorStr))
  
  gcd = getGreatestCommonDivisor(num1, num2)
  lcm = getLeastCommonMultiple(num1, num2)
  if num1 * num2 / gcd == lcm:
    print('Greatest common divisor of %d and %d is %d' % (num1, num2, gcd))
    print('Least common multiple of %d and %d is %d' % (num1, num2, lcm))
  else:
    print('Failure: found greatest common divisor of %d and %d is %d'
          % (num1, num2, gcd))
    print('Failure: found least common multiple of %d and %d is %d'
          % (num1, num2, lcm))
    factors = list(genPrimeFactors(num2))
    factorStr = ' '.join('%d' % f for f in factors)
    print('Prime factors of %d are %s' % (num2, factorStr))
Example #24
def Generate():
  random.seed()

  code = "".join([ random.choice(string.ascii_letters + string.punctuation)
                   for x in range(128) ])

  return code
Example #25
def test_arb():
    random.seed(42)
    s = SetlxSet([42])

    result = stlx_arb(s)
    eq_(result, 42)
    eq_(s, SetlxSet([42]))
Example #26
def _define_forest (ns):
  random.seed(random_seed if isinstance(random_seed,int) else None);
  if not mssel.msname:
    raise RuntimeError,"MS not set up in compile-time options";
  if run_purr:
    print mssel.msname;
    import os.path
    purrlog = os.path.normpath(mssel.msname)+".purrlog";
    Timba.TDL.GUI.purr(purrlog,[mssel.msname,'.']);
  # setup contexts properly
  array,observation = mssel.setup_observation_context(ns);

  # setup imaging options (now that we have an imaging size set up)
  imsel = mssel.imaging_selector(npix=512,arcmin=meqmaker.estimate_image_size());
  TDLRuntimeMenu("Imaging options",*imsel.option_list());

  # reading in model?
  if read_ms_model:
    model_spigots = array.spigots(column="PREDICT",corr=mssel.get_corr_index());
    meqmaker.make_per_ifr_bookmarks(model_spigots,"UV-model visibilities");
  else:
    model_spigots = None;

  # get a predict tree from the MeqMaker
  output = meqmaker.make_predict_tree(ns,uvdata=model_spigots);

  # throw in a bit of noise
  if noise_stddev:
    noisedef = Meq.GaussNoise(stddev=noise_stddev,dims=[2,2],complex=True)
    for p,q in array.ifrs():
      ns.noisy_predict(p,q) << output(p,q) + ( ns.noise(p,q)<<noisedef );
    output = ns.noisy_predict;

  # in add or subtract sim mode, make some spigots and add/subtract visibilities
  if sim_mode == ADD_MS:
    spigots = array.spigots(corr=mssel.get_corr_index());
    for p,q in array.ifrs():
      ns.sum(p,q) << output(p,q) + spigots(p,q);
    output = ns.sum;
  elif sim_mode == SUB_MS:
    spigots = array.spigots(corr=mssel.get_corr_index());
    for p,q in array.ifrs():
      ns.diff(p,q) << spigots(p,q) - output(p,q);
    output = ns.diff;
  else:
    spigots = False;

  meqmaker.make_per_ifr_bookmarks(output,"Output visibilities");

  # make sinks and vdm.
  # The list of inspectors comes in handy here
  Meow.StdTrees.make_sinks(ns,output,spigots=spigots,post=meqmaker.get_inspectors(),corr_index=mssel.get_corr_index());

  # very important -- insert meqmaker's options properly
  TDLRuntimeOptions(*meqmaker.runtime_options());

  TDLRuntimeJob(_tdl_job_1_simulate_MS,"Run simulation",job_id="simulate");

  # close the meqmaker. This produces annotations, etc.
  meqmaker.close();
Example #27
def bootstrapRealization(genTable, pathOutput, realization): #Input is table to give Kriging
  import random
  lines = []
  for jj in genTable:
    lines.append(jj)
  # 
  #Shuffling
  #
  newList = []
  for jj in numpy.arange(len(lines)):
    random.seed()
    select = choice(lines)
    # To avoid duplicates, if the line already exists, the positions RA and Dec are  
    # offset by a random value in the range -0.5<D<0.5 arcsec.
    if select in numpy.array(newList):
      select[0] += random.random()-0.5
      select[1] += random.random()-0.5
    #
    if len(select) == 4:
      newList.append([select[0],select[1],select[2],select[3]])
    else:
      newList.append([select[0],select[1],select[2]])
#
  newList = numpy.array(newList)
# Save in dir
  if not(os.path.exists(pathOutput+'/MC'+str(realization))):
    os.mkdir(pathOutput+'/MC'+str(realization))
# Savetxt file
  listTmp = []
  for jj in newList:
    listTmp.append('\t'.join(map(str, jj))) #Join elements of the same line
  fileTMP = open(pathOutput+'/MC'+str(realization)+'/realization_'+str(int(realization))+'_Points.txt', 'wb')
  fileTMP.write("\n".join(listTmp))
  fileTMP.close()
  return True
Example #28
    def xtest_004_decim_random_vals(self):
        MAX_TAPS = 9
        MAX_DECIM = 7
        OUTPUT_LEN = 9

        random.seed(0)    # we want reproducibility

        for ntaps in xrange(1, MAX_TAPS + 1):
            for decim in xrange(1, MAX_DECIM+1):
                for ilen in xrange(ntaps + decim, ntaps + OUTPUT_LEN*decim):
                    src_data = random_floats(ilen)
                    taps = random_floats(ntaps)
                    expected_result = reference_dec_filter(src_data, decim, taps)

                    tb = gr.top_block()
                    src = gr.vector_source_f(src_data)
                    op = gr.rational_resampler_base_fff(1, decim, taps)
                    dst = gr.vector_sink_f()
                    tb.connect(src, op, dst)
                    tb.run()
                    tb = None
                    result_data = dst.data()
                    L1 = len(result_data)
                    L2 = len(expected_result)
                    L = min(L1, L2)
                    if False:
                        sys.stderr.write('delta = %2d: ntaps = %d decim = %d ilen = %d\n' % (L2 - L1, ntaps, decim, ilen))
                        sys.stderr.write('  len(result_data) = %d  len(expected_result) = %d\n' %
                                         (len(result_data), len(expected_result)))
                    self.assertEqual(expected_result[0:L], result_data[0:L])
Example #29
  def generateSequenceSet(self, numSequenceGroups, sequenceLength, seed):
    sequences = []
    random.seed(seed)
    symbolPool = range(self.symbolPoolSize)

    for i in range(numSequenceGroups):
      shuffledPool = copy.copy(symbolPool)
      random.shuffle(shuffledPool)
      startElement1 = [shuffledPool[0]]
      startElement2 = [shuffledPool[1]]
      endElement1 = [shuffledPool[2]]
      endElement2 = [shuffledPool[3]]
      sequenceElements = shuffledPool[4:(4+sequenceLength-2)]

      sharedSubsequence1 = copy.copy(sequenceElements)
      sharedSubsequence2 = copy.copy(sequenceElements)
      while sharedSubsequence1 == sharedSubsequence2:
        random.shuffle(sharedSubsequence1)
        random.shuffle(sharedSubsequence2)

      sequences.append(startElement1+sharedSubsequence1+endElement1)
      sequences.append(startElement2+sharedSubsequence1+endElement2)
      # sequences.append(startElement1+sharedSubsequence2+endElement2)
      # sequences.append(startElement2+sharedSubsequence2+endElement1)
    return sequences
Example #30
def _serve_one(s, listener, alive_r, handlers):
    # close unnecessary stuff and reset signal handlers
    listener.close()
    os.close(alive_r)
    for sig, val in handlers.items():
        signal.signal(sig, val)

    # receive fds from parent process
    fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
    s.close()
    assert len(fds) <= MAXFDS_TO_SEND
    (child_r, child_w, _forkserver._forkserver_alive_fd,
     stfd, *_forkserver._inherited_fds) = fds
    semaphore_tracker._semaphore_tracker._fd = stfd

    # send pid to client processes
    write_unsigned(child_w, os.getpid())

    # reseed random number generator
    if 'random' in sys.modules:
        import random
        random.seed()

    # run process object received over pipe
    code = spawn._main(child_r)

    # write the exit code to the pipe
    write_unsigned(child_w, code)
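
The "reseed random number generator" step above matters because a forked child inherits the parent's RNG state byte for byte: without reseeding, every worker would produce the same "random" sequence. A small POSIX-only illustration of the effect (not part of the forkserver code):

import os
import random

random.seed(12345)                 # parent state, inherited by each child
for _ in range(2):
    pid = os.fork()
    if pid == 0:                   # child process
        random.seed()              # drop this line and both children print the same value
        print(os.getpid(), random.random())
        os._exit(0)
    os.waitpid(pid, 0)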
Example #31
def Vulscaner(access, filters, preferences = [], output = 'text'):
	print output
	global table
	if not os.path.isfile(access):
		print "error: the log file doesn't exist"
		return
	if not os.path.isfile(filters):
		print "error: the filters file (XML) doesn't exist"
		print "please download the filter"
		return
	if output not in ('html', 'text', 'xml'):
		print "error: the output format '%s' hasn't been recognized" % output
		return
	
	xml_filters = parse(filters)
	len_filters = len(xml_filters)
	if len_filters < 1:
		return
	
	regs = {} 

	print "Loading XML file '%s'..." % filters
	for group in xml_filters:
		for f in xml_filters[group]:
			if f == 'filter':
				if type(xml_filters[group][f]) == type([]):
					for elmt in xml_filters[group][f]:
						rule, impact, description, tags = "",-1,"",[]
						if 'impact' in elmt:
							impact = get_value(elmt['impact'], -1)
						if 'rule' in elmt:
							rule = get_value(elmt['rule'], "")
						if 'description' in elmt:
							description = get_value(elmt['description'], "")
						if 'tags' in elmt and 'tag' in elmt['tags']:
							if type(elmt['tags']['tag']) == type([]):
								for tag in elmt['tags']['tag']:
									tags.append(get_value(tag, ""))
							else:
								tags.append(get_value(elmt['tags']['tag'], ""))
						
						for t in tags:
							compiled = None
							if t not in regs:
								regs[t] = []
							try:
								compiled = re.compile(rule)
							except Exception:
								print "The rule '%s' cannot be compiled properly" % rule
								return
							_hash = hash(rule)
							if impact > -1:
								table[_hash] = (compiled, impact, description, rule, _hash)
								regs[t].append(_hash)
	if len(preferences['attack_type']) < 1:
		preferences['attack_type'] = regs.keys()
	flag = {} 

	print "Processing the file '%s'..." % access

	sample, sampled_lines = False, []
	if preferences['sample'] != float(100):
		
		sample = True
		total_nb_lines = sum(1 for line in open(access))
		
		random.seed(time.clock())
		sampled_lines = random.sample(range(total_nb_lines), int(float(total_nb_lines) * preferences['sample'] / float(100)))
		sampled_lines.sort()

	loc, lines, nb_lines = 0, 0, 0
	old_diff = 0
	start = time.time()
	diff = []
	with open(access) as log_file:
		for line in log_file:
			lines += 1
			if sample and lines not in sampled_lines:
				continue
			if c_reg.match(line):
				out = c_reg.search(line)
				ip = out.group(1)
				name  = out.group(2)
				date = out.group(3)
				ext  = out.group(4)
				method = out.group(5)
				url = out.group(6)
				response = out.group(7)
				byte = out.group(8)
				referrer = out.group(9)
				agent = out.group(10)

				if not correct_period(date, preferences['period']):
					continue
				loc += 1
				if len(url) > 1 and method in ('GET','POST','HEAD','PUT','PUSH','OPTIONS'):
					analyzer([(ip,name,date,ext,method,url,response,byte,referrer,agent),regs,flag, preferences, line])
			elif preferences['except']:
				diff.append(line)

			if nb_lines > 0 and lines > nb_lines:
				break

	tt = time.time() - start
	n = 0
	for t in flag:
		for i in flag[t]:
			n += len(flag[t][i])
	print "Vulscan results:"
	print "\tProcessed %d lines over %d" % (loc,lines)
	print "\tFound %d attack patterns in %f s" % (n,tt)

	short_name = access[access.rfind(os.sep)+1:]


	hcmp = filecmp.cmp('/etc/hosts', './template_files/hosts_temp', True)
	nscmp = filecmp.cmp('/etc/nsswitch.conf', './template_files/nsswitch_temp', True)
	rcomp = filecmp.cmp('/etc/resolv.conf', './template_files/resolv_temp', True) 

	print hcmp
	print nscmp
	print rcomp

	vr = 0
	if hcmp == False:
		vr = vr + 0.34
	if nscmp == False:
		vr = vr + 0.33
	if rcomp == False:
		vr = vr + 0.33

	#if n > 0:
	print "Generating output in %s%s%s_Vulscan_*" % (preferences['odir'],os.sep,short_name)
	if 'html' in preferences['output']:
		generate_html_file(flag, short_name, filters, preferences['odir'], vr)
	elif 'text' in preferences['output']:
		generate_text_file(flag, short_name, filters, preferences['odir'])
	elif 'xml' in preferences['output']:
		generate_xml_file(flag, short_name, filters, preferences['odir'])

	if len(diff) > 0:
		o_except = open(os.path.abspath(preferences['odir'] + os.sep + "Vulscan_except.txt"), "w")
		for l in diff:
			o_except.write(l + '\n')
		o_except.close()
Example #32
 def random_bool(seed=None):
     random.seed(seed)
     return bool(random.getrandbits(1))
Example #33
def CreateTrainingData(dataset, numClassifiers=2):
    seed(2)
    ##Create a list for all IN IDs and one for all EX IDs
    AllIDs_IN = list()
    AllIDs_EX = list()
    ##ADD the IDs for all  input articles to their respective list.
    for pos in range(0, len(dataset.Training_articles)):
        if dataset.Training_articles[pos] == 1:
            AllIDs_IN.append(dataset.Training_articles_IDs[pos])
        else:
            AllIDs_EX.append(dataset.Training_articles_IDs[pos])

    ##Randomise the include and exclude ID lists.
    random.shuffle(AllIDs_IN)
    random.shuffle(AllIDs_EX)

    ##Print the lists to check them.
    #for pos in range(0,len(FibratesData.Target)):
    #    print(FibratesData.Target[pos])
    #    print(FibratesData.IDs[pos])
    #print(AllIDs_IN)
    #print(AllIDs_EX)

    ##Truncate both lists to be half the length of the specified training set size.
    #totalAbstracts=len(dataset.Target)
    #print("Total abstracts to classify: ",totalAbstracts)
    #print("Proportion to use to train: ",trainProp)

    ##divide the trainProp by 2 to calculate the max number of each class (IN and EX).
    #maxAbstracts=int(totalAbstracts*trainProp/2)
    ##Truncate both lists if they are above that length
    #if len(AllIDs_IN)>maxAbstracts:
    #    del AllIDs_IN[maxAbstracts:]
    #if len(AllIDs_EX)>maxAbstracts:
    #    del AllIDs_EX[maxAbstracts:]

    ##Define the number of abstracts in the smaller class.
    #numInSmallerClass=0
    #INexcess=len(AllIDs_IN)-len(AllIDs_EX)
    #if INexcess >=0:
    #    numInSmallerClass=len(AllIDs_EX)
    #if INexcess <0:
    #    numInSmallerClass=len(AllIDs_IN)

    ##Check there are enough abstracts in the smaller class for at least one
    ## Per classifier.
    #if numInSmallerClass<numClassifiers:
    #    print("\nNot enough abstracts in minority class to train",numClassifiers,"classifiers.")
    #    print("Reducing the number of classifiers to" ,numInSmallerClass,".\n")
    #    numClassifiers=numInSmallerClass

    ##Truncated the longer list so that it is the same length as the IN list.
    #if len(AllIDs_EX)>len(AllIDs_IN):
    #    AllIDs_EX=AllIDs_EX[:len(AllIDs_IN)]
    #if len(AllIDs_IN)>len(AllIDs_EX):
    #    AllIDs_IN=AllIDs_IN[:len(AllIDs_EX)]

    ##Define how many pairs of INEX abstract pairs will be fed for training into each
    ## classifier.
    print("numClassifiers: ", numClassifiers)
    absPerClassifier = len(AllIDs_IN) // numClassifiers
    print("absPerClassifier: ", absPerClassifier)
    print("Total abstracts used for training: ",
          (numClassifiers * absPerClassifier))

    ##create include ID list sublists of the correct length to yield the same number
    ##of sublists as there are desired classifiers.
    INIDs_rand = list()
    EXIDs_rand = list()
    for i in range(0, len(AllIDs_IN), absPerClassifier):
        INIDs_rand.append(AllIDs_IN[i:i + absPerClassifier])
        EXIDs_rand.append(AllIDs_EX[i:i + absPerClassifier])

    #print("shuffled, paired INEX training lists")
    #print(INIDs_rand)
    #print(EXIDs_rand)

    ## Remove the last sublist if it is not full.
    #if len(INIDs_rand)>numClassifiers:
    #    del INIDs_rand[-1]
    #    del EXIDs_rand[-1]

    #print("shuffled, paired INEX training lists")
    #print(INIDs_rand)
    #print(EXIDs_rand)

    ## Merge IN and EX ID lists (the assignment will be looked up from the ID later).
    IDgroups = list()
    for pos in range(0, len(INIDs_rand)):
        IDgroups.append(INIDs_rand[pos] + EXIDs_rand[pos])

    ## Check the number of training groups and reduce down to the requested number
    ## of classifiers. This is because each group will have an even number of IDs
    ## (to match IN and EX numbers), and all groups have to have the same number.
    ## Therefore at this point there might be a higher number of groups than requested.
    ## e.g. If there are 50 abstracts, 25 IN and 25 EX, and 10 classifiers are requested,
    ## this will mean that training abstracts will be in groups of 4, which will give 12
    ## groups in total. The next highest group number would be 6, but that would only give
    ## 8 groups, which is fewer than requested.
    ##return ID lists.
    while len(IDgroups) > numClassifiers:
        del IDgroups[-1]

    ## Return the list of ID number groups.
    return (IDgroups)
Example #34
def PickTrainingArticles(articles, numClassifiers=2, trainProp=1):
    seed(1)
    ##define lists to contain the IDs of the articles of each class
    INlist = list()
    EXlist = list()
    ##count up the articles for each class.
    for article in articles:
        if article.goldAssignment == "IN":
            INlist.append(article.ID)
        elif article.goldAssignment == "EX":
            EXlist.append(article.ID)
    ##randomise both lists.
    random.shuffle(INlist)
    random.shuffle(EXlist)
    print("number of IN articles in dataset: ", len(INlist))
    print("number of EX articles in dataset: ", len(EXlist))
    ##calculate the length of the smaller class and truncate the larger to be the same.
    print("Correcting class imblance...")
    if len(INlist) < len(EXlist):
        del EXlist[len(INlist):]
    else:
        del INlist[len(EXlist):]
    print("number of IN articles in dataset: ", len(INlist))
    print("number of EX articles in dataset: ", len(EXlist))
    ##Reduce the size of the training lists based on the trainProp argument.
    print(
        "Reducing the size of training sets according to defined proportion..."
    )
    print("Defined proportion is: ", trainProp)
    reduceByTrainPop = int(len(INlist) // (1 / trainProp))
    del EXlist[reduceByTrainPop:]
    del INlist[reduceByTrainPop:]
    print("number of IN articles in dataset: ", len(INlist))
    print("number of EX articles in dataset: ", len(EXlist))
    ##Reduce the size of the training lists if they sum to be greater than the
    ##maxTrainingAbstracts.
    print(
        "Reducing the size of the training sets to lie below the absolute maximum."
    )
    print("Defined maximum number is: ", maxTrainingAbstracts)
    print("Current number is: ", (len(INlist) + len(EXlist)))
    if len(INlist) > (maxTrainingAbstracts / 2):
        print("Reducing size of training set...")
        del INlist[int(maxTrainingAbstracts / 2):]
        del EXlist[int(maxTrainingAbstracts / 2):]
    print("number of IN articles in dataset: ", len(INlist))
    print("number of EX articles in dataset: ", len(EXlist))
    ##Reduce the size of the training lists so that they can be evenly split by
    ##the number of classifiers.
    print(
        "Reducing the size of training sets to match classifier training set size."
    )
    print("Defined number of classifiers is: ", numClassifiers)
    reduceByClassifierNum = int(len(INlist) % (numClassifiers))
    del EXlist[len(EXlist) - reduceByClassifierNum:]
    del INlist[len(INlist) - reduceByClassifierNum:]
    print("number of IN articles in dataset: ", len(INlist))
    print("number of EX articles in dataset: ", len(EXlist))
    ##Flag each of the training articles that are left as for training.
    for id in EXlist:
        for article in articles:
            if article.ID == id:
                article.SVMtrainEX = True
    for id in INlist:
        for article in articles:
            if article.ID == id:
                article.SVMtrainIN = True
Example #35
##Define how many iterations of the C and gamma parameter optimiser we want.
nestedIterations = 1

## Define punctuations to be removed from abstract text before analysis.
punctuations = "_çìëá•™©!£$€/%^&*+±=()≥[]|0123456789'=.,:;?%<>ÿòñœ~#@-—–{}úøîæ÷ó¬åய§" + '"“”'

##############################################################

# Import necessary packages.
import time
import math
import csv
import random
from random import seed
seed(1)

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize

from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
# Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics

##Record the time that the program started.
startTime = time.time()

##############################################################
Example #36
import numpy as np
import tensorflow as tf
import random as rn
import numpy as np

import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)

from keras import backend as K

tf.set_random_seed(1234)

sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import LabelEncoder
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from keras.layers import Input, Dense,LSTM,RepeatVector,GRU,Dropout,Reshape
from keras.layers import *
from keras.models import Model
from keras.models import Sequential
from deap import base, creator, tools, algorithms
from pandas import DataFrame
from pandas import Series
Example #37
        #mean = np.mean(future_to_url.result())
        '''
        for future in concurrent.futures.as_completed(future_to_url):
            try:
                data = future.result()
            except Exception as exc:
                data = str(type(exc))
            finally:
                # out response
                out.append(data)

                #print(str(len(out)),end="\r")
        '''


'''
data=np.empty([10000,4])
i=0
random.seed(42)
while i<10000:



    d1=random.randint(1,365)
    d2=random.randint(1,500)
    d3=random.randint(0,100)
    d4=random.randint(1,1000)
    r=requests.get("http://memento.evannai.inf.uc3m.es/age/eci1?days={}&seats={}&demand={}&price={}".format(d1,d2,d3,d4))
    r_float=float(r.text)

    #print(aux_arr)
Example #38
 def setUp(self):
     seed(42)
Example #39
from random import randint
from random import seed
from queue import PriorityQueue

# reading input
inputFile = sys.argv[1]
myInput = open(inputFile, 'r')
allLines = myInput.readlines()  #reads each line to an array

# setting global variables and the random seed
state = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
         '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23')
goal = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
        '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23')
maxNodes = -1
seed(98765)


# function to rotate in a given direction
# instead of moving just one tile we have to rotate multiple cubies
def move(st, direction):
    if direction == "F":  # rotate front face clockwise
        F = ('6', '7', '8', '0', '1', '2', '9', '10', '11', '3', '4', '5',
             '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22',
             '23')
        temp = []
        for i in F:
            temp.append(st[int(i)])
        new = tuple(temp)
        return new
    elif direction == "F!":  # rotate front face counterclockwise
Example #40
# This is necessary to find the main code
import sys
sys.path.insert(0, '../../bomberman')
sys.path.insert(1, '..')

# Import necessary stuff
import random
from game import Game
from monsters.selfpreserving_monster import SelfPreservingMonster

# TODO This is your code!
sys.path.insert(1, '../groupNN')
from testcharacter_astar4 import TestCharacter

# Create the game
random.seed() # TODO Change this if you want different random choices
g = Game.fromfile('map.txt')
g.add_monster(SelfPreservingMonster("aggressive", # name
                                    "A",          # avatar
                                    3, 13,        # position
                                    2             # detection range
))

# TODO Add your character
g.add_character(TestCharacter("me", # name
                              "C",  # avatar
                              0, 0  # position
))

# Run!
g.go()
Example #41
 def _select_data(self):
     random.seed(42)
     selected_indices = random.sample(range(0, len(self.data)), 20)
     self.widget._selection = selected_indices
     self.widget.commit()
     return selected_indices
Example #42
def replace_magic_states(g: BaseGraph[VT, ET],
                         pick_random: Any = False) -> SumGraph:
    """This function takes in a ZX-diagram in graph-like form 
    (all spiders fused, only Z spiders, only H-edges between spiders),
    and splits it into a sum over smaller diagrams by using the magic
    state decomposition of Bravyi, Smith, and Smolin (2016), PRX 6, 021043.
    """
    g = g.copy(
    )  # We copy here, so that the vertex labels we get will be the same ones if we copy the graph again
    phases = g.phases()

    # First we find 6 T-like spiders
    boundary = []
    internal = []
    gadgets = []
    ranking = dict()
    for v in g.vertices():
        if not phases[v] or phases[v].denominator != 4: continue

        ### begin AK changes ....
        deg = g.vertex_degree(v)
        if g.vertex_degree(v) == 1:
            w = list(g.neighbors(v))[0]
            if g.type(w) == VertexType.Z:
                gadgets.append(v)
                deg = g.vertex_degree(w) - 1

        if any(w in g.inputs or w in g.outputs for w in g.neighbors(v)):
            boundary.append(v)
        else:
            internal.append(v)
        ranking[v] = deg
        ### ... end AK changes

    if len(ranking) >= 6: num_replace = 6
    elif len(ranking) >= 2: num_replace = 2
    elif len(ranking) == 1: num_replace = 1
    else: raise Exception("No magic states to replace")

    if not pick_random:
        candidates = sorted(ranking.keys(),
                            key=lambda v: ranking[v],
                            reverse=True)[:num_replace]
    else:
        if not isinstance(pick_random, bool):
            random.seed(pick_random)
        candidates = random.sample(list(ranking.keys()), num_replace)  # sample() needs a sequence, not a dict view

    graphs = []
    if num_replace == 6:
        replace_functions = [
            replace_B60, replace_B66, replace_E6, replace_O6, replace_K6,
            replace_phi1, replace_phi2
        ]
    if num_replace == 2:
        replace_functions = [replace_2_S, replace_2_N]
    if num_replace == 1:
        replace_functions = [replace_1_0, replace_1_1]
    for func in replace_functions:
        h = func(g.copy(), candidates)
        if num_replace == 6: h.scalar.add_float(MAGIC_GLOBAL)
        graphs.append(h)

    return SumGraph(graphs)
Example #43
from seq2seq.runner import run
from seq2seq.trainers.monotonic_train import MonotonicTrainer
from utils.arguments import PARSER

# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logging.basicConfig(format=':%(levelname)s: %(message)s', level=logging.INFO)

if __name__ == '__main__':
    args = PARSER.parse_args()
    args = vars(args)
    logging.info(args)
    seed = args["seed"]

    remove_spaces = True
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)

    lang = langcodes[args["lang"]]

    testpath = args["ftest"]

    fr_lang, en_lang, examples = load_vocab_and_examples(vocabfile=args["vocabfile"], aligned_file=args["aligned_file"])
    logging.info(fr_lang.word2index)
    logging.info(en_lang.word2index)

    # ALWAYS READ ALL TEST EXAMPLES
    test = read_examples(fpath=testpath)

    logging.info("input vocab: %d", fr_lang.n_words)
Example #44
def get_man_name(user_id: int) -> str:
    random.seed(user_id)
    name = random.choice(('Орзик', 'Девочка', 'Мальчик', 'Человек'))  # Russian: "Orzik", "Girl", "Boy", "Person"
    random.seed()
    return name
Example #45
import numpy as np
import random
import matplotlib.pyplot as plt
from math import *
import scipy.signal as signal
import random
import matplotlib as mplt
# Set the seed so the experiment is reproducible: the same sequence is generated across different runs
random.seed(1234)

# Function implementing the differential equations of the quadruple-tank model
def dvTanQua(x, uv1, uv2, t):
    A1,A2,A3,A4 = 32,28,32,28 #cm²
    alpha1,alpha2,alpha3,alpha4 = 0.071,0.071,0.071,0.071
    g = 981#cm²s⁻²
    k1, k2, kc = 3.33,3.33,0.5
    y1,y2 = 0.6, 0.7 # check these values
    xd = []
    # equation 1
    xd_0 = (-alpha1/A1) * np.sqrt(2*g*x[0]) + (alpha3/A1)*np.sqrt(2*g*x[2]) + ((y1*k1)/A1)*uv1
    xd.append(xd_0*kc)
    # equation 2
    xd_1 = (-alpha2/A2) * np.sqrt(2*g*x[1]) + (alpha4/A2)*np.sqrt(2*g*x[3]) + ((y2*k2)/A2)*uv2
    xd.append(xd_1*kc)
    # equation 3
    xd_2 = (-alpha3/A3) * np.sqrt(2*g*x[2]) + (((1 - y2)*k2)/A3)*uv2
    xd.append(xd_2)
    # equation 4
    xd_3 = (-alpha4/A4) * np.sqrt(2*g*x[3]) + (((1 - y1)*k1)/A4)*uv1
    xd.append(xd_3)
Example #46
chainSyncStrategyStr=args.c
debug=args.v
total_nodes = pnodes
killCount=args.kill_count if args.kill_count > 0 else 1
killSignal=args.kill_sig
killEosInstances= not args.leave_running
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
killAll=args.clean_run
p2pPlugin=args.p2p_plugin

seed=1
testUtils.Utils.Debug=debug
testSuccessful=False

random.seed(seed) # Use a fixed seed for repeatability.
cluster=testUtils.Cluster(walletd=True)
walletMgr=testUtils.WalletMgr(True)

try:
    cluster.setChainStrategy(chainSyncStrategyStr)
    cluster.setWalletMgr(walletMgr)

    cluster.killall(allInstances=killAll)
    cluster.cleanup()
    walletMgr.killall(allInstances=killAll)
    walletMgr.cleanup()

    Print("producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d, chain sync strategy: %s" % (
        pnodes, topo, delay, chainSyncStrategyStr))
Example #47
0
def main(args):
    utils.init_distributed_mode(args)
    print("git:\n  {}\n".format(utils.get_sha()))

    if args.frozen_weights is not None:
        assert args.masks, "Frozen training is meant for segmentation only"
    print(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    model, criterion, postprocessors = build_model(args)
    model.to(device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)

    param_dicts = [
        {"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]},
        {
            "params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
            "lr": args.lr_backbone,
        },
    ]
    optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,
                                  weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)

    dataset_train = build_dataset(image_set='train', args=args)
    dataset_val = build_dataset(image_set='val', args=args)

    if args.distributed:
        sampler_train = DistributedSampler(dataset_train)
        sampler_val = DistributedSampler(dataset_val, shuffle=False)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)

    batch_sampler_train = torch.utils.data.BatchSampler(
        sampler_train, args.batch_size, drop_last=True)

    data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
                                   collate_fn=utils.collate_fn, num_workers=args.num_workers)
    data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
                                 drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)

    if args.dataset_file == "coco_panoptic":
        # We also evaluate AP during panoptic training, on original coco DS
        coco_val = datasets.coco.build("val", args)
        base_ds = get_coco_api_from_dataset(coco_val)
    else:
        base_ds = get_coco_api_from_dataset(dataset_val)

    if args.frozen_weights is not None:
        checkpoint = torch.load(args.frozen_weights, map_location='cpu')
        model_without_ddp.detr.load_state_dict(checkpoint['model'])

    output_dir = Path(args.output_dir)
    if args.resume:
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        
        del checkpoint['model']['class_embed.weight']
        del checkpoint['model']['class_embed.bias']
        del checkpoint['model']['query_embed.weight']
        
        model_without_ddp.load_state_dict(checkpoint['model'])
        if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            args.start_epoch = checkpoint['epoch'] + 1

    if args.eval:
        test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
                                              data_loader_val, base_ds, device, args.output_dir)
        if args.output_dir:
            utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            sampler_train.set_epoch(epoch)
        train_stats = train_one_epoch(
            model, criterion, data_loader_train, optimizer, device, epoch,
            args.clip_max_norm)
        lr_scheduler.step()
        if args.output_dir:
            checkpoint_paths = [output_dir / 'checkpoint.pth']
            # extra checkpoint before LR drop and every 100 epochs
            if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:
                checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'args': args,
                }, checkpoint_path)

        test_stats, coco_evaluator = evaluate(
            model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir
        )

        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in test_stats.items()},
                     'epoch': epoch,
                     'n_parameters': n_parameters}

        if args.output_dir and utils.is_main_process():
            with (output_dir / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")

            # for evaluation logs
            if coco_evaluator is not None:
                (output_dir / 'eval').mkdir(exist_ok=True)
                if "bbox" in coco_evaluator.coco_eval:
                    filenames = ['latest.pth']
                    if epoch % 50 == 0:
                        filenames.append(f'{epoch:03}.pth')
                    for name in filenames:
                        torch.save(coco_evaluator.coco_eval["bbox"].eval,
                                   output_dir / "eval" / name)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
Example #48
0
import numpy as np
import random
from os import listdir
from os.path import isfile, join

random.seed(10000)
number_of_datapoints = 10 #set this for output



anomalities = "../OutlierDetector/outliers/"
normal_digits = "../OutlierDetector/goodDigits/"

anom_files = [f for f in listdir(anomalities) if isfile(join(anomalities, f))]
print(anom_files)

normal_files = [f for f in listdir(normal_digits) if isfile(join(normal_digits, f))]
print(normal_files)

anom = {}  # dict for storing anoms keys are "1" etc
normal = {}

for a in anom_files:
    anom[a.rstrip(".npy")] = np.load(join(anomalities, a))
for a in normal_files:
    normal[a.rstrip(".npy")] = np.load(join(normal_digits, a))


def generate_random_scenario(machine, sequence):
    result = []
    for m in sequence:
Example #49
0
# Textual Voronoi code modified from: <*****@*****.**>
# http://www.ocf.berkeley.edu/~Eabhishek/

import random # for generateRandomPoints
random.seed(42)
from math import sqrt

def generateRandomPoints(npoints=6):
    """Generate a few random points v1...vn"""
    print npoints, "points x,y:"
    points = []
    for i in xrange(npoints):
        xrand, yrand = random.random(), random.random()
        print '%.2f' % xrand, '%.2f' % yrand
        for xoff in range(-1, 2):
            for yoff in range(-1, 2):
                points.append( (xrand + xoff, yrand + yoff) )
    return points


def closest(x, y, points):
    """Return the ratio of the distance to the nearest point over the distance to the second-nearest."""
    best,good = 99.0*99.0, 99.0*99.0
    for px, py in points:
        dist = (x-px)*(x-px) + (y-py)*(y-py)
        if dist < best:
            best, good = dist, best
        elif dist < good:
            good = dist
    return sqrt(best) / sqrt(good)
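

# Illustrative sketch (not in the original): closest() returns the ratio of the
# nearest to the second-nearest distance, so values close to 1.0 mark cell
# borders. Grid size and the 0.8 threshold are arbitrary demo choices.
def draw_voronoi(points, width=60, height=30, border=0.8):
    for row in range(height):
        line = ''
        for col in range(width):
            x, y = col / float(width), row / float(height)
            line += '#' if closest(x, y, points) > border else ' '
        print(line)

# e.g. draw_voronoi(generateRandomPoints(6))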
Example #50
0
import random

try:
    import pymongo as mg
except ImportError as err:
    print(err)
    exit()

maxSevSelDelay = 2
try:
    myclient = mg.MongoClient("mongodb://localhost:27017/", serverSelectionTimeoutMS=maxSevSelDelay)
except mg.errors.ServerSelectionTimeoutError as err:
    print(err)
    exit()

mydb = myclient["battleship"]
random.seed()

used_random_points = list()
used_points = list()


class Computer:
    def __init__(self):
        self.ships = list()
        self.hunt_mode = False
        self.player_ships = list()
        used_points.clear()
        used_random_points.clear()

    def place_ship(self, length):
        temp_list = list()
Example #51
0
# -*- coding: utf-8 -*-
"""
Created on Fri Jun  7 09:20:40 2019

@author: lei
"""

import math
import pickle
import random

random.seed(0)


def rand(a, b):
    return (b - a) * random.random() + a


def make_matrix(m, n, fill=0.0):
    mat = []
    for i in range(m):
        mat.append([fill] * n)
    return mat


def sigmoid(x):
    return 1.0 / (1.0 + math.exp(-x))


def sigmoid_derivative(x):
    return x * (1 - x)
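

# Quick illustrative check: sigmoid_derivative expects the *output* of sigmoid,
# so sigmoid_derivative(sigmoid(z)) should match a finite-difference estimate of
# the slope at z (z and eps below are arbitrary demo values).
z, eps = 0.5, 1e-6
numeric = (sigmoid(z + eps) - sigmoid(z - eps)) / (2 * eps)
print(sigmoid_derivative(sigmoid(z)), numeric)  # the two values nearly match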
Example #52
0
import random

import numpy as np
import torch


def set_seed(seed):
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True
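

# Minimal usage sketch (illustrative seed value): one call makes Python's,
# NumPy's and PyTorch's RNGs start from the same state; cudnn.deterministic
# trades some speed for bitwise-reproducible convolutions.
set_seed(1234)
print(random.random(), torch.rand(1).item())  # identical values on every run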
Example #53
0
    def fit(self, X_train=None, Y_train=None, X_test=None, Y_test=None, dataset_train=None, dataset_val=None, time_limit=None, **kwargs):
        start_time = time.time()
        params = self.params.copy()
        # TODO: kwargs can have num_cpu, num_gpu. Currently these are ignored.
        verbosity = kwargs.get('verbosity', 2)
        params = fixedvals_from_searchspaces(params)
        if 'min_data_in_leaf' in params:
            if params['min_data_in_leaf'] > X_train.shape[0]: # TODO: may not be necessary
                params['min_data_in_leaf'] = max(1, int(X_train.shape[0]/5.0))

        num_boost_round = params.pop('num_boost_round', 1000)
        logger.log(15, 'Training Gradient Boosting Model for %s rounds...' % num_boost_round)
        logger.log(15, "with the following hyperparameter settings:")
        logger.log(15, params)
        seed_val = params.pop('seed_value', None)

        eval_metric = self.get_eval_metric()
        dataset_train, dataset_val = self.generate_datasets(X_train=X_train, Y_train=Y_train, X_test=X_test, Y_test=Y_test, dataset_train=dataset_train, dataset_val=dataset_val)
        gc.collect()
        
        if verbosity <= 1:
            verbose_eval = False
        elif verbosity == 2:
            verbose_eval = 1000
        elif verbosity == 3:
            verbose_eval = 50
        else:
            verbose_eval = True
        
        self.eval_results = {}
        callbacks = []
        valid_names = ['train_set']
        valid_sets = [dataset_train]
        if dataset_val is not None:
            callbacks += [
                early_stopping_custom(150, metrics_to_use=[('valid_set', self.eval_metric_name)], max_diff=None, start_time=start_time, time_limit=time_limit,
                                      ignore_dart_warning=True, verbose=verbose_eval, manual_stop_file=False),
                ]
            valid_names = ['valid_set'] + valid_names
            valid_sets = [dataset_val] + valid_sets

        try_import_lightgbm()
        import lightgbm as lgb
        callbacks += [
            # lgb.reset_parameter(learning_rate=lambda iter: alpha * (0.999 ** iter)),
        ]
        # lr_over_time = lambda iter: 0.05 * (0.99 ** iter)
        # alpha = 0.1
        
        train_params = {
            'params': params.copy(),
            'train_set': dataset_train,
            'num_boost_round': num_boost_round,
            'valid_sets': valid_sets,
            'valid_names': valid_names,
            'callbacks': callbacks,
            'verbose_eval': verbose_eval,
        }
        if not isinstance(eval_metric, str):
            train_params['feval'] = eval_metric
        if seed_val is not None:
            train_params['params']['seed'] = seed_val
            random.seed(seed_val)
            np.random.seed(seed_val)
        # Train lgbm model:
        self.model = lgb.train(**train_params)
        # del dataset_train
        # del dataset_val
        # print('running gc...')
        # gc.collect()
        # print('ran garbage collection...')
        self.best_iteration = self.model.best_iteration
        params['num_boost_round'] = num_boost_round
        if seed_val is not None:
            params['seed_value'] = seed_val
Example #54
0
                       north=0,
                       south=0,
                       west=0,
                       east=0)
    return paths_nb


try:
    for_seed, density, height, width = (abs(
        int(i)) for i in input('Enter four integers: ').split())
except ValueError:
    print('Incorrect input, giving up.')
    sys.exit()
if not density:
    density = 1
seed(for_seed)
grid = np.array([randrange(density) > 0
                 for _ in range(height * width)]).reshape((height, width))
print('Here is the grid that has been generated:')
display_grid()
try:
    i1, j1, i2, j2 = (int(i) for i in input('Enter four integers: ').split())
    pt_1 = Point(i1, j1)
    pt_2 = Point(i2, j2)
    if not valid(pt_1) or not valid(pt_2):
        raise ValueError
except ValueError:
    print('Incorrect input, giving up.')
    sys.exit()
print('Will compute the number of good paths '
      f'from ({pt_1.x}, {pt_1.y}) to ({pt_2.x}, {pt_2.y})...')
TRAIN_FILE_NAME = '~/Kaggle/RLI/input/train.json'
TEST_FILE_NAME = '~/Kaggle/RLI/input/test.json'
target_num_map = {'high': 0, 'medium': 1, 'low': 2}
train_data = pd.read_json(TRAIN_FILE_NAME).reset_index()
test_data = pd.read_json(TEST_FILE_NAME).reset_index()
list_img_time = pd.read_csv("~/Kaggle/RLI/input/listing_image_time.csv")
train_data = train_data.merge(list_img_time,
                              left_on="listing_id",
                              right_on="Listing_Id",
                              how='inner')
test_data = test_data.merge(list_img_time,
                            left_on="listing_id",
                            right_on="Listing_Id",
                            how='inner')
RS = 2016
random.seed(RS)
np.random.seed(RS)
# RS = 0


def validation_score(early_stop=False):
    clf = xgboostClassifier(
        objective='multi:softprob',
        eval_metric='mlogloss',
        num_class=3,
        nthread=3,
        eta=0.04,
        max_depth=6,
        subsample=0.7,
        colsample_bytree=1.0,
        colsample_bylevel=0.7,
import random
import pickle
import numpy as np


def stop_condition_1(domain, error=0.05):
    N = 0
    cond = (2 * domain.d_factor ** N * domain.Br) / (1 - domain.d_factor) ** 2
    while cond > error:
        N += 1
        cond = (2 * domain.d_factor ** N * domain.Br) / (1 - domain.d_factor) ** 2
    return N
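

# Illustrative closed form (hypothetical helper, same domain attributes as above):
# stop_condition_1 finds the smallest N with 2*gamma**N*Br/(1 - gamma)**2 <= error,
# which can be solved directly when 0 < gamma < 1 and Br > 0.
from math import ceil, log


def stop_condition_closed_form(domain, error=0.05):
    gamma, Br = domain.d_factor, domain.Br
    return max(0, int(ceil(log(error * (1 - gamma) ** 2 / (2 * Br)) / log(gamma))))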


if __name__ == "__main__":
    random.seed(2)
    domain = Domain()
    agent = Agent(domain)
    policy = agent.RANDOM_POLICY
    # print("Generating the set of four tuples ...")
    # four_tuples_set = agent.generate_four_tuples(policy)
    # #four_tuples_set = agent.generate_four_tuples2(policy)
    # print("Generation of the set completed.")
    # print("Compute statistics over the set...")
    # positiveRewards = 0
    # negativeRewards = 0
    # for t in four_tuples_set:
    #     if t[2] == 1:
    #         positiveRewards += 1
    #     elif t[2] == -1:
    #         negativeRewards += 1
import matplotlib

matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import time
import os
from optim import trust_region_newton_step, adaptive_trust_region_newton_step, trust_region_newton_step2
from newton_cg import trust_region_newton_cg
from newton_cr import newton_step_cubic_regularization, newton_step_cr_cg, newton_adaptive_cubic_regularization, \
      newton_adaptive_cr_cg
import argparse
import random

import torch

# Set a random seed for deterministic results / reproducibility.
SEED = 1234

random.seed(SEED)
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True


def parse_args():
    parser = argparse.ArgumentParser(
        description='Train a classifier for CIFAR10')
    parser.add_argument('--model',
                        dest='model',
                        help='classifier type',
                        default='CNN',
                        type=str)
    parser.add_argument('--lr',
                        dest='lr',
                        help='learning rate',
Example #58
0
from __future__ import division
import numpy as np
import random
import datetime
import copy
from random import choice
from math import log, sqrt

random.seed("1234")


class State():
    def __init__(self, player1Cards, player2Cards, player3Cards, money,
                 playersMoney, roundMoney):
        self.player1Cards = player1Cards
        self.player2Cards = player2Cards
        self.player3Cards = player3Cards
        self.money = money
        self.playersMoney = playersMoney
        self.roundMoney = roundMoney

    def __hash__(self):
        return hash((str(self.player1Cards), str(self.roundMoney)))
        # return hash((str(self.player1Cards), str(self.player2Cards), str(self.player3Cards), str(self.roundMoney)))
        # return hash((str(self.player1Cards), str(self.player2Cards), str(self.player3Cards), str(self.money), str(self.playersMoney), str(self.roundMoney)))


class Board():
    # Returns a representation of the starting state of the game.
    def init(self, playerCards):
        if len(playerCards) == 0:
def worker_init_fn(worker_id):
    random.seed(worker_id + opt.random_seed)
    np.random.seed(worker_id + opt.random_seed)
    while len(clusters) < num_clusters:
        # choose the last-merged of our clusters
        next_cluster = min(clusters, key=get_merge_order)
        # remove it from the list
        clusters = [c for c in clusters if c != next_cluster]
        # and add its children to the list (i.e., unmerge it)
        clusters.extend(get_children(next_cluster))

    # once we have enough clusters...
    return clusters

if __name__ == "__main__":

    inputs = [[-14,-5],[13,13],[20,23],[-19,-11],[-9,-16],[21,27],[-49,15],[26,13],[-46,5],[-34,-1],[11,15],[-49,0],[-22,-16],[19,28],[-12,-8],[-13,-19],[-41,8],[-11,-6],[-25,-9],[-18,-3]]

    random.seed(0) # so you get the same results as me
    clusterer = KMeans(3)
    clusterer.train(inputs)
    print "3-means:"
    print clusterer.means
    print

    random.seed(0)
    clusterer = KMeans(2)
    clusterer.train(inputs)
    print "2-means:"
    print clusterer.means
    print

    print "errors as a function of k"