Example #1
def pack_entities(entities):
    s = ""
    # player start?
    classnames = [
        'info_player_start', 'info_player_deathmatch', 'testplayerstart'
    ]
    player_starts = [
        e for e in entities if "classname" in e and e.classname in classnames
    ]
    if len(player_starts) == 0:
        logging.warning(
            "Missing info_player_start entity in: {}".format(entities))
        player_starts = [
            dotdict({
                'classname': 'debug_player_start',
                'origin': dotdict({
                    'x': 0,
                    'y': 0,
                    'z': 0
                }),
                'angle': 0
            })
        ]
    player_start = player_starts[0]
    logging.info("Found player start: {} at: {}".format(
        player_start.classname, player_start.origin))
    s += pack_vec3(player_start.origin)
    s += pack_fixed(player_start.angle if "angle" in player_start else 0)

    return s
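
Every example on this page relies on a dotdict that exposes dictionary keys as attributes. The projects shown each ship their own variant (some as a standalone module, hence the dotdict.dotdict spelling below); a minimal sketch of the idea, assuming the common dict-subclass recipe:

class dotdict(dict):
    """dict subclass whose keys are also readable/writable as attributes."""
    __getattr__ = dict.get          # missing keys read as None instead of raising
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

d = dotdict({'classname': 'info_player_start'})
print(d.classname)   # attribute access -> 'info_player_start'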
Example #2
    def validate_and_prepare_args(self, args):
        """
        Prepares the given arguments such that they fit our needs. Returns
        a dotdict filled with the arguments.
        """

        # 'args' is given as Namespace object. Transform it to dotdict.
        prepared = dotdict(args)

        # Read key/value pairs from the tool info file.
        tool_info = util.read_tool_info(prepared.tool_root)
        prepared.update(tool_info)

        # The junk is given as string. Split it to get a list of junk tokens.
        junk = prepared.junk
        if junk is not None:
            prepared.junk = junk.split()
        else:
            prepared.junk = []

        # The dir filter is given as string. Split it to get a list of filters.
        dir_filter = prepared.dir_filter
        if dir_filter is not None:
            prepared.dir_filter = dir_filter.split()
        else:
            prepared.dir_filter = []

        # Set num_threads to None if it is <= 0.
        num_threads = prepared.num_threads
        if num_threads is not None:
            prepared.num_threads = num_threads if num_threads > 0 else None

        return prepared
Example #3
 def convert_dotdict(self, datas):
     cluster = dotdict()
     cluster.vectors = []
     cluster.vectors.append(datas[0])
     cluster.vectors.append(datas[1])
     cluster.centroids = self.calculate_centroid(cluster.vectors)
     return cluster
Example #4
 def __init__(self, folderName):
     myDATA = dotdict.dotdict()
     for x in (['B1H', 'B1V', 'B2H', 'B2V']):
         myFileList = glob.glob(folderName + '/*%s*.h5' % x)
         myTimestampList = []
         pus = []
         status = []
         if myFileList:
             fills = np.unique([
                 int(filename.split('/')[-1].split('_')[0])
                 for filename in myFileList
             ])
             df = {}
             for fill in fills:
                 df[fill] = importData.LHCFillsByNumber(fill)
                 df[fill] = df[fill][df[fill]['mode'] != 'FILL']
                 df[fill] = df[fill].reset_index(drop=True)
         for fileName in myFileList:
             fill = int((fileName.split('/')[-1].split('_'))[0])
             time = self.fromName2Timestamp(fileName.split('/')[-1])
             status.append(self.getStatus(time, fill, df))
             myTimestampList.append(time)
             pus.append(self.fromName2PU(fileName.split('/')[-1]))
         myDATA['at' + x] = pd.DataFrame(index=np.array(myTimestampList))
         myDATA['at' + x]['fileName'] = np.array(myFileList)
         myDATA['at' + x]['Status'] = status
         myDATA['at' + x]['PU'] = pus
     self.importEmptyDF = myDATA
Example #5
    def execute(self, X):
        clusters = []
        cluster = dotdict()
        cluster.vectors = []
        for x in X:
            cluster.vectors.append(x)

        cluster.centroid = self.calculate_centroid(cluster.vectors)

        clusters.append(cluster)
        while (len(clusters) != self.k):
            split_cluster = self.find_smallest_sim_cluster(clusters)

            # re-construct clusters except the split cluster
            clusters = [
                d for d in clusters
                if not np.array_equal(d['centroid'], split_cluster['centroid'])
            ]
            max_sim = float("-inf")
            max_bicluster = None
            for i in range(self.max_iter):
                kmeans = KMeans(k=2)

                # run k-means max_iter times and keep the best split
                biclusters = kmeans.kmeans(np.array(split_cluster.vectors))
                sim = kmeans.similitary(biclusters)
                if sim > max_sim:
                    max_bicluster = list(biclusters)
                    max_sim = sim

            # extend with the best split found, not the last one tried
            clusters.extend(max_bicluster)
        return clusters
Example #6
def param_ljg(params, cut, fixed_default=True, label=None):
    """Set up a LJG dictionary
    Notes
    -----
    Caution, no type checking implemented
    """
    tmp = {
        'type': 'ljg',
        'Label': label,
        'Cut': cut,
        'B': Parameter(1.0, fixed_default),
        'Kappa': Parameter(0.25, fixed_default),
        'Dist0': Parameter(0.0, fixed_default),
        'Sigma': Parameter(1.0, fixed_default),
        'Epsilon': Parameter(0.0, fixed_default)
    }
    set_parameters(tmp, params)
    """
    if isinstance(fixed,dict):
        for key,value in fixed.items():
            tmp[key].fixed = value
    elif fixed is None:
        pass 
    else:
        raise ValueError('fixed parameter must be a dictionary')
    """
    return dotdict(tmp)
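
A hypothetical call, assuming set_parameters(tmp, params) overwrites the default Parameter entries for whichever keys appear in params:

# Hypothetical usage of param_ljg; the key names follow the defaults above.
ljg = param_ljg({'Epsilon': 1.0, 'Sigma': 0.8}, cut=2.5, label='A-A')
print(ljg.type, ljg.Cut)   # dotdict gives attribute access: 'ljg' 2.5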
Example #7
    def prepare_arguments(self, args):
        """ Prepares the given arguments such that they fits our needs."""

        # 'args' is given as Namespace object. Transform it to dotdict.
        prepared = dotdict(args)

        # Read key/value pairs from the tool info file.
        tool_info = util.read_tool_info(prepared.output_dir)
        prepared.update(tool_info)

        # The dir filter is given as string. Split it to get a list of filters.
        dir_filter = prepared.dir_filter
        if dir_filter is not None:
            prepared.dir_filter = dir_filter.split()
        else:
            prepared.dir_filter = []

        # Ensure that a valid criterion for collecting PDF files is given.
        if prepared.criterion not in ("ALL", "NON_EXISTING"):
            prepared.criterion = "ALL"

        # Set num_threads to None if it is <= 0.
        num_threads = prepared.num_threads
        if num_threads is not None:
            prepared.num_threads = num_threads if num_threads > 0 else None

        return prepared
Example #8
    def assert_mailed(user, amount=1):
        if amount > 0:
            assert any_mails() and len(stubmailer.args) > 0, (
                'Nobody was mailed')

        user_id = helpers.get_id(user)
        user = m.User.query.get(user_id)
        assert user is not None, f'Given user {user_id} was not found'
        msgs = []

        for arg, in stubmailer.args:
            message = arg._message()
            recipients = message['To'].split(', ')
            assert recipients, 'A mail was sent to nobody'
            for recipient in recipients:
                print(recipient)
                if '<{}>'.format(user.email) in recipient:
                    msgs.append(
                        dotdict(
                            orig=message,
                            msg=arg.html,
                            subject=message['Subject'],
                            message_id=message['Message-ID'],
                            in_reply_to=message['In-Reply-To'],
                            references=(message['References']
                                        and message['References'].split(' ')),
                        ))
                    amount -= 1

        assert amount == 0, 'The given user was not mailed or was mailed too many times'
        return msgs
Example #9
    def exitBlock(self, ctx):
        # entity properties
        properties = dotdict({})
        for pair in ctx.pair():
            attribute = pair.keyword().getText().lower().strip('"')
            value = pair.args().getText().lower().strip('"')
            # decode special attributes
            if attribute in ['origin']:
                x, y, z = [float(v) for v in value.split(' ')]
                # fix Quake y/z orientation
                value = dotdict({'x': x, 'y': z, 'z': y})
            elif attribute in ['angle', 'speed', 'spawnflags']:
                value = int(value)
            # persist value
            properties[attribute] = value

        self.result.append(properties)
Example #10
    def size_data(self):
        "Data about this photo from the getSizes endpoint."
        try:
            return self._sizes
        except AttributeError:
            sizes = normalize_json(API.photos_getSizes(photo_id=self.id))
            self._sizes = dotdict(sizes['sizes'])

        return self._sizes
Example #11
def param_bond(params, fixed_default=True, label=None):
    tmp = {
        'type': 'bond',
        'Label': label,
        'Dist0': Parameter(0.0, fixed_default),
        'FConst': Parameter(1.0, fixed_default)
    }
    set_parameters(tmp, params)
    return dotdict(tmp)
Example #12
File: cfract.py Project: aib/cfract
def reset_state():
	state = dotdict.dotdict()

	state.dots = []
	for i in range(DOTS):
		dot = Dot(float(i) / DOTS)
		state.dots.append(dot)

	state.paths = []

	return state
Example #13
    def extract_tiles(self, texture_name, palette):
        # read image bytes
        src = Image.open(texture_name)
        width, height = src.size
        if width > 1024 or height > 1024:
            raise Exception(
                "Texture: {} invalid size: {}x{} - texture must be at most 1024x1024px"
                .format(texture_name, width, height))
        img = Image.new('RGBA', (width, height), (0, 0, 0, 0))
        img.paste(src, (0, 0, width, height))

        # extract tiles
        pico_gfx = [bytearray(32)]
        pico_map = bytearray()
        for j in range(0, math.floor(height / 8)):
            for i in range(0, math.floor(width / 8)):
                data = bytearray()
                for y in range(8):
                    # read nibbles (two 4-bit pixels per byte)
                    x_data = bytearray()
                    for x in range(0, 8, 2):
                        # print("{}/{}".format(i+x,j+y))
                        # image is using the pico palette (+transparency)
                        low = palette.register(
                            img.getpixel((i * 8 + x, j * 8 + y)))
                        high = palette.register(
                            img.getpixel((i * 8 + x + 1, j * 8 + y)))
                        x_data.insert(0, low | high << 4)
                    data += x_data
                # default tile for unreferenced zones
                tile = 0
                # known tile?
                if data in pico_gfx:
                    tile = pico_gfx.index(data)
                else:
                    tile = len(pico_gfx)
                    pico_gfx.append(data)
                # sprite 0 cannot be used
                if (tile == 1): tile = 0
                pico_map.append(tile)

        # map width
        width = width >> 3

        max_tiles = 16 * 4 * 4
        if len(pico_gfx) > max_tiles:
            raise Exception(
                "Too many unique tiles: {} in tileset: {} (max: {})".format(
                    len(pico_gfx), texture_name, max_tiles))

        logging.info("Tileset: Found {}/{} unique tiles".format(
            len(pico_gfx), max_tiles))

        return dotdict({'width': width, 'map': pico_map, 'gfx': pico_gfx})
Example #14
def param_ext_sin(params, fixed_default=True, label=None):
    tmp = {
        'type': 'ext_sin',
        'Label': label,
        'UConst': Parameter(0.0, True),
        'NPeriods': Parameter(1., True),
        'PlaneAxis': Parameter(0, True),
        'PlaneLoc': Parameter(0., True)
    }
    set_parameters(tmp, params)
    return dotdict(tmp)
Example #15
def param_ewald(params, cut, exclude_bond_ord=0, shift=True, label=None):
    tmp = {
        'type': 'ewald',
        'Label': label,
        'Cut': cut,
        'Shift': shift,
        'ExcludeBondOrd': exclude_bond_ord,
        'Coef': Parameter(0.0, True)
    }
    set_parameters(tmp, params)
    return dotdict(tmp)
Example #16
def mail_functions(monkeypatch, monkeypatch_celery, make_function_spy,
                   stubmailer):

    direct = make_function_spy(psef.mail, 'send_direct_notification_email')
    digest = make_function_spy(psef.mail, 'send_digest_notification_email')

    def any_mails():
        if direct.called:
            print('Direct emails send', direct.all_args, stubmailer.args)
            return True
        elif digest.called:
            print('Digest emails send', digest.all_args, stubmailer.args)
            return True
        return False

    def assert_mailed(user, amount=1):
        if amount > 0:
            assert any_mails() and len(stubmailer.args) > 0, (
                'Nobody was mailed')

        user_id = helpers.get_id(user)
        user = m.User.query.get(user_id)
        assert user is not None, f'Given user {user_id} was not found'
        msgs = []

        for arg, in stubmailer.args:
            message = arg._message()
            recipients = message['To'].split(', ')
            assert recipients, 'A mail was sent to nobody'
            for recipient in recipients:
                print(recipient)
                if '<{}>'.format(user.email) in recipient:
                    msgs.append(
                        dotdict(
                            orig=message,
                            msg=arg.html,
                            subject=message['Subject'],
                            message_id=message['Message-ID'],
                            in_reply_to=message['In-Reply-To'],
                            references=(message['References']
                                        and message['References'].split(' ')),
                        ))
                    amount -= 1

        assert amount == 0, 'The given user was not mailed or was mailed too many times'
        return msgs

    yield dotdict(
        send_mail=stubmailer,
        direct=direct,
        digest=digest,
        any_mails=any_mails,
        assert_mailed=assert_mailed,
    )
Example #17
 def __init__(self, folderName):
     myDATA = dotdict.dotdict()
     for x in (['B1H', 'B1V', 'B2H', 'B2V']):
         myFileList = glob.glob(folderName + '/*BQ%sT.%s*.h5' %
                                (x[-1], x[0:2]))
         myTimestampList = []
         for fileName in myFileList:
             time = self.fromName2Timestamp(fileName)
             myTimestampList.append(time)
         myDATA['at' + x] = pd.DataFrame(index=np.array(myTimestampList))
         myDATA['at' + x]['fileName'] = np.array(myFileList)
     self.importEmptyDF = myDATA
Example #18
 def setUp(self):
     self.env = MockEnv()
     self.nnet = MockNNet()
     self.args = dotdict({
         'simulation_num': 100,
         'c_puct': 5,
         'save_weights_path': '',
         'rows': 1,
         'columns': 3,
         'max_sample_pool_size': 100000,
     })
     self.mcts = MCTS(self.nnet, self.env, self.args)
     self.rl = RL(self.nnet, self.env, self.args)
Example #19
 def analyze(self, out, err, returncode):
     from dotdict import dotdict
     ret = {
         'goto_next': False,
         'hint': None,
         'ok_text': out,
         'err_text': err,
     }
     if out == 'next':
         ret['goto_next'] = True
     else:
         ret['hint'] = 'Try to write a command which prints "next"'
     return dotdict(ret)
Example #20
 def query_metadata(self, interval):
     """Builds and send a query to GC to get scanner images metadata.
     
     Args:
         interval: a tuple of string dates representing time interval
             from which to pull the data.
     
     Returns:
         urls: a list of dictionary metadata objects downloaded from GC.
     """
     query = """
         select
           phase_group_id,
           bs.meta_machine_name,
           bs.outline,
           bs.meta_orb_site,
           bs.barcode,
           bs.scanner_id,     
           um.url
         from {barcode_table} bs inner join 
         {media_table} um using (phase_group_id) 
         where bs.meta_event_time >= '{start_time}'
           and bs.meta_event_time <= '{end_time}'  
           and bs.scan_phase = 'scan'      
           and bs.scanner_id in ('1', '2')
           and barcode like 'A%'
           and bs.meta_orb_site in (
             'gapglt',
             'gapfrs',
             'gapfkl',
             'gapcmh'
              )
           and um.name like 'barcode-scan-%'
           and um.meta_event_time >= '{start_time}'
           and um.meta_event_time <= '{end_time}'   
         limit 1000
     """.format(
         start_time=interval[0],
         end_time=interval[1],
         barcode_table="kin-sort-metrics.raw_metrics.barcode_scans",
         media_table="kin-sort-metrics.raw_metrics.uploaded_media"
     )
     query_job = self.gc_client.query(query)  # Make an API request.
     urls = []
     for row in query_job:
         row = dotdict(row)
         urls.append(row)
     print("Found {} urls".format(len(urls)))
     return urls    
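
A hypothetical call, assuming self.gc_client is a google.cloud.bigquery Client (the query/iterate pattern above matches that API) and that `downloader` stands in for an instance of the class owning this method:

# Hypothetical usage; `downloader` and the date strings are illustrative.
urls = downloader.query_metadata(('2021-03-01', '2021-03-02'))
for row in urls[:3]:
    print(row.barcode, row.url)   # dotdict exposes the row's columns as attributes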
Example #21
 def analyze(self, out, err, returncode):
     ret = {
         'goto_next': False,
         'hint': None,
         'ok_text': out,
         'err_text': err,
     }
     if out and out.lower() == self.expected_out.lower():
         ret['hint'] = self.on_success_hint
         ret['goto_next'] = True
     elif err:
         ret['hint'] = self.on_err_hint
     else:
         ret['hint'] = self.on_wrong_out_hint
     return dotdict(ret)
Example #22
def param_sm_coul_ew_corr(params,
                          cut,
                          sm_coul_shift=True,
                          fixed_default=True,
                          label=None):
    tmp = {
        'type': 'sm_coul_ew_corr',
        'Label': label,
        'Cut': cut,
        'Shift': sm_coul_shift,
        'BornA': Parameter(1.0, fixed_default),
        'Coef': Parameter(1.0, fixed_default)
    }
    set_parameters(tmp, params)
    return dotdict(tmp)
Example #23
 def __init__(self, folderName):
     """
     Import the list of the DOROS acquisitions during MD4147. Sort by DOR (orbit) and DOS(oscillations) and by name of BPM. The final DataFrame will be: *.at*(DOS or DOR).*(name of BPM)
     ===EXAMPLE===
     from cl2pd import noise
     doros = noise.DOROS(/eos/user/s/skostogl/SWAN_projects/Noise/DOROS_data/181025)
     myDict= noise.doros.importEmptyDF
     """
     myDATA = dotdict.dotdict()
     for i, j in zip(['ORB', 'OSC'], ['DOR', 'DOS']):
         myFileList = glob.glob(folderName + '/%s*' % i)
         unique_bpms = self.fromName2BPM(myFileList)
         myTimestampList = []
         bpms = []
         for fileName in myFileList:
             myTimestampList.append(self.fromName2Timestamp(fileName))
             bpms.append(self.fromName2BPM(fileName))
         myDATA['at' + j] = dotdict.dotdict()
         for bpm in unique_bpms:
             idx = [a for a, b in enumerate(bpms) if b == bpm]
             myDATA['at' + j][bpm] = pd.DataFrame(
                 index=np.array(myTimestampList)[idx])
             myDATA['at' + j][bpm]['fileName'] = np.array(myFileList)[idx]
     self.importEmptyDF = myDATA
Example #24
def index_set_photos(photoset, index):
    """Add all photos in photoset to the specified DownloadIndex."""
    photos = normalize_json(API.photosets_getPhotos(photoset_id=photoset.id))
    photos = dotdict(photos['photoset'])

    # TODO: We need to add pagination support for big sets.
    assert int(photos.total) == len(photos.photo), "This set has too many photos."

    print("Indexing the %s photos in this set:\n" % photos.total)

    for idx, photo in enumerate(photos.photo, 1):
        photo = Photo(photo)

        print "%s. %s" % (idx, photo.details)

        index.add_to_index(photo, photoset)
Example #25
def importEmptyDF(folderName):
    """
    Import the list of the EPC acquisitions stored in folderName.
    ===EXAMPLE===
    from cl2pd import noise
    myDict=noise.importEmptyDF('/eos/project/a/abpdata/lhc/rawdata/power_converter')
    """
    myDATA = dotdict.dotdict()
    for i, j in zip(['current', 'voltage'], ['Current', 'Voltage']):
        myFileList = glob.glob(folderName + '/*' + i)
        myTimestampList = []
        for fileName in myFileList:
            myTimestampList.append(fromName2Timestamp(fileName))
        myDATA['at' + j] = pd.DataFrame(index=myTimestampList)
        myDATA['at' + j]['fileName'] = myFileList
    return myDATA
Example #26
 def setUp(self):
     self.args = dotdict({
         'rows': 3,
         'columns': 3,
         'n_in_row': 2,
         'history_num': 2,
         'conv_filters': 16,
         'conv_kernel': (3, 3),
         'residual_block_num': 2,
         'save_weights_path': './tmp',
         'max_sample_pool_size': 10000,
         'l2': 1e-4,
     })
     self.env = GomokuEnv(self.args)
     self.nnet = GomokuNNet(self.env, self.args)
     self.rl = GomokuRL(self.nnet, self.env, self.args)
Example #27
def index_all_set_photos(user_id, index):
    """Add all photos in all sets to the specified DownloadIndex."""
    print "Finding photosets for user %s..." % user_id

    sets = get_sets(user_id)

    # TODO: We need to add pagination support for large lists of sets.
    assert int(sets.total) == len(sets.photoset), "This user has too many photosets. Aborting."

    print "%s photosets found." % sets.total

    for idx, item in enumerate(sets.photoset, start=1):
        item = dotdict(item)

        print "\n%s. %s (ID: %s)\n" % (idx, item.title['_content'], item.id)

        index_set_photos(item, index)
Example #28
    def smallest(self):
        "Size data for the smallest photo available for this image."
        size = 0
        smallest = None

        for x in self.size_data.size:
            x = dotdict(x)
            pixel_count = int(x.height) * int(x.width)
            if pixel_count < size or size == 0:
                smallest = x
                size = pixel_count

        # Save the file extension to the size object
        smallest.ext = smallest.source.partition('?')[0].rpartition('.')[2]
        if len(smallest.ext) > 5:
            smallest.ext = "unk"

        return smallest
Example #29
    def largest(self):
        "Size data for the largest photo available for this image."
        size = 0
        biggest = None

        for x in self.size_data.size:
            x = dotdict(x)
            pixel_count = int(x.height) * int(x.width)
            if pixel_count >= size:
                biggest = x
                size = pixel_count

        # Save the file extension to the size object
        biggest.ext = biggest.source.partition('?')[0].rpartition('.')[2]
        if len(biggest.ext) > 5:
            biggest.ext = "unk"

        return biggest
Example #30
def follow_harmonics(df):
    """
  Follow 50 Hz oscillation of harmonics from the average FFT 
  """
    fs = 50000.
    lim = int(0.5 * fs / 50)
    t0 = df.iloc[0].name
    complex_fft = [[] for j in range(lim)]
    frequency = [[] for j in range(lim)]
    counter_tot = [[] for j in range(lim)]
    timestamp = [[] for j in range(lim)]
    dt = [[] for j in range(lim)]
    counter = 0
    for index, row in df.iterrows():
        print(row.name)
        data = row['data'].reshape(100, 10000)  ### Average of 100 acquisitions
        fourier = np.average(
            [abs(np.fft.fft(data[j, :])) for j in range(data.shape[0])],
            axis=0)
        fourier /= float(len(fourier)) * 2.0
        freqs = np.arange(0, len(data[0, :])) * fs / len(data[0, :])
        counter += 1
        for i in range(0, lim):
            timestamp[i].append(row.name)
            dt[i].append((row.name - t0).seconds / 60.)
            imin = (i + 1) * 50. - 10.
            imax = (i + 1) * 50. + 10.
            myfilter = (freqs > imin) & (freqs < imax)
            idx = np.argmax(abs(fourier[myfilter]))
            frequency[i].append(freqs[myfilter][idx])
            complex_fft[i].append(fourier[myfilter][idx])
            counter_tot[i].append(counter)
    df_fft = dotdict.dotdict()
    for harmonic in range(len(frequency)):
        df_fft['h%s' % harmonic] = pd.DataFrame(index=timestamp[harmonic])
        df_fft['h%s' % harmonic]['frequency'] = frequency[harmonic]
        df_fft['h%s' % harmonic]['fourier'] = complex_fft[harmonic]
        df_fft['h%s' % harmonic]['file_number'] = counter_tot[harmonic]
        df_fft['h%s' % harmonic]['dt'] = dt[harmonic]
    return df_fft
Example #31
def generate_firewall(filename):
    y = dotdict(read_yaml(filename))

    command_groups = []

    # Firewall groups

    if 'firewall_groups' in y.keys():
        for group_type in y.firewall_groups.keys():
            for group_name in y.firewall_groups[group_type].keys():
                prefix = f"firewall group {group_type}-group {group_name}"
                command_group = CommandGroup(prefix=prefix)
                for value in y.firewall_groups[group_type][group_name]:
                    command_group.append(f"{prefix} {group_type} '{value}'")
                command_groups.append(command_group)

    # Firewall rules

    for source_zone in y.zones:
        for destination_zone in y.zones:

            if source_zone == destination_zone:
                continue

            prefix = f"firewall name {source_zone}_TO_{destination_zone}4"
            group = CommandGroup(prefix=prefix)

            group.append(f"{prefix} default-action 'reject'")

            for source in y.rules.keys():
                for destination in y.rules[source].keys():
                    if belongs_to(y, source_zone, source) and belongs_to(
                            y, destination_zone, destination):
                        for rule in y.rules[source][destination]:
                            for command in vyos_rule(rule, prefix, source,
                                                     destination):
                                group.append(command)
            command_groups.append(group)

    return command_groups
Example #32
    def kmeans(self, X, k=2):
        old_centroids = X[
            np.random.choice(X.shape[0], size=k, replace=False), :]
        centroids = old_centroids
        iterations = 0
        lc_clusters = []
        while True:
            old_centroids = centroids
            lc_clusters = self.assign_point_to_clusters(X=X,
                                                        centroids=centroids)
            centroids = self.recalculate_centroids(lc_clusters)
            iterations += 1
            if (self.has_converged(iterations, centroids, old_centroids)):
                break

        final_clusters = []

        for index in range(len(centroids)):
            cluster = dotdict()
            cluster.centroid = centroids[index]
            cluster.vectors = lc_clusters[index]
            final_clusters.append(cluster)

        return final_clusters
Example #33
def index_all_photostream_photos(user_id, index):
    """Add the user's photostream photos to the specified DownloadIndex"""
    print "Inspecting photostream of user %s..." % user_id

    count = 1

    for output in Paginator(API.people_getPublicPhotos, user_id=user_id):
        stream = normalize_json(output)
        stream = dotdict(stream['photos'])

        print "\nIndexing page %s of %s:\n" % (stream.page, stream.pages)

        total = stream.total

        for photo in stream.photo:
            photo = Photo(photo)

            print "%s. %s" % (count, photo.details)

            index.add_to_index(photo)
            
            count += 1

    print "\n%s photostream photos found." % total
Example #34
from solution_checker import SolutionChecker
import random
from collections import defaultdict
from dotdict import dotdict

INDIVIDUAL_TILES = False
PREDICT_FULL_EXAMPLES = False
VISUALIZE_PREDICTIONS = False
ORIENTATIONS = 2

args = dotdict({
    'numIters': 8,
    'numEps': 3,
    'tempThreshold': 15,
    'updateThreshold': 0.6,
    'maxlenOfQueue': 200000,
    'numMCTSSims': 50,
    'arenaCompare': 2,
    'cpuct': 1,
    'checkpoint': './temp/',
    'load_model': False,
    'load_folder_file': ('/dev/models/8x100x50', 'best.pth.tar'),
    'numItersForTrainExamplesHistory': 40,
})

tiles_to_np_array = SolutionChecker.tiles_to_np_array


def gen_state(width, height, n_tiles, dg, scalar_tiles=False):
    '''
    get tiles and solution
    '''
Example #35
 def process(self, path, http_history):
     if self._path_contains(path, 'items'):
         self.process_item(dotdict(http_history))
     return True
Example #36
 def __get_pages(self, output):
     output = normalize_json(output)
     output = dotdict(output['photos'])
     return output.pages
Example #37

import numpy as np
import math
import dotdict
import time

params = dotdict.dotdict({
	'visits': 200,
	'c_puct': 1.1, # from katago
	'alpha': 0.3, # dirichlet noise parameter, set to 0.3 since that's what was used for chess,
	# since we can assume ~10 random plays in every game or so similar to chess
	# as explained in https://stats.stackexchange.com/questions/322831/purpose-of-dirichlet-noise-in-the-alphazero-paper

	'eps': 0.25
})
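
For reference, these parameters are typically combined at the root node as in AlphaZero, mixing Dirichlet noise into the prior; a sketch, assuming `priors` is the network's policy over the legal moves (the mixing formula is the standard one, not this class's actual code):

import numpy as np

def add_root_noise(priors, alpha=params.alpha, eps=params.eps):
    # AlphaZero-style exploration noise at the root: (1 - eps) * p + eps * Dir(alpha)
    noise = np.random.dirichlet([alpha] * len(priors))
    return (1 - eps) * np.asarray(priors) + eps * noise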


class BatchedMCTS():
	# Basically what we do is simulate a lot of games at once. At each step in each game we can batch the requested evaluations
	# This should result in a LOT faster game generation since we are batching the prediction data to the neural net


	def visit_up(self, board, netpi, netv, next_move = 0):
		board_hash = hash(board)

		if board_hash not in self.game_ended_cache:
			self.game_ended_cache[board_hash] = (board.get_game_ended(), board.get_win_result())

		if self.game_ended_cache[board_hash][0]:
			result = self.game_ended_cache[board_hash][1]
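
The batching idea from the class comment above — advance many game trees to a leaf each, then evaluate all leaves in one network call — might look roughly like the sketch below; every helper named here (select_leaf, encoded, expand, backup, predict) is an assumption, not BatchedMCTS's actual API:

import numpy as np

# Rough sketch of one batched simulation step, under the assumptions above.
def run_batched_step(trees, net):
    # 1. Each tree independently walks down to a leaf it wants evaluated.
    leaves = [tree.select_leaf() for tree in trees]
    # 2. One batched forward pass instead of len(trees) separate calls.
    boards = np.array([leaf.encoded() for leaf in leaves])
    pis, vs = net.predict(boards)
    # 3. Hand the results back so each tree can expand and back up the value.
    for leaf, pi, v in zip(leaves, pis, vs):
        leaf.expand(pi)
        leaf.backup(v)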
Example #38
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Dense, Add, Activation

import dotdict

import time

import numpy as np

import os

params = dotdict.dotdict({
    'learning_rate': 0.03,
    'epochs': 1,
    'c': 0.0001,  # from original 
    'momentum': 0.9  # from original
})

# We are not using a convolutional net because ultimate tic tac toe is inherently not-convolutional:
# if you have a convolution of 3x3, for example, then you might be taking positions from
# different boards, in which case there is almost no correlation between them, and the output is near-useless.
# So simple densely connected layers is probably better.
# Also it's a lot faster.

# Input vector will be 81 * 2 + 9 * 3 = 189: the first 81 are binary encodings of whether we have moved there or not,
# second 81 are whether opponent has moved there or not, and finally 9 telling us whether or not we have won
# the mini-boards, and the other 9 for whether the opponent has won the mini-boards.

# Last 9 are whether or not you can play on that mini-board.
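
Given that description, a minimal dense model could look like the sketch below. The hidden-layer sizes and loss choices are assumptions; only the 189-unit input layout (81 + 81 + 9 + 9 + 9) and the optimizer settings in `params` come from the comments above.

# Hypothetical sketch, not the project's actual architecture.
def build_model():
    inputs = keras.Input(shape=(189,))
    x = Dense(256, activation='relu',
              kernel_regularizer=regularizers.l2(params.c))(inputs)
    x = Dense(256, activation='relu',
              kernel_regularizer=regularizers.l2(params.c))(x)
    # Two heads, AlphaZero-style: move probabilities and a value estimate.
    pi = Dense(81, activation='softmax', name='pi')(x)
    v = Dense(1, activation='tanh', name='v')(x)
    model = keras.Model(inputs, [pi, v])
    model.compile(
        optimizer=keras.optimizers.SGD(learning_rate=params.learning_rate,
                                       momentum=params.momentum),
        loss=['categorical_crossentropy', 'mean_squared_error'])
    return model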
Example #39
 def __init__(self, photo):
     self._photo = dotdict(photo)
Example #40
def get_sets(user_id):
    """Retrieve a listing of the specified user's sets."""
    sets = normalize_json(API.photosets_getList(user_id=user_id))
    return dotdict(sets['photosets'])
Example #41
import os
import sys
from importlib import reload  # reload is no longer a builtin in Python 3
from dotdict import dotdict
import _settings_base as baseSettings


def import_path(fullpath):
    """
    Import a file with full path specification. Allows one to
    import from anywhere, something __import__ does not do.
    """
    path, filename = os.path.split(fullpath)
    filename, ext = os.path.splitext(filename)
    sys.path.insert(0, path)
    module = __import__(filename)
    reload(module)  # Might be out of date
    del sys.path[0]
    return module

userSettings = import_path(os.path.join('..', 'settings'))

# Assemble settings.
settings = {}
settings.update(vars(baseSettings))
settings.update(vars(userSettings))

sys.modules[__name__] = dotdict(settings)
Example #42
 def get_flow_settings(self):
     flow = self.get_flow_module().__flow__
     return dotdict(flow)