Example #1
    def fill(self):

        r_phi = self.r_phi
        b_phi = self.b_phi
        g_phi = self.g_phi

        if self.r_phi != 0:
            r_phi = pi / self.r_phi
        if self.b_phi != 0:
            b_phi = pi / self.b_phi
        if self.g_phi != 0:
            g_phi = pi / self.g_phi

        for idx in range(self.strip_length):
            idx2degree = scale(idx + self.counter, 0, self.max_range, 0, self.strip_length)

            r = sin(idx2degree + r_phi)
            g = sin(idx2degree + g_phi)
            b = sin(idx2degree + b_phi)

            r = scale(r, 0, 255, -1, 1)
            g = scale(g, 0, 255, -1, 1)
            b = scale(b, 0, 255, -1, 1)

            r = floor(r)
            g = floor(g)
            b = floor(b)

            self.pixels[idx]['color'] = RGB(r=r, g=g, b=b, a=self.color.a)

        self.counter += 1
        self.counter %= self.strip_length * 255
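Note: the scale helper itself is not shown in this example. From the calls above it appears to remap a value linearly from an input range to an output range, with the output bounds given first; a minimal sketch under that assumption (the signature is inferred from this example only, and other examples on this page use scale helpers with different signatures):

def scale(value, out_min, out_max, in_min, in_max):
    """Linearly remap value from [in_min, in_max] to [out_min, out_max] (assumed signature)."""
    return out_min + (value - in_min) * (out_max - out_min) / (in_max - in_min)

# e.g. mapping a sin() result in [-1, 1] onto a colour channel in [0, 255]:
scale(0.5, 0, 255, -1, 1)  # -> 191.25, floored to 191 above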
Example #2
def printminmaxavg(entries, details=False):
    """ Print start, current, minimum, maximum, and average usage
    """
    # list comprehension:
    values = [int(e[1]) for e in entries]
    min_used = min(values)
    max_used = max(values)
    max_used_entry = entries[values.index(max_used)]
    min_used_entry = entries[values.index(min_used)]
    avg_used = mean(values)

    if details:
        print('     started:', scale('M', entries[0][1]), 'on',
              entries[0][0].date())
        print('   currently:', scale('M', entries[-1][1]), 'on',
              entries[-1][0].date())
        print('    min used:', scale('M', min_used), 'on',
              min_used_entry[0].date())
        print('    max used:', scale('M', max_used), 'on',
              max_used_entry[0].date())
        print('    avg used:', scale('M', avg_used))
    else:
        print('     started:', humanize(entries[0][1]), 'on',
              entries[0][0].date())
        print('   currently:', humanize(entries[-1][1]), 'on',
              entries[-1][0].date())
        print('    min used:', humanize(min_used), 'on',
              min_used_entry[0].date())
        print('    max used:', humanize(max_used), 'on',
              max_used_entry[0].date())
        print('    avg used:', humanize(avg_used))
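Note: scale('M', value) and humanize(value) are not shown here; scale appears to express a raw byte count in a fixed unit. A hedged sketch of what such a helper might look like (name, behaviour and unit table are assumptions, not taken from the source):

UNITS = {'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3}

def scale(unit, value):
    # Assumed behaviour: express a raw byte count in the given unit,
    # e.g. scale('M', 3 * 1024 ** 2) -> '3.0M'
    return '%.1f%s' % (float(value) / UNITS[unit], unit)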
Example #3
    def image(self):
        image = pg.Surface(self.original_image.get_rect().size, pg.SRCALPHA)
        # rotating image
        img = pg.transform.rotate(self.original_image.copy(), self.rotation)
        # updating transparency of the image nearing the end of cooldown
        img.set_colorkey((102, 255, 0))
        alpha = int(self.cooldown * 255 / 100)
        img.set_alpha(alpha)

        if self.size[0] == 0 or self.size[1] == 0:
            self.size[0], self.size[1] = 0, 0
            img.set_alpha(0)
        else:
            if not alpha % 3:
                self.size[0] += 1
                self.size[1] += 1
            img = u.scale(img, self.size)
            self.rect.size = img.get_rect().size
            #self.rect.center = self.cfg["position"]
            image = pg.Surface(self.rect.size, pg.SRCALPHA)
        # drawing created image to returning surface
        image.blit(img, (0, 0))

        if self.scale > 1:
            old_position = self.rect.center
            old_size = image.get_rect().size
            image = u.scale(image, self.scale)
            new_size = image.get_rect().size
            self.rect.centerx -= (new_size[0] - old_size[0]) / self.scale
            self.rect.centery -= (new_size[1] - old_size[1]) / self.scale

        return image
Example #4
    def scale(rs, gs, bs):
        """
        Scale and convert to int lists of rgb values
        """

        # get maxs and mins
        r_min = min(rs)
        r_max = max(rs)

        g_min = min(gs)
        g_max = max(gs)

        b_min = min(bs)
        b_max = max(bs)

        # scale
        rs = [scale(r, 0, 255, r_min, r_max) for r in rs]
        gs = [scale(g, 0, 255, g_min, g_max) for g in gs]
        bs = [scale(b, 0, 255, b_min, b_max) for b in bs]

        # convert to int
        rs = [int(elem) for elem in rs]
        gs = [int(elem) for elem in gs]
        bs = [int(elem) for elem in bs]

        return rs, gs, bs
Example #5
def alpha32(df):
    """
    Alpha#32
    (scale(((sum(close, 7) / 7) - close)) + 
    (20 * scale(correlation(vwap, delay(close, 5), 230)))) 
    """
    temp1 = u.scale(((u.ts_sum(df.close, 7) / 7) - df.close))
    temp2 = (20 * u.scale(u.corr(df.vwap, u.delay(df.close, 5), 230)))
    return temp1 + temp2
Example #6
def alpha60(df):
    """
    Alpha#60
    (0 - (1 * ((2 * scale(rank(((((close - low) - (high - close)) / (high - low)) * volume)))) 
    - scale(rank(ts_argmax(close, 10))))))
    """
    temp1 = u.scale(
        u.rank(((((df.close - df.low) - (df.high - df.close)) /
                 (df.high - df.low)) * df.volume)))
    return (0 - (1 *
                 ((2 * temp1) - u.scale(u.rank(u.ts_argmax(df.close, 10))))))
Example #7
    def _radar(self, spectrum):
        r = [0] * spectrum
        for b in self.balls:
            angle = utils.angle(self.agent.x, b.x, self.agent.y, b.y)
            distance = utils.dist(self.agent.x, b.x, self.agent.y, b.y)
            r[utils.sector(angle, spectrum)] += utils.scale(distance)

        r[utils.sector(0, spectrum)] += utils.scale(self.width - self.agent.x)
        r[utils.sector(90, spectrum)] += utils.scale(self.height - self.agent.y)
        r[utils.sector(180, spectrum)] += utils.scale(self.agent.x)
        r[utils.sector(270, spectrum)] += utils.scale(self.agent.y)
        return r
Example #8
 def _radar(self, spectrum):
     r = [0] * spectrum
     for b in self.balls:
         angle = utils.angle(self.agent.x, b.x, self.agent.y, b.y)
         distance = utils.dist(self.agent.x, b.x, self.agent.y, b.y)
         r[utils.sector(angle, spectrum)] += utils.scale(distance)
     # feed the network signals from the walls as well, so the agent does not stick to the edges
     r[utils.sector(0, spectrum)] += utils.scale(self.width - self.agent.x)
     r[utils.sector(90, spectrum)] += utils.scale(self.height - self.agent.y)
     r[utils.sector(180, spectrum)] += utils.scale(self.agent.x)
     r[utils.sector(270, spectrum)] += utils.scale(self.agent.y)
     return r
Example #9
    def pre_process(self):
        #tf.keras.K.image_data_format() == 'channels_first'
        a = False
        if a:
            x_train = self.x_train.reshape(self.x_train.shape[0], 1,
                                           self.img_rows, self.img_cols)
            x_val = self.x_val.reshape(self.x_val.shape[0], 1, self.img_rows,
                                       self.img_cols)
            x_test = self.x_test.reshape(self.x_test.shape[0], 1,
                                         self.img_rows, self.img_cols)
            input_shape = (1, self.img_rows, self.img_cols)
        else:
            x_train = self.x_train.reshape(self.x_train.shape[0],
                                           self.img_rows, self.img_cols, 1)
            x_val = self.x_val.reshape(self.x_val.shape[0], self.img_rows,
                                       self.img_cols, 1)
            x_test = self.x_test.reshape(self.x_test.shape[0], self.img_rows,
                                         self.img_cols, 1)
            input_shape = (self.img_rows, self.img_cols, 1)
        x_train = x_train.astype('float32')
        x_val = x_val.astype('float32')
        x_test = x_test.astype('float32')
        x_train, X_min, X_max = scale(x_train, 0, 255)
        x_val, _, _ = scale(x_val, 0, 255, X_min=X_min, X_max=X_max)
        x_test, _, _ = scale(x_test, 0, 255, X_min=X_min, X_max=X_max)
        x_train /= 255
        x_val /= 255
        x_test /= 255
        # convert class vectors to binary class matrices
        f = False
        if f:
            i = 0
            for row in x_train:
                x_train[i, :] = tf.keras.preprocess_input(row)
                i = i + 1
            i = 0
            for row in x_val:
                x_val[i, :] = tf.keras.preprocess_input(row)
                i = i + 1
            i = 0
            for row in x_test:
                x_test[i, :] = tf.keras.preprocess_input(row)
                i = i + 1
        if self.one_hot:
            self.y_train = tf.keras.utils.to_categorical(
                self.y_train, self.num_classes)
            self.y_val = tf.keras.utils.to_categorical(self.y_val,
                                                       self.num_classes)
            self.y_test = tf.keras.utils.to_categorical(
                self.y_test, self.num_classes)

        return  #x_train, y_train, x_val, y_val, x_test, y_test, input_shape
Example #10
def alpha28(df):
    """  
    Alpha#28
    scale(((correlation(adv20, low, 5) + ((high + low) / 2)) - close))
    """
    return u.scale(((u.corr(u.adv(df, 20), df.low, 5) +
                     ((df.high + df.low) / 2)) - df.close))
Example #11
    def predict(self, X, threshold=0.8):
        """Predict whether samples in X ar anomalous based on reconstruction
            performance of the BiGAN.

            Parameters
            ----------
            X : np.array of shape=(n_samples, n_features)
                Samples to predict.

            threshold : float, default=0.8
                Maximum MSE to be considered normal.

            Returns
            -------
            result : np.array of shape=(n_samples,)
                Prediction of -1 (anomalous) or +1 (normal).
            """
        # Rescale X to range -1 to 1
        X = scale(X, min=-1, max=1)
        # Get latent representation of X
        z = self.encoder.predict(X)
        # Reconstruct output of X
        r = self.generator_data.predict(z)

        # Compute MSE between original and reconstructed
        mse = np.square(X - r).reshape(X.shape[0], -1).mean(axis=1)

        # Apply threshold for prediction
        predict = 2 * (mse <= threshold) - 1

        # Return result
        return predict
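A hypothetical usage sketch (the bigan instance and X_test array are invented for illustration):

labels = bigan.predict(X_test, threshold=0.8)  # +1 = normal, -1 = anomalous
anomalies = X_test[labels == -1]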
Example #12
    def closest_point(self, f_x, f_y, s_x, s_y, obs_bearing):
        '''
        Calculate the closest point on the line to the feature
        The feature is the point (probably not on the line)
        The line is defined by a point (state x and y) and direction (heading)
        This probably won't return a point that is behind the x-y-bearing.
        Input:
            f_x float (feature's x coordinate)
            f_y float (feature's y coordinate)
            s_x float (robot state's x)
            s_y float (robot state's y)
            obs_bearing float (robot state's heading)
        '''
        origin_to_feature = (
            f_x - s_x,
            f_y - s_y,
            0.0,
        )
        line_parallel = unit((cos(obs_bearing), sin(obs_bearing), 0.0))

        # origin_to_feature dot line_parallel = magnitude of otf along line
        magmag = dot_product(origin_to_feature, line_parallel)

        if magmag < 0:
            return (s_x, s_y)

        scaled_line = scale(line_parallel, magmag)
        scaled_x = scaled_line[0]
        scaled_y = scaled_line[1]

        return (float(s_x + scaled_x), float(s_y + scaled_y))
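As a quick numeric check (not part of the source): with the robot at the origin heading along +x (obs_bearing = 0) and a feature at (3, 4), origin_to_feature is (3, 4, 0), line_parallel is (1, 0, 0), magmag is 3, and the method returns (3.0, 0.0), the projection of the feature onto the heading line.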
Example #13
def split_image_to_4(image):
    image_1 = image
    image_2 = rotate(image, 20, BATCH_SIZE_DEFAULT)
    image_3 = scale(image, 20, 4, BATCH_SIZE_DEFAULT)
    image_4 = random_erease(image, BATCH_SIZE_DEFAULT)

    image_1 = image_1.to('cuda')
    image_2 = image_2.to('cuda')
    image_3 = image_3.to('cuda')
    image_4 = image_4.to('cuda')

    # image = image.to('cuda')
    # show_mnist(image_1[0], 20, 28)
    # show_mnist(image_1[1], 20, 28)
    # show_mnist(image_1[2], 20, 28)
    # show_mnist(image_1[3], 20, 28)
    #
    # show_mnist(image_2[0], 20, 28)
    # show_mnist(image_2[1], 20, 28)
    # show_mnist(image_2[2], 20, 28)
    # show_mnist(image_2[3], 20, 28)
    #
    # input()
    # print(image_1.shape)
    # print(image_2.shape)
    # print(image_3.shape)
    # print(image_4.shape)
    # input()

    return image_1, image_2, image_3, image_4
Example #14
    def sample_images(self, outfile, data=None, width=5, height=5):
        """Generate width x height images and write them to outfile.

            Parameters
            ----------
            outfile : string
                Path to outfile to write image to.

            data : np.array, optional
                If given, plot these images instead of generating new ones.

            width : int, default=5
                Number of generated images in width of output figure.

            height : int, default=5
                Number of generated images in height of output figure.
            """
        # Generate random images
        if data is None:
            X_fake = self.generate(amount=(height * width))
        else:
            X_fake = data

        # Rescale images 0 - 1
        X_fake = scale(X_fake, 0, 1)

        # Create subplot
        fig, axs = plt.subplots(height, width)
        counter = 0
        for x in range(height):
            for y in range(width):
                axs[x, y].imshow(X_fake[counter], cmap='gray')
                axs[x, y].axis('off')
                counter += 1
        fig.savefig(outfile)
        plt.close()
Example #15
    def create_image(self):
        img = self.original_image.copy()

        if self.cfg["scale"]:
            img = u.scale(img, (
                int(img.get_rect().width * self.cfg["scale"]),
                int(img.get_rect().height * self.cfg["scale"]),
            ))

        if self.cfg["rotation"] > 0:
            img = pg.transform.rotate(img, self.cfg["rotation"])

        if self.bow:
            width = int(self.rect.width)
            height = int(self.rect.height)

            if self.bow == "left" or self.bow == "right":
                width -= 3
            if self.bow == "up" or self.bow == "down":
                height -= 5

            img = pg.transform.scale(img, (width, height))

        if self.cfg["box"]:
            img = u.drawBorder(img, size=1, color=(255, 0, 0))

        return img
Example #16
def get_harmony_generator():
    other_harmonies = get_random_valids()

    if random.random() < .7:
        # fewer other harmonies
        percent_other = scale(random.random(), 0, 1, 0.0, 0.1)

    else:
        # more other harmonies
        percent_other = scale(random.random(), 0, 1, 0.1, 0.6)

    while True:
        if random.random() < percent_other:
            yield random.choice(other_harmonies)
        else:
            yield context_free_harmony.choose()
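Taken together, the two branches keep other harmonies rare on average: assuming scale maps random.random() linearly onto the stated range, the expected share is roughly 0.7 * 0.05 + 0.3 * 0.35 = 0.14, so about 14 % of yielded harmonies come from other_harmonies.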
Example #17
    def closest_point(self, f_x, f_y, s_x, s_y, obs_bearing):
        '''
        Calculate the closest point on the line to the feature
        The feature is the point (probably not on the line)
        The line is defined by a point (state x and y) and direction (heading)
        This probably won't return a point that is behind the x-y-bearing.
        Input:
            f_x float (feature's x coordinate)
            f_y float (feature's y coordinate)
            s_x float (robot state's x)
            s_y float (robot state's y)
            obs_bearing float (robot state's heading)
        '''
        origin_to_feature = (f_x - s_x, f_y - s_y, 0.0,)
        line_parallel = unit((cos(obs_bearing), sin(obs_bearing), 0.0))

        # origin_to_feature dot line_parallel = magnitude of otf along line
        magmag = dot_product(origin_to_feature, line_parallel)
        
        if magmag < 0:
            return (s_x, s_y)
        
        scaled_line = scale(line_parallel, magmag)
        scaled_x = scaled_line[0]
        scaled_y = scaled_line[1]

        return (float(s_x + scaled_x), float(s_y + scaled_y))
Example #18
 def get_confidence(self):
     if self.confidence_level is None or self.confidence_level > constants.ZERO_CONFIDENCE:
         return '0', False
     elif self.confidence_level == 0:
         return '100', False
     elif self.confidence_level < constants.SUCCESS_CONFIDENCE:
         scaled_val = utils.scale(self.confidence_level,
                                  constants.SUCCESS_CONFIDENCE_SCALE,
                                  constants.SUCCESS_PERCENT_SCALE)
         perc = 100 - scaled_val
         return '%.2f' % perc, True
     else:
         scaled_val = utils.scale(self.confidence_level,
                                  constants.FAIL_CONFIDENCE_SCALE,
                                  constants.FAIL_PERCENT_SCALE)
         perc = constants.SUCCESS_PERCENT - scaled_val
         return '%.2f' % perc, False
Example #20
def main():
    z, x, y = read('data/alaska/clipped_elev.tif')
    rgb, _, _ = read('data/alaska/clipped_map.tif')
    rgb = np.swapaxes(rgb.T, 0, 1)

    fig = mlab.figure()

    surf = mlab.mesh(x[::2, ::2], y[::2, ::2], z[::2, ::2])
    utils.texture(surf, rgb)
    build_sides(x, y, z, -1000)
    build_bottom(x, y, z, -1000)

    
    utils.scale(fig, (1, 1, 2.5))
    utils.scale(fig, 0.00001)
#    shapeways_io.save_vrml(fig, 'models/alaska_textured_sides.zip')
    utils.present(fig)
Example #21
def parse_scale(element, csg_graph):
    # Get the current csg object
    V, F = csg_graph[element.find('operand').text.strip()]
    # Scale parameter (required)
    s = element.find('scale').text
    s = list(map(float, s[s.find('[') + 1:s.find(']')].split(',')))
    # Scale and return the "new" vertices
    return scale(V, s), F
Example #22
def test_across_scale_difference(img: np.ndarray, out_file: str,
                                 finer_scale: int, coarse_scale: int):
    pyramid = utils.pyramid_dictionary(img, 8)
    difference = utils.across_scale_difference(pyramid, finer_scale,
                                               coarse_scale)
    difference = utils.scale(difference)

    cv2.imwrite(out_file, difference)
Example #23
    def hessian(self, m_dot, project=False):
        """ Evaluates the Hessian action at the most recently evaluated control
        value in direction m_dot.

	Args:
            m_dot: The direction in control space in which to compute the
                Hessian. Must be of the same type as the Control (e.g. Function,
                Constant or lists of latter).

            project (Optional[bool]): If True, the returned value will be the L2
                Riesz representer, if False it will be the l2 Riesz representative.
                The L2 projection requires one additional linear solve.  Defaults to
                False.

	Returns:
	    The directional second derivative. The returned type is the same as the control
            type.

        Note: Hessian evaluations never delete the forward state.
        """

        # Check if we already have the Hessian in the cache.
        # If so, return the cached value
        if self.cache is not None:
            hash = value_hash([x.data() for x in self.controls] + [m_dot])
            fnspaces = [p.data().function_space() if isinstance(p.data(),
                Function) else None for p in self.controls]

            if hash in self._cache["hessian_cache"]:
                info_green("Got a Hessian cache hit.")
                return cache_load(self._cache["hessian_cache"][hash], fnspaces)
            else:
                info_red("Got a Hessian cache miss")

        # Compute the Hessian action by solving the second order adjoint equations
        Hm = self.H(m_dot, project=project)

        # Apply the scaling factor
        scaled_Hm = utils.scale(Hm, self.scale)

        # Call callback
        control_data = [p.data() for p in self.controls]
        if self.current_func_value is not None:
            current_func_value = self.scale * self.current_func_value
        else:
            current_func_value = None

        self.hessian_cb(current_func_value,
                        delist(control_data, list_type=self.controls),
                        m_dot, scaled_Hm)

        # Cache the result
        if self.cache is not None:
            self._cache["hessian_cache"][hash] = cache_store(scaled_Hm, self.cache)

        return scaled_Hm
Example #24
    def __init__(self, ranges=False):
        score = self.score = Score()
        self.instruments = self.i = Instruments()
        self.parts = Parts(self.i)

        # Make Metadata
        timestamp = datetime.datetime.utcnow()
        metadata = Metadata()
        metadata.title = 'Short Stories'
        metadata.composer = 'Jonathan Marmor'
        metadata.date = timestamp.strftime('%Y/%m/%d')
        score.insert(0, metadata)

        [score.insert(0, part) for part in self.parts.l]
        score.insert(0, StaffGroup(self.parts.l))

        if ranges:
            # Don't make a piece, just show the instrument ranges
            for inst, part in zip(self.instruments.l, self.parts.l):
                measure = Measure()
                measure.timeSignature = TimeSignature('4/4')
                low = Note(inst.lowest_note)
                measure.append(low)
                high = Note(inst.highest_note)
                measure.append(high)
                part.append(measure)
            return

        self.duet_options = None

        # 8 to 12 minutes
        max_duration = 12
        piece_duration_minutes = scale(random.random(), 0, 1, 8, max_duration)

        # Make the "songs"
        self.songs = []
        total_minutes = 0
        n = 1
        while total_minutes < piece_duration_minutes - .75:
            print
            print 'Song', n
            song = Song(self, n)
            self.songs.append(song)
            print 'Song Duration:', int(round(song.duration_minutes * 60.0))
            print 'Tempo:', song.tempo
            print 'Number of Beats:', song.duration_beats

            n += 1
            total_minutes += song.duration_minutes

        _minutes, _seconds = divmod(total_minutes, 1.0)
        print
        print 'Total Duration: {}:{}'.format(int(_minutes), int(round(_seconds * 60)))
        print

        self.make_notation()
Example #25
    def choose_movement_durations(self):
        """Choose the durations of the two major sections of the piece.

        Choose a split point between the golden mean of the whole piece and
        the golden mean of the section between the golden mean of the piece and
        the end of the piece."""

        minimum = GOLDEN_MEAN

        # The golden mean of the section between the golden mean and 1
        maximum = scale(minimum, 0, 1, minimum, 1)

        # Pick a random float between minimum and maximum
        division = scale(random.random(), 0, 1, minimum, maximum)

        # Get the durations of each section
        one = int(scale(division, 0, 1, 0, self.piece_duration))
        two = self.piece_duration - one
        return one, two
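For a concrete sense of the bounds, a small arithmetic sketch (assuming GOLDEN_MEAN ≈ 0.618 and the linear remapping scale(value, in_min, in_max, out_min, out_max) used elsewhere in this project):

GOLDEN_MEAN = 0.618
minimum = GOLDEN_MEAN
maximum = minimum + GOLDEN_MEAN * (1 - minimum)  # scale(0.618, 0, 1, 0.618, 1) ≈ 0.854
# division is drawn uniformly from [0.618, 0.854], so the first movement gets
# roughly 62-85 % of piece_duration and the second movement the remainder.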
Example #26
    def goal_force(self, pose, goal):
        dx = goal.position.x - pose.position.x
        dy = goal.position.y - pose.position.y

        weight = 5.0

        farce = (dx, dy, 0)
        farce = unit(farce)
        farce = scale(farce, weight)
        return farce
Example #27
def render(edges,
           vertices,
           scale=(1, 1),
           position=(0, 0),
           offset=(0, 0),
           color="black"):
    wn = turtle.Screen()
    t = turtle.Turtle()
    t.speed(0)
    t.pensize(1)
    t.hideturtle()
    wn.tracer(0, 0)
    t.pencolor(color)
    t.penup()

    # copy by value
    local_vertices = [] + vertices

    # find center of object
    midpoint = utils.vertices_midpoint(local_vertices)

    # adjust scale and position
    # move center of object to origin point for easier manipulation
    local_vertices = utils.translate(local_vertices,
                                     [-midpoint[0], -midpoint[1]])

    local_vertices = utils.scale(local_vertices, scale)

    min_x = utils.get_min_x(local_vertices)
    max_y = utils.get_max_y(local_vertices)

    local_vertices = utils.translate(
        local_vertices,
        (-min_x + offset[0] + position[0], max_y + offset[1] + position[1]))

    # drawing
    for edge in edges:
        t.penup()

        from_edge = edge[0] - 1
        to_edge = edge[1] - 1
        p = local_vertices[from_edge]

        x_2d = p[0]
        y_2d = p[1]
        t.goto(x_2d, y_2d)

        p = local_vertices[to_edge]
        t.pendown()
        x_2d = p[0]
        y_2d = p[1]
        t.goto(x_2d, y_2d)

    wn.update()
Example #28
def compute_scores(f_ground_truth, f_prediction, parameters):

    # Load ground truth
    raw_graph = np.loadtxt(f_ground_truth, delimiter=",")
    row = raw_graph[:, 0] - 1
    col = raw_graph[:, 1] - 1
    data = raw_graph[:, 2]
    valid_index = data > 0
    y_true = coo_matrix((data[valid_index],
                         (row[valid_index], col[valid_index])),
                        shape=(1000, 1000))

    y_true = y_true.toarray()

    if parameters.get("killing", None):

        # load name_kill_var
        killing_file = os.path.join(WORKING_DIR, "datasets", "hidden-neurons",
                                    "{0}_kill_{1}.txt"
                                    "".format(parameters["network"],
                                              parameters["killing"]))
        kill = np.loadtxt(killing_file, dtype=np.int)

        # make a mask
        alive = np.ones((y_true.shape[0],), dtype=bool)
        alive[kill - 1] = False  # subtract 1 because MATLAB indices are 1-based
        y_true = y_true[alive][:, alive]


    # Load predictions
    rows = []
    cols = []
    scores = []
    with open(f_prediction) as fhandle:
        fhandle.next()

        for line in fhandle:
            line = line.strip()

            prefix, score = line.rsplit(",", 1)
            scores.append(float(score))
            row, col = prefix.split("_")[-2:]
            rows.append(int(row) - 1)
            cols.append(int(col) - 1)
    y_scores = scale(coo_matrix((scores, (rows, cols))).toarray())

    print(y_true.shape)
    print(y_scores.shape)

    # Compute scores
    measures = dict((name, metric(y_true.ravel(), y_scores.ravel()))
                    for name, metric in METRICS.items())

    return measures
Example #29
    def create_image(self):  # pg.surface
        surface = pg.Surface(self.cfg["size"], pg.SRCALPHA)
        image = self.original_image.copy()
        if bool(random.getrandbits(1)):
            image = pg.transform.flip(image, bool(random.getrandbits(1)),
                                      bool(random.getrandbits(1)))
        image = u.scale(image, self.cfg["size"])

        surface.blit(image, (0, 0))

        return surface
Example #30
    def __getitem__(self, item):
        image_path = self.images[item]
        boxes = self.objects[item]

        image = Image.open(image_path)
        shape = image.size
        image = self.transforms(image)

        boxes = torch.tensor(boxes, dtype=torch.float32)
        boxes = scale(boxes, shape)

        return image, boxes
Example #31
def compute_scores(f_ground_truth, f_prediction, parameters):

    # Load ground truth
    raw_graph = np.loadtxt(f_ground_truth, delimiter=",")
    row = raw_graph[:, 0] - 1
    col = raw_graph[:, 1] - 1
    data = raw_graph[:, 2]
    valid_index = data > 0
    y_true = coo_matrix(
        (data[valid_index], (row[valid_index], col[valid_index])),
        shape=(1000, 1000))

    y_true = y_true.toarray()

    if parameters.get("killing", None):

        # load name_kill_var
        killing_file = os.path.join(
            WORKING_DIR, "datasets", "hidden-neurons", "{0}_kill_{1}.txt"
            "".format(parameters["network"], parameters["killing"]))
        kill = np.loadtxt(killing_file, dtype=np.int)

        # make a mask
        alive = np.ones((y_true.shape[0], ), dtype=bool)
        alive[kill - 1] = False  # subtract 1 because MATLAB indices are 1-based
        y_true = y_true[alive][:, alive]

    # Load predictions
    rows = []
    cols = []
    scores = []
    with open(f_prediction) as fhandle:
        fhandle.next()

        for line in fhandle:
            line = line.strip()

            prefix, score = line.rsplit(",", 1)
            scores.append(float(score))
            row, col = prefix.split("_")[-2:]
            rows.append(int(row) - 1)
            cols.append(int(col) - 1)
    y_scores = scale(coo_matrix((scores, (rows, cols))).toarray())

    print(y_true.shape)
    print(y_scores.shape)

    # Compute scores
    measures = dict((name, metric(y_true.ravel(), y_scores.ravel()))
                    for name, metric in METRICS.items())

    return measures
Example #32
    def off_axis_error(self, location, goal):
        """
        calc error normal to axis defined by the goal position and direction

        input: two nav_msgs.msg.Odometry, current best location estimate and
         goal
        output: double distance normal to the axis

        axis is defined by a vector from the unit circle aligned with the goal
         heading
        relative position is the vector from the goal x, y to the location x, y

        distance is defined by subtracting the parallel vector from the total
         relative position vector

        example use:
        see calc_errors above
        """
        relative_position_x = (location.pose.pose.position.x -
            goal.pose.pose.position.x)
        relative_position_y = (location.pose.pose.position.y -
            goal.pose.pose.position.y)
        
        # relative position of the best estimate position and the goal
        # vector points from the goal to the location
        ## relative_position = (relative_position_x, relative_position_y, 0.0)

        goal_heading = quaternion_to_heading(goal.pose.pose.orientation)
        goal_vector_x = math.cos(goal_heading)
        goal_vector_y = math.sin(goal_heading)

        # vector in the direction of the goal heading, axis of desired motion
        goal_vector = (goal_vector_x, goal_vector_y, 0.0)

        along_axis_error = self.along_axis_error(location, goal)

        along_axis_vec = scale(unit(goal_vector), along_axis_error)

        new_rel_x = relative_position_x - along_axis_vec[0]
        new_rel_y = relative_position_y - along_axis_vec[1]
        
        new_rel_vec = (new_rel_x, new_rel_y, 0.0)

        error_magnitude = math.sqrt(new_rel_x*new_rel_x + 
            new_rel_y*new_rel_y)

        if error_magnitude < .0001:
            return 0.0

        if cross_product(goal_vector, new_rel_vec)[2] >= 0.0:
            return error_magnitude
        else:
            return -error_magnitude
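A quick numeric check (illustrative only, and assuming along_axis_error returns the signed projection of the relative position onto the goal heading): with the goal at the origin heading along +x and the location at (2, 3), the relative position is (2, 3), along_axis_error is 2, along_axis_vec is (2, 0, 0), new_rel_vec is (0, 3, 0), and the method returns +3.0, since the location lies to the left of the goal axis.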
Example #33
def main():
    USING_CACHE = True
    series_cache_path = './data/series_cache.pkl'

    data_series = extractHourlyPower(series_cache_path, USING_CACHE)
    
    training_day = 20 # 18 for period; 20 for encoder-decoder
    total_day = 24
    train_series, test_series = data_series[:training_day*24], data_series[(training_day-total_day)*24:]
    y_true = copy.copy(test_series[-72:].values)
    
    #history_average 
    #prediction = history_average(data_series)
    
    scaler, train_series, test_series = scale(train_series, test_series)

    # fetch vanilla LSTM training data and model
    X_train, y_train, X_test, y_test = SeriesToXy_vanilla(train_series, test_series, window=25)
    model = fit_lstm(input_shape=(24,1))

    # fetch encoder_decoder training data and model
    #X_train, y_train, X_test, y_test = SeriesToXy_ed(train_series, test_series, window=25)
    #model = fit_encoder_decoder(12, 1, 1)

    #fetch period training data and model
    #X_train, y_train, X_test, y_test = SeriesToXy_period(train_series, test_series, window = 73)
    #model = fit_period_lstm()

    early_stopping = EarlyStopping(monitor='loss', patience=5, mode='min')
    # Run training
    model.compile(optimizer='adam', loss='mean_squared_error')
    model.summary()

    model.fit(X_train, y_train,
              batch_size=1,
              epochs=1, 
              validation_split=0.2,
              callbacks = [early_stopping])
    
    prediction = model.predict(X_test)

    prediction = scaler.inverse_transform(prediction)

    # encoder decoder 
    #prediction = prediction[:, -1,:]

    prediction = prediction.reshape(-1)
    rmse = sqrt(smet.mean_squared_error(y_true, prediction))
    
    print('rmse: ', rmse)
    save_cache(prediction, './data/pre_vanilla_batch1_lstm128-32_patience5.pkl')

    K.clear_session()# tensorflow bug
Example #34
    def get_loss(self, trip_od, scaled_trip_volume, in_flows, out_flows, g, multitask_weights=[0.5, 0.25, 0.25]):
        '''
        defines the procedure of evaluating loss function

        Inputs:
        ----------------------------------
        trip_od: list of origin-destination pairs
        scaled_trip_volume: scaled ground-truth trip volumes, which serve as our target.
        in_flows, out_flows: ground-truth in/out flows per node
        g: DGL graph object

        Outputs:
        ----------------------------------
        loss: value of loss function
        '''
        # calculate the in/out flow of nodes
        # scaled back trip volume
        trip_volume = utils.scale_back(scaled_trip_volume)
        # get in/out nodes of this batch
        out_nodes, out_flows_idx = torch.unique(trip_od[:, 0], return_inverse=True)
        in_nodes, in_flows_idx = torch.unique(trip_od[:, 1], return_inverse=True)
        # scale the in/out flows of the nodes in this batch
        scaled_out_flows = utils.scale(out_flows[out_nodes])
        scaled_in_flows = utils.scale(in_flows[in_nodes])
        # get embeddings of each node from GNN
        src_embedding = self.forward(g)
        dst_embedding = self.forward2(g)
        # get edge prediction
        edge_prediction = self.predict_edge(src_embedding, dst_embedding, trip_od)
        # get in/out flow prediction
        in_flow_prediction = self.predict_inflow(dst_embedding, in_nodes)
        out_flow_prediction = self.predict_outflow(src_embedding, out_nodes)
        # get edge prediction loss
        edge_predict_loss = MSE(edge_prediction, scaled_trip_volume)
        # get in/out flow prediction loss
        in_predict_loss = MSE(in_flow_prediction, scaled_in_flows)
        out_predict_loss = MSE(out_flow_prediction, scaled_out_flows)
        # get regularization loss
        reg_loss = 0.5 * (self.regularization_loss(src_embedding) + self.regularization_loss(dst_embedding))
        # return the overall loss
        return multitask_weights[0] * edge_predict_loss + multitask_weights[1] * in_predict_loss + multitask_weights[2] * out_predict_loss + self.reg_param * reg_loss
Example #35
def alpha31(df):
    """
    Alpha#31
    ((rank(rank(rank(decay_linear((-1 * rank(rank(delta(close, 10)))), 10)))) 
    + rank((-1 * delta(close, 3)))) + sign(scale(correlation(adv20, low, 12))))
    """
    temp1 = u.rank(
        u.rank(
            u.rank(
                u.decay_linear((-1 * u.rank(u.rank(u.delta(df.close, 10)))),
                               10))))
    temp2 = u.rank((-1 * u.delta(df.close, 3))) + np.sign(
        u.scale(u.corr(u.adv(df, 20), df.low, 12)))
    return temp1 + temp2
Example #36
def split_image_to_4(image, vae_enc, vae_dec):
    # split_at_pixel = 19
    # width = image.shape[2]
    # height = image.shape[3]
    #
    # image_1 = image[:, :, 0: split_at_pixel, :]
    # image_2 = image[:, :, width - split_at_pixel:, :]
    # image_3 = image[:, :, :, 0: split_at_pixel]
    # image_4 = image[:, :, :, height - split_at_pixel:]

    # # image_1, _ = torch.split(image, split_at_pixel, dim=3)
    # # image_3, _ = torch.split(image, split_at_pixel, dim=2)
    #
    image_1 = image
    image_2 = rotate(image, 20, BATCH_SIZE_DEFAULT)
    image_3 = scale(image, BATCH_SIZE_DEFAULT)
    #image_4 = random_erease(image, BATCH_SIZE_DEFAULT)

    vae_in = torch.reshape(image, (BATCH_SIZE_DEFAULT, 784))

    sec_mean, sec_std = vae_enc(vae_in)
    e = torch.zeros(sec_mean.shape).normal_()
    sec_z = sec_std * e + sec_mean
    image_4 = vae_dec(sec_z)
    image_4 = torch.reshape(image_4, (BATCH_SIZE_DEFAULT, 1, 28, 28))

    image_1 = image_1.to('cuda')
    image_2 = image_2.to('cuda')
    image_3 = image_3.to('cuda')
    image_4 = image_4.to('cuda')

    #image = image.to('cuda')
    # show_mnist(image_1[0], 20, 28)
    # show_mnist(image_1[1], 20, 28)
    # show_mnist(image_1[2], 20, 28)
    # show_mnist(image_1[3], 20, 28)
    #
    # show_mnist(image_2[0], 20, 28)
    # show_mnist(image_2[1], 20, 28)
    # show_mnist(image_2[2], 20, 28)
    # show_mnist(image_2[3], 20, 28)
    #
    # input()
    # print(image_1.shape)
    # print(image_2.shape)
    # print(image_3.shape)
    # print(image_4.shape)
    # input()

    return image_1, image_2, image_3, image_4
Example #37
    def derivative(self, forget=True, project=False):
        ''' Evaluates the derivative of the reduced functional for the most
        recently evaluated control value. '''

        # Check if we already have the gradient in the cache.
        # If so, return the cached value
        if self.cache is not None:
            hash = value_hash([x.data() for x in self.controls])
            fnspaces = [p.data().function_space() if isinstance(p.data(),
                Function) else None for p in self.controls]

            if hash in self._cache["derivative_cache"]:
                info_green("Got a derivative cache hit.")
                return cache_load(self._cache["derivative_cache"][hash], fnspaces)

        # Call callback
        values = [p.data() for p in self.controls]
        self.derivative_cb_pre(delist(values, list_type=self.controls))

        # Compute the gradient by solving the adjoint equations
        dfunc_value = drivers.compute_gradient(self.functional, self.controls, forget=forget, project=project)
        dfunc_value = enlist(dfunc_value)

        # Reset the checkpointing state in dolfin-adjoint
        adjointer.reset_revolve()

        # Apply the scaling factor
        scaled_dfunc_value = [utils.scale(df, self.scale) for df in list(dfunc_value)]

        # Call callback
        # We might have forgotten the control values already,
        # in which case we can only return Nones
        values = []
        for c in self.controls:
            try:
                values.append(c.data())
            except libadjoint.exceptions.LibadjointErrorNeedValue:
                values.append(None)
        if self.current_func_value is not None:
            self.derivative_cb_post(self.scale * self.current_func_value,
                    delist(scaled_dfunc_value, list_type=self.controls),
                    delist(values, list_type=self.controls))

        # Cache the result
        if self.cache is not None:
            info_red("Got a derivative cache miss")
            self._cache["derivative_cache"][hash] = cache_store(scaled_dfunc_value, self.cache)

        return scaled_dfunc_value
Example #38
def alpha29(df):
    """
    Alpha#29
    (min(product(rank(rank(scale(log(sum(ts_min(rank(rank((-1 * rank(delta((close - 1),
    5))))), 2), 1))))), 1), 5) + ts_rank(delay((-1 * returns), 6), 5)) 
    """
    temp1 = u.scale(
        np.log(
            u.ts_sum(
                u.ts_min(
                    u.rank(u.rank((-1 * u.rank(u.delta((df.close - 1), 5))))),
                    2), 1)))
    temp2 = u.product(u.rank(u.rank(temp1)), 1)
    temp3 = u.ts_rank(u.delay((-1 * df.returns), 6), 5)
    return (np.where(temp1 < temp2, temp1, temp2) + temp3)
Example #39
def update_feat_with_RBMs(s_data, greedy_pre_train=1):

    data = scale(s_data.get_data())
    print(np.min(data))
    print(np.max(data))
    # Fit and Transform data
    for i in range(greedy_pre_train):
        # Initialize the RBM
        rbm = BernoulliRBM(n_components=90,
                           n_iter=50,
                           learning_rate=0.01,
                           verbose=True)
        rbm.fit(data)
        s_data.update_features(rbm.transform)
        data = s_data.get_data()
Example #40
    def __init__(self, sections, n_ticks):
        """TODO: Put description here.

            `sections`:
                       If `sections` is an int, then equally divide `n_ticks` into
                       this number of sections
                       If `sections` is a list of ints or floats, divide `n_ticks`
                       into len(`sections`) number of sections with relative
                       durations matching the values in `sections`

            `n_ticks`: The total number of the smallest unit of duration in all
                       sequential sections combined. Eg, Length of audio in samples.


        """

        self.n_ticks = n_ticks

        if isinstance(sections, int):
            self.n_sections = sections
            self.starts = [
                int(round(start)) for start in np.linspace(
                    0, self.n_ticks, self.n_sections, endpoint=False)
            ]

        if isinstance(sections, (list, tuple)):
            self.n_sections = len(sections)
            self.relative_durations = sections
            self.sum_relative_durations = sum(sections)

            durations = [
                scale(duration, 0, self.sum_relative_durations, 0,
                      self.n_ticks) for duration in sections
            ]
            start = 0
            self.starts = []
            for duration in durations:
                self.starts.append(start)
                start += duration
            self.starts = [int(round(start)) for start in self.starts]

        self.next_starts = self.starts[1:] + [self.n_ticks + 1]

        index = 0
        for start, next_start in zip(self.starts, self.next_starts):
            section = Section(start, next_start, index, self.n_sections, self)
            self.append(section)
            index += 1
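A hypothetical usage sketch (values invented for illustration):

sections = Sections([1, 2, 1], n_ticks=1000)
sections.starts  # -> [0, 250, 750]: relative durations 1:2:1 spread over 1000 ticks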
Example #41
def intersection_with_xz_plane(origin, vector):
	# matrix to drop y component
	to_2d = np.array([
		[1, 0, 0, 0],
		[0, 0, 1, 0],
		[0, 0, 0, 1]
	])

	# find intersections with xz plane
	#          0 = (origin + vector * a).y 
	#     =>   a = -origin.y / vector.y
	#     => int = origin + vector * a
	return to_2d.dot(
		origin + utils.scale(all=-origin[1] / vector[1])
					 .dot(vector)
	)
Example #42
def load_test_images():
    '''
    Loads 64 random images from SVHN test data sets

    :return: Tuple of (test images, image labels)
    '''
    utils.download_train_and_test_data()
    _, testset = utils.load_data_sets()

    idx = np.random.randint(0, testset['X'].shape[3], size=64)
    test_images = testset['X'][:, :, :, idx]
    test_labels = testset['y'][idx]

    test_images = np.rollaxis(test_images, 3)
    test_images = utils.scale(test_images)

    return test_images, test_labels
Example #43
    def hessian(self, m_dot, project=False):
        ''' Evaluates the Hessian action in direction m_dot. '''

        assert(len(self.controls) == 1)

        # Check if we already have the Hessian in the cache.
        # If so, return the cached value
        if self.cache is not None:
            hash = value_hash([x.data() for x in self.controls] + [m_dot])
            fnspaces = [p.data().function_space() if isinstance(p.data(),
                Function) else None for p in self.controls]

            if hash in self._cache["hessian_cache"]:
                info_green("Got a Hessian cache hit.")
                return cache_load(self._cache["hessian_cache"][hash], fnspaces)
            else:
                info_red("Got a Hessian cache miss")

        # Compute the Hessian action by solving the second order adjoint equations
        if isinstance(m_dot, list):
            assert len(m_dot) == 1
            Hm = self.H(m_dot[0], project=project)
        else:
            Hm = self.H(m_dot, project=project)

        # Apply the scaling factor
        scaled_Hm = [utils.scale(Hm, self.scale)]

        # Call callback
        control_data = [p.data() for p in self.controls]
        if self.current_func_value is not None:
            current_func_value = self.scale * self.current_func_value
        else:
            current_func_value = None

        self.hessian_cb(current_func_value,
                        delist(control_data, list_type=self.controls),
                        m_dot, scaled_Hm[0])

        # Cache the result
        if self.cache is not None:
            self._cache["hessian_cache"][hash] = cache_store(scaled_Hm, self.cache)

        return scaled_Hm
Example #44
def make_tuned_inference(X):
    print('Making tuned inference...')

    t = [0.100, 0.101, 0.102, 0.103, 0.104, 0.105, 0.106, 0.107, 0.108, 0.109,
         0.110, 0.111, 0.112, 0.113, 0.114, 0.115, 0.116, 0.117, 0.118, 0.119,
         0.120, 0.121, 0.122, 0.123, 0.124, 0.125, 0.126, 0.127, 0.128, 0.129,
         0.130, 0.131, 0.132, 0.133, 0.134, 0.135, 0.136, 0.137, 0.138, 0.139,
         0.140, 0.141, 0.142, 0.143, 0.144, 0.145, 0.146, 0.147, 0.148, 0.149,
         0.150, 0.151, 0.152, 0.154, 0.155, 0.156, 0.157, 0.158, 0.159, 0.160,
         0.161, 0.162, 0.163, 0.164, 0.165, 0.166, 0.167, 0.168, 0.169, 0.170,
         0.171, 0.172, 0.173, 0.174, 0.175, 0.176, 0.177, 0.178, 0.179, 0.180,
         0.181, 0.182, 0.183, 0.184, 0.185, 0.186, 0.187, 0.188, 0.189, 0.190,
         0.191, 0.192, 0.193, 0.194, 0.195, 0.196, 0.197, 0.198, 0.199, 0.200,
         0.201, 0.202, 0.203, 0.204, 0.205, 0.206, 0.207, 0.208, 0.209, 0.200,
         0.201, 0.202, 0.203, 0.204, 0.205, 0.206, 0.207, 0.208, 0.209, 0.210]

    weight = 0

    n_samples, n_nodes = X.shape
    y_pred_agg = np.zeros((n_nodes, n_nodes))


    for threshold in t:
        for filtering in ["f1", "f2", "f3", "f4"]:
            print('Current: %0.3f, %s' % (threshold, filtering))

            X_new = tuned_filter(X, LP=filtering, threshold=threshold, weights=True)
            pca = PCA(whiten=True, n_components=int(0.8 * n_nodes)).fit(X_new)
            y_pred = - pca.get_precision()

            if filtering == 'f1':
                y_pred_agg += y_pred
                weight += 1
            elif filtering == 'f2':
                y_pred_agg += y_pred * 0.9
                weight += 0.9
            elif filtering == 'f3':
                y_pred_agg += y_pred * 0.01
                weight += 0.01
            elif filtering == 'f4':
                y_pred_agg += y_pred * 0.7
                weight += 0.7

    return scale(y_pred_agg / weight)
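The final scale call presumably min-max normalises the aggregated score matrix; a sketch under that assumption (not the project's actual helper):

import numpy as np

def scale(X):
    # Assumed behaviour: min-max normalise an array to the [0, 1] range
    X = np.asarray(X, dtype=float)
    return (X - X.min()) / (X.max() - X.min())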
Example #45
def make_prediction_directivity(X, threshold=0.12, n_jobs=1):
    """Score neuron connectivity using a precedence measure

    Parameters
    ----------
    X : numpy array of shape (n_samples, n_nodes)
        Fluorescence signals

    threshold : float, (default=0.12)
        Threshold value for hard thresholding filter:
        x_new[i] = x[i] if x[i] >= threshold else 0.

    n_jobs : integer, optional (default=1)
        The number of jobs to run the algorithm in parallel.
        If -1, then the number of jobs is set to the number of cores.

    Returns
    -------
    score : numpy array of shape (n_nodes, n_nodes)
        Pairwise neuron connectivity score.

    """

    # Perform filtering
    X_new = np.zeros((X.shape))
    for i in range(1, X.shape[0] - 1):
        for j in range(X.shape[1]):
            X_new[i, j] = (X[i, j] + 1 * X[i - 1, j] + 0.8 * X[i - 2, j] +
                           0.4 * X[i - 3, j])

    X_new = np.diff(X_new, axis=0)
    thresh1 = X_new < threshold * 1
    thresh2 = X_new >= threshold * 1
    X_new[thresh1] = 0
    X_new[thresh2] = pow(X_new[thresh2], 0.9)

    # Score directivity
    n_jobs, starts = _partition_X(X, n_jobs)
    all_counts = Parallel(n_jobs=n_jobs)(
        delayed(_parallel_count)(X_new, starts[i], starts[i + 1])
        for i in range(n_jobs))
    count = np.vstack(list(chain.from_iterable(all_counts)))

    return scale(count - np.transpose(count))
Example #46
    def derivative(self, forget=True, project=False):
        ''' Evaluates the derivative of the reduced functional for the most
        recently evaluated control value. '''

        # Check if we already have the gradient in the cache.
        # If so, return the cached value
        if self.cache is not None:
            hash = value_hash([x.data() for x in self.controls])
            fnspaces = [p.data().function_space() if isinstance(p.data(),
                Function) else None for p in self.controls]

            if hash in self._cache["derivative_cache"]:
                info_green("Got a derivative cache hit.")
                return cache_load(self._cache["derivative_cache"][hash], fnspaces)

        # Compute the gradient by solving the adjoint equations
        dfunc_value = drivers.compute_gradient(self.functional, self.controls, forget=forget, project=project)
        dfunc_value = enlist(dfunc_value)

        # Reset the checkpointing state in dolfin-adjoint
        adjointer.reset_revolve()

        # Apply the scaling factor
        scaled_dfunc_value = [utils.scale(df, self.scale) for df in list(dfunc_value)]

        # Call the user-specific callback routine
        if self.derivative_cb:
            if self.current_func_value is not None:
              values = [p.data() for p in self.controls]
              self.derivative_cb(self.scale * self.current_func_value,
                      delist(scaled_dfunc_value, list_type=self.controls),
                      delist(values, list_type=self.controls))
            else:
              info_red("Gradient evaluated without functional evaluation, not calling derivative callback function")

        # Cache the result
        if self.cache is not None:
            info_red("Got a derivative cache miss")
            self._cache["derivative_cache"][hash] = cache_store(scaled_dfunc_value, self.cache)

        return scaled_dfunc_value
Example #47
import numpy as N
import pylab as P

from scipy.sandbox import pyem
import utils

oldfaithful = utils.get_faithful()

# We want the relationship between d(t) and w(t+1), but get_faithful gives
# d(t), w(t), so we have to shift to get the "usual" faithful data
waiting = oldfaithful[1:, 1:]
duration = oldfaithful[:len(waiting), :1]
dt = N.concatenate((duration, waiting), 1)

# Scale the data so that each component is in [0..1]
dt = utils.scale(dt)

# This function train a mixture model with k components, returns the trained
# model and the BIC
def cluster(data, k, mode = 'full'):
    d = data.shape[1]
    gm = pyem.GM(d, k, mode)
    gmm = pyem.GMM(gm)
    em = pyem.EM()
    em.train(data, gmm, maxiter = 20)
    return gm, gmm.bic(data)

# bc will contain a list of BIC values for each model trained
bc = []
mode = 'full'
P.figure()
Example #48
from proto import (load, take, show, bgr, image, like, bounds,
                   channels, crop, scale, color, avail, colorPicker)
from proto import alias, sharpen, group, find, edge, center, distance
from PIL import Image

print "# fast stuff"
img = load('samples/abstract/colors.png')
#b = take()
show(img)
b, g, r = bgr(img)
img = image(b,b,b)
test = like(img)
bound = bounds(b)
channel = channels(b)
coord = (0,0,50,50)
closer = crop(img, coord)
bigger = scale(closer, 2.0)
eyedrop = color(img, 0, 30)
pallet = avail(img)
colorPicker(img,0,30)

print "# slow stuff"
res1 = alias(img, .3)
res2 = sharpen(img, .3)
blob1 = group(img)
mask = Image.new("RGB", (50, 10), "white")
blob3 = find(img,mask,(3,3))
coords1 = edge(img)
coords2 = center(blob1)
dist = distance(0,3)

print "# yay, got to the end!"
Example #49
 def weighted_traverse(self, start, end, current):
     traverse_vector = self.traverse_vector(start, end, current)
     w = self.traverse_weight(start, end, current)
     # rospy.loginfo('wtr: '+str(traverse_vector[0])[0:4]+' , '+str(traverse_vector[1])[0:4]+' , '+str(w)[0:4])
     return scale(traverse_vector, w)
Example #50
 def weighted_arrive(self, start, end, current):
     arrive_vector = self.arrive_vector(start, end, current)
     w = self.arrive_weight(start, end, current)
     # rospy.loginfo('war: '+str(arrive_vector[0])[0:4]+' , '+str(arrive_vector[1])[0:4]+' , '+str(w)[0:4])
     return scale(arrive_vector, w)
Example #51
 def weighted_depart(self, start, end, current):
     depart_vector = self.depart_vector(start, end, current)
     w = self.depart_weight(start, end, current)
     # rospy.loginfo('wdp: '+str(depart_vector[0])[0:4]+' , '+str(depart_vector[1])[0:4]+' , '+str(w)[0:4])
     return scale(depart_vector, w)
Example #52
def get_file():
	global frame_map
	global widget_master

	file = tkFileDialog.askopenfilename(parent=widget_master,title='Choose a file')
	field_file.delete(0,999)
	field_file.insert(0,file)

	handle= open(file,'r',0)
	handle.readline()
	start_freq, end_freq,  step= ((handle.readline()).strip()).split(',')
	start_freq= int(start_freq)
	end_freq= int(end_freq)
	step= int(step)
	height= (end_freq - start_freq) / step
	yscalefactor= float(height) / float(WINDOW_HEIGHT)
	field_map.config(height= (height / yscalefactor) + (WINDOW_MARGIN * 2))
	# draw scale
	for freq in range(start_freq,end_freq,step * 45):
		field_map.create_text(WINDOW_MARGIN,((height - ((freq - start_freq) / step)) / yscalefactor) + WINDOW_MARGIN + TEXT_OFFSET,fill= 'white', text= freq, anchor=SW)


	handle.readline()
	# get signal strength range
	strength_min= 999999
	strength_max= 0
	for data in handle.readlines():
		try:
			position, frequency, symbol_rate, polarity, strength= data.strip().split(',')
			if int(strength) < strength_min:
				strength_min= int(strength)
			if int(strength) > strength_max:
				strength_max= int(strength)
		except:
			break
	# reset data
	handle.seek(0)
	for x in range(4):
		handle.readline()
	# read data one line at a time. one line == one pixel
	y_bot= height - 1
	y_top= y_bot
	current_polarity= '-'
	regions= 0
	start_frequency= 0
	while True:
		data= (handle.readline()).strip()
		if not data == '':
			position, frequency, symbol_rate, polarity, strength= data.split(',')
			x= int(position) + X_OFFSET
			if start_frequency == 0:
				start_frequency= frequency
			if not polarity == current_polarity:
				#if not current_polarity == '-':
					#field_map.create_line(x,(y_bot / yscalefactor) + WINDOW_MARGIN,x,(y_top / yscalefactor) + WINDOW_MARGIN, fill=COLOURS[current_polarity], tag='%s,%s,%s,%s,%s' % (position, start_frequency, end_frequency, symbol_rate, current_polarity))
				field_map.create_line(x,(y_bot / yscalefactor) + WINDOW_MARGIN,x,(y_top / yscalefactor) + WINDOW_MARGIN, fill=COLOURS[current_polarity] % utils.scale(int(strength), strength_min, strength_max, 0xff), tag='%s,%s,%s,%s,%s' % (position, start_frequency, end_frequency, symbol_rate, current_polarity))
				current_polarity= polarity
				y_bot= y_top
				regions += 1
				start_frequency= frequency
			end_frequency= frequency
			y_top -= 1
		else:
			break
	print '%d regions' % regions
Example #53
For now, regularized EM is pretty crude, but is enough for simple cases where
you need to avoid singular covariance matrices."""

import numpy as N
import pylab as P

from scikits.learn.machine.em import EM, GM, GMM
# Experimental RegularizedEM
from scikits.learn.machine.em.gmm_em import RegularizedEM
import utils

x, y = utils.get_pendigits()

# Take only the first point of pendigits for pdf estimation
dt1 = N.concatenate([x[:, N.newaxis], y[:, N.newaxis]], 1)
dt1 = utils.scale(dt1.astype(N.float))

# pcnt is the proportion of samples to use as prior count. Eg if you have 1000
# samples, and pcnt is 0.1, then the prior count would be 100, and 1100 samples
# will be considered as overall when regularizing the parameters.
pcnt = 0.05
# You should try different values of pval. If pval is 0.1, then the
# regularization will be strong. If you use something like 0.01, really sharp
# components will appear. If the values are too small, the regularizer may not
# work (singular covariance matrices).
pval = 0.05

# This function train a mixture model with k components, returns the trained
# model and the BIC
def cluster(data, k, mode = 'full'):
    d = data.shape[1]
Example #54
 def choose_when_quarter_tones_start(self):
     # somewhere between 2 and 4 minutes in
     # in quarter notes at 120 beats per minute (ie, half seconds)
     self.quarter_tones_start = scale(random.random(), 0, 1, 120 * 2, 120 * 4)
Example #55
    def derivative(self, forget=True, project=False):
        """ Evaluates the derivative of the reduced functional at the most
            recently evaluated control value.

            Args:
                forget (Optional[bool]): Delete the forward state while solving
                    the adjoint equations. If you want to re-evaluate the
                    derivative at the same point (or the Hessian), you will
                    need to set this to False or None. Defaults to True.
                project (Optional[bool]): If True, the returned value will be
                    the L2 Riesz representer; if False it will be the l2 Riesz
                    representer. The L2 projection requires one additional
                    linear solve. Defaults to False.

            Returns:
                The functional derivative. The returned type is the same as
                the control type.
        """

        # Check if we already have the gradient in the cache.
        # If so, return the cached value.
        if self.cache is not None:
            hash = value_hash([x.data() for x in self.controls])
            fnspaces = [p.data().function_space() if isinstance(p.data(),
                Function) else None for p in self.controls]

            if hash in self._cache["derivative_cache"]:
                info_green("Got a derivative cache hit.")
                return cache_load(self._cache["derivative_cache"][hash], fnspaces)

        # Call callback
        values = [p.data() for p in self.controls]
        self.derivative_cb_pre(delist(values, list_type=self.controls))

        # Compute the gradient by solving the adjoint equations
        dfunc_value = drivers.compute_gradient(self.functional, self.controls, forget=forget, project=project)
        dfunc_value = enlist(dfunc_value)

        # Reset the checkpointing state in dolfin-adjoint
        adjointer.reset_revolve()

        # Apply the scaling factor
        scaled_dfunc_value = [utils.scale(df, self.scale) for df in list(dfunc_value)]

        # Call callback
        # We might have forgotten the control values already,
        # in which case we can only return Nones
        values = []
        for c in self.controls:
            try:
                values.append(c.data())
            except libadjoint.exceptions.LibadjointErrorNeedValue:
                values.append(None)
        if self.current_func_value is not None:
            self.derivative_cb_post(self.scale * self.current_func_value,
                    delist(scaled_dfunc_value, list_type=self.controls),
                    delist(values, list_type=self.controls))

        # Cache the result
        if self.cache is not None:
            info_red("Got a derivative cache miss")
            self._cache["derivative_cache"][hash] = cache_store(scaled_dfunc_value, self.cache)

        return scaled_dfunc_value
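A hypothetical usage sketch for the method above, assuming the usual dolfin-adjoint workflow; the functional J and control m are placeholders and not part of the original snippet:

from dolfin_adjoint import ReducedFunctional, Control

# J is a previously annotated functional and m its control; both are assumed
# to exist in the surrounding script.
rf = ReducedFunctional(J, Control(m))
rf(m)                             # forward evaluation at the control value
dj = rf.derivative(forget=False,  # keep the forward state for later reuse
                   project=True)  # return the L2 Riesz representer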
Exemplo n.º 56
0
mixtures, etc..."""

import numpy as N
import pylab as P
import matplotlib as MPL

from scipy.sandbox import svm
import utils

from scikits.learn.datasets import german
data = german.load()

features = N.vstack([data['feat']['feat' + str(i)].astype(N.float) for i in range(1, 25)]).T
label = data['label']

t, s = utils.scale(features)

training = svm.LibSvmClassificationDataSet(label, features)

def train_svm(cost, gamma, fold = 5):
    """Train a SVM for given cost and gamma."""
    kernel = svm.RBFKernel(gamma = gamma)
    model = svm.LibSvmCClassificationModel(kernel, cost = cost)
    cv = model.cross_validate(training, fold)
    return cv

c_range = N.exp(N.log(2.) * N.arange(-5, 15))
g_range = N.exp(N.log(2.) * N.arange(-15, 3))

# Train the svm on a log distributed grid
gr = N.meshgrid(c_range, g_range)
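The snippet ends after building the grid; here is a minimal sketch of how the search over it might continue, assuming train_svm's result can be reduced to a single cross-validation score where higher is better (an assumption about the old scipy.sandbox svm API, not the original code):

best_score, best_params = None, None
for cost in c_range:
    for gamma in g_range:
        score = train_svm(cost, gamma)   # assumed to reduce to one CV score
        if best_score is None or score > best_score:
            best_score, best_params = score, (cost, gamma)
print('best (cost, gamma): %s, score: %s' % (best_params, best_score))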
Exemplo n.º 57
0
def scale_corners(corners):
    result = []
    for point in corners:
        point = scale(point, 1/RESIZE)
        result.append(shift(point, (60,70)))
    return result
Exemplo n.º 58
0
    def __init__(self, ranges=False):
        score = self.score = Score()
        self.instruments = self.i = Instruments()
        self.parts = Parts(self.i)


        # Make Metadata
        timestamp = datetime.datetime.utcnow()
        metadata = Metadata()
        metadata.title = 'Early Montreal'
        metadata.composer = 'Jonathan Marmor'
        metadata.date = timestamp.strftime('%Y/%m/%d')
        score.insert(0, metadata)

        for part in self.parts.l:
            score.insert(0, part)
        score.insert(0, StaffGroup(self.parts.l))

        if ranges:
            # Don't make a piece, just show the instrument ranges
            for inst, part in zip(self.instruments.l, self.parts.l):
                measure = Measure()
                measure.timeSignature = TimeSignature('4/4')
                low = Note(inst.lowest_note)
                measure.append(low)
                high = Note(inst.highest_note)
                measure.append(high)
                part.append(measure)
            return


        # 18 to 21 minutes
        piece_duration_minutes = scale(random.random(), 0, 1, 18, 21)

        # Make the "songs"
        songs = []
        total_minutes = 0
        n = 1
        while total_minutes < piece_duration_minutes:
            print 'Song {}'.format(n)
            n += 1
            song = Song(self)
            songs.append(song)
            total_minutes += song.duration_minutes

        # Make notation
        previous_duration = None
        for song in songs:
            for bar in song.bars:
                for part in bar.parts:
                    measure = Measure()
                    if bar.tempo:
                        measure.insert(0, MetronomeMark(number=bar.tempo, referent=Duration(1)))
                        measure.leftBarline = 'double'
                    if bar.duration != previous_duration:
                        ts = TimeSignature('{}/4'.format(bar.duration))
                        measure.timeSignature = ts

                    # Fix Durations
                    durations = [note['duration'] for note in part['notes']]

                    components_list = split_at_beats(durations)
                    components_list = [join_quarters(note_components) for note_components in components_list]
                    for note, components in zip(part['notes'], components_list):
                        note['durations'] = components


                    for note in part['notes']:
                        if note['pitch'] == 'rest':
                            n = Rest()
                        elif isinstance(note['pitch'], list):
                            pitches = []
                            for pitch_number in note['pitch']:
                                p = Pitch(pitch_number)
                                # Force all flats
                                if p.accidental is not None and p.accidental.name == 'sharp':
                                    p = p.getEnharmonic()
                                pitches.append(p)
                            n = Chord(notes=pitches)

                            # TODO add slurs
                            # TODO add glissandos
                            # TODO add -50 cent marks


                        else:
                            p = Pitch(note['pitch'])
                            # Force all flats
                            if p.accidental is not None and p.accidental.name == 'sharp':
                                p = p.getEnharmonic()
                            n = Note(p)

                            # TODO add slurs
                            # TODO add glissandos
                            # TODO add -50 cent marks

                        d = Duration()
                        if note['duration'] == 0:
                            d.quarterLength = .5
                            d = d.getGraceDuration()
                        else:
                            d.fill(note['durations'])
                        n.duration = d

                        measure.append(n)

                    self.parts.d[part['instrument_name']].append(measure)
                previous_duration = bar.duration
Exemplo n.º 59
0
import numpy as np
# Assumed import: pca.get_precision() below matches scikit-learn's PCA API.
# The _preprocess and scale helpers are expected to be defined elsewhere in
# the original module.
from sklearn.decomposition import PCA


def make_prediction_PCA(X):
    """Score neuron connectivity using a partial correlation approach

    Parameters
    ----------
    X : numpy array of shape (n_samples, n_nodes)
        Fluorescence signals

    Returns
    -------
    score : numpy array of shape (n_nodes, n_nodes)
        Pairwise neuron connectivity score.

    """

    n_samples, n_nodes = X.shape

    # Init for a given data set
    y_pred_agg = np.zeros((n_nodes, n_nodes))

    # Thresholds to evaluate
    # Some thresholds are duplicated or missing.
    t = [0.100, 0.101, 0.102, 0.103, 0.104, 0.105, 0.106, 0.107, 0.108, 0.109,
         0.110, 0.111, 0.112, 0.113, 0.114, 0.115, 0.116, 0.117, 0.118, 0.119,
         0.120, 0.121, 0.122, 0.123, 0.124, 0.125, 0.126, 0.127, 0.128, 0.129,
         0.130, 0.131, 0.132, 0.133, 0.134, 0.135, 0.136, 0.137, 0.138, 0.139,
         0.140, 0.141, 0.142, 0.143, 0.144, 0.145, 0.146, 0.147, 0.148, 0.149,
         0.150, 0.151, 0.152, 0.154, 0.155, 0.156, 0.157, 0.158, 0.159, 0.160,
         0.161, 0.162, 0.163, 0.164, 0.165, 0.166, 0.167, 0.168, 0.169, 0.170,
         0.171, 0.172, 0.173, 0.174, 0.175, 0.176, 0.177, 0.178, 0.179, 0.180,
         0.181, 0.182, 0.183, 0.184, 0.185, 0.186, 0.187, 0.188, 0.189, 0.190,
         0.191, 0.192, 0.193, 0.194, 0.195, 0.196, 0.197, 0.198, 0.199, 0.200,
         0.201, 0.202, 0.203, 0.204, 0.205, 0.206, 0.207, 0.208, 0.209, 0.200,
         0.201, 0.202, 0.203, 0.204, 0.205, 0.206, 0.207, 0.208, 0.209, 0.210]

    weight = 0

    # Loop over all the thresholds and filtering methods
    for threshold in t:
        for filtering in ['sym', 'future', 'past', 'alt']:
            print(threshold, filtering)

            # Preprocess data
            X_new = _preprocess(X, filtering=filtering, threshold=threshold)

            # Making the prediction
            pca = PCA(whiten=True, n_components=int(0.8 * n_nodes)).fit(X_new)
            y_pred = - pca.get_precision()

            # Adding the (weighted) prediction to the global prediction
            if filtering == 'sym':
                y_pred_agg += y_pred
                weight += 1
            elif filtering == 'alt':
                y_pred_agg += y_pred * 0.9
                weight += 0.9
            elif filtering == 'future':
                y_pred_agg += y_pred * 0.01
                weight += 0.01
            elif filtering == 'past':
                y_pred_agg += y_pred * 0.7
                weight += 0.7

    # Normalizing the global prediction
    return scale(y_pred_agg / weight)
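A hypothetical smoke test for the function above (illustrative only): it assumes the _preprocess and scale helpers used inside make_prediction_PCA are available in the same module.

rng = np.random.RandomState(0)
X_demo = rng.rand(200, 30)        # 200 time samples for 30 "neurons"
connectivity = make_prediction_PCA(X_demo)
print(connectivity.shape)         # (n_nodes, n_nodes) == (30, 30)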