Example #1
    def seek(self, target_pos, on_torus=True):

        if on_torus:
            dv = utility.dist_vec_on_torus(target_pos, self.agent.pos,
                                           game_state.WIDTH, game_state.HEIGHT)
            desired_vel = self.agent.max_speed * utility.normalize(dv)
        else:
            desired_vel = self.agent.max_speed * utility.normalize(
                target_pos - self.agent.pos)

        return desired_vel - self.agent.vel
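These steering examples never show utility.normalize itself; a minimal sketch consistent with how it is called here (it returns the unit vector rather than modifying its argument) might look like this, with the zero-vector guard as an assumption:

import numpy as np

def normalize(v):
    # return the unit vector in the direction of v; leaving a zero
    # vector unchanged to avoid division by zero is an assumption
    v = np.asarray(v, dtype=float)
    mag = np.linalg.norm(v)
    return v / mag if mag > 0 else v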
Example #2
    def update(self, scene):
        if self.owner.move_component is None:
            return

        vel = self.owner.move_component.vel
        friction = np.array([vel[0], vel[1]])
        if friction[0] != 0 or friction[1] != 0:
            friction = utility.normalize(friction)
            friction_coefficient = 0.2
            friction[0] *= -friction_coefficient * self.owner.move_component.mass
            friction[1] *= -friction_coefficient * self.owner.move_component.mass
            self.owner.move_component.apply_force(friction)
Example #3
def encrypt(str_input):
    str_input = normalize(str_input)
    bacon_dictionary = {
        "A": "AAAAA",
        "B": "AAAAB",
        "C": "AAABA",
        "D": "AAABB",
        "E": "AABAA",
        "F": "AABAB",
        "G": "AABBA",
        "H": "AABBB",
        "I": "ABAAA",
        "J": "ABAAB",
        "K": "ABABA",
        "L": "ABABB",
        "M": "ABBAA",
        "N": "ABBAB",
        "O": "ABBBA",
        "P": "ABBBB",
        "Q": "BAAAA",
        "R": "BAAAB",
        "S": "BAABA",
        "T": "BAABB",
        "U": "BABAA",
        "V": "BABAB",
        "W": "BABBA",
        "X": "BABBB",
        "Y": "BBAAA",
        "Z": "BBAAB"
    }
    bring_home_the_bacon = ""
    for eachletter in str_input:
        bring_home_the_bacon += bacon_dictionary[eachletter]
    return bring_home_the_bacon
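Assuming normalize() uppercases its input and strips anything outside A-Z, a quick check of the cipher:

>>> encrypt("Bacon")
'AAAABAAAAAAAABAABBBAABBAB'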
Example #4
def decrypt(str_input):
    str_input = normalize(str_input)
    baconlist = chunks_of_bacon(str_input)
    bacon_dictionary = {
        "AAAAA": "A",
        "AAAAB": "B",
        "AAABA": "C",
        "AAABB": "D",
        "AABAA": "E",
        "AABAB": "F",
        "AABBA": "G",
        "AABBB": "H",
        "ABAAA": "I",
        "ABAAB": "J",
        "ABABA": "K",
        "ABABB": "L",
        "ABBAA": "M",
        "ABBAB": "N",
        "ABBBA": "O",
        "ABBBB": "P",
        "BAAAA": "Q",
        "BAAAB": "R",
        "BAABA": "S",
        "BAABB": "T",
        "BABAA": "U",
        "BABAB": "V",
        "BABBA": "W",
        "BABBB": "X",
        "BBAAA": "Y",
        "BBAAB": "Z"
    }
    bring_home_the_bacon = ""
    for bacon in baconlist:
        bring_home_the_bacon += bacon_dictionary[bacon]
    return bring_home_the_bacon
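chunks_of_bacon is not shown here; a plausible sketch, given that decrypt expects five-letter groups, is:

def chunks_of_bacon(s, size=5):
    # split the normalized ciphertext into fixed-width five-letter groups
    return [s[i:i + size] for i in range(0, len(s), size)]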
Example #5
    def turn_around_time(self, to_target):

        to_target = utility.normalize(to_target)
        dot = np.dot(self.agent.heading, to_target)
        coefficient = 0.5

        return (dot - 1.0) * -coefficient
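With coefficient = 0.5 this maps the dot product linearly: a heading pointing straight at the target (dot = 1) gives 0, and a heading pointing directly away (dot = -1) gives:

>>> (-1.0 - 1.0) * -0.5
1.0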
Example #6
 def __init__(self, B):
     """ Algo specific parameters here.
     """
     super(low_regret, self).__init__()
     self.B = B
     self.experts = [fixed_spread(b) for b in B]
     self.Expert_distribution = utility.normalize(np.ones(len(B)))
Example #7
    def arrive(self, target_pos, deceleration=3, on_torus=True):

        to_target = utility.dist_vec_on_torus(target_pos, self.agent.pos,
                                              game_state.WIDTH,
                                              game_state.HEIGHT)
        dist = utility.magnitude(to_target)

        if dist <= 0:
            return np.array([0, 0])

        if deceleration > 5:
            deceleration = 5
        elif deceleration < 1:
            deceleration = 1

        deceleration_tweaker = 0.5

        speed = dist / float(deceleration) * deceleration_tweaker
        if speed > self.agent.max_speed:
            speed = self.agent.max_speed

        if on_torus:
            desired_vel = speed * utility.normalize(to_target)
        else:
            desired_vel = (speed / dist) * (target_pos - self.agent.pos)
        return desired_vel - self.agent.vel
Example #8
def encrypt(plaintext, n):
    plaintext = normalize(plaintext)
    count = cycle(range(0, n) + range(1, n - 1)[::-1])
    rails = [''] * n
    for i in plaintext:
        index = count.next()
        rails[index] = rails[index] + i
    return ''.join(rails)
Example #9
def encrypt(plaintext, n):
    plaintext = normalize(plaintext)
    count = cycle(range(0, n) + range(1, n - 1)[::-1])
    rails = [""] * n
    for i in plaintext:
        index = count.next()
        rails[index] = rails[index] + i
    return "".join(rails)
Example #10
def encrypt(key, str_input):
    alphabet = string.ascii_uppercase
    str_input = normalize(str_input)
    returnlist = []
    for i, c in enumerate(str_input):
        for j, d in enumerate(alphabet):
            if c == d:
                returnlist.append(key[j])
    return "".join(returnlist).upper()
Example #11
def encrypt(key, str_input):
    alphabet = string.ascii_uppercase
    str_input = normalize(str_input)
    returnlist = []
    for i, c in enumerate(str_input):
        for j, d in enumerate(alphabet):
            if (c == d):
                returnlist.append(key[j])
    return ''.join(returnlist).upper()
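The inner loop in Examples #10 and #11 scans the whole alphabet for every character; Examples #33-#35 further down do the same substitution with a translation table. A sketch of that shorter form in Python 3 (normalize assumed to return uppercase A-Z only, key assumed to be a 26-letter permutation):

import string

def encrypt(key, str_input):
    # one-shot substitution table instead of a per-character scan
    table = str.maketrans(string.ascii_uppercase, key.upper())
    return normalize(str_input).translate(table)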
Example #12
 def forward(self, inputs, inputs_lengths):
     input_emb = self.item_embeddings(inputs)
     pos_emb_input = torch.cat(
         inputs.size(0) * [torch.arange(start=0, end=inputs.size(1)).unsqueeze(0)])
     pos_emb_input = pos_emb_input.long()
     pos_emb = self.pos_embeddings(pos_emb_input)
     x = input_emb + pos_emb

     x = self.dropout(x)

     mask = torch.ne(inputs, self.item_num).float().unsqueeze(-1)
     x *= mask

     for i in range(self.num_blocks):
         x = self.multihead_attention(queries=normalize(x), keys=x)
     x = self.feedforward(normalize(x))
     x *= mask

     x = normalize(x)
     out = extract_axis_1_torch(x, inputs_lengths - 1)
     out = self.fc1(out)
     return out
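extract_axis_1_torch is not defined in this snippet; since it is called with inputs_lengths - 1, it presumably gathers each sequence's last valid hidden state. A hypothetical version:

import torch

def extract_axis_1_torch(data, indices):
    # hypothetical helper: for each batch row b, pick the time step
    # indices[b], i.e. return data[b, indices[b], :]
    batch_range = torch.arange(data.size(0), device=data.device)
    return data[batch_range, indices]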
Example #13
def encrypt(str_input):
    str_input = normalize(str_input)
    bacon_dictionary = {
        "A":"AAAAA","B":"AAAAB","C":"AAABA","D":"AAABB",
        "E":"AABAA","F":"AABAB","G":"AABBA","H":"AABBB",
        "I":"ABAAA","J":"ABAAB","K":"ABABA","L":"ABABB",
        "M":"ABBAA","N":"ABBAB","O":"ABBBA","P":"ABBBB",
        "Q":"BAAAA","R":"BAAAB","S":"BAABA","T":"BAABB",
        "U":"BABAA","V":"BABAB","W":"BABBA","X":"BABBB",
        "Y":"BBAAA","Z":"BBAAB"}
    bring_home_the_bacon = ""
    for eachletter in str_input:
        bring_home_the_bacon += bacon_dictionary[eachletter]
    return bring_home_the_bacon
Example #14
 def dataLoaded(self):
     # Note - this function only runs once the data directory has been loaded
     self.setMouseTracking(True)
     color = QColor(255, 255, 255)
     self.samplesSelected.clear()
     self.samplePoints.clear()
     self.sampleAreaVisible.clear()
     self.samplePointsInFile.clear()
     self.penSelected.clear()
     for t, p in common.SamplingPattern:
         self.samplePoints.append((0, 0))  # these will need to be recomputed as photo scales
         self.samplePointsInFile.append((0, 0))  # these only need to be computed once per photo
         self.sampleAreaVisible.append([])
         color.setHsv(t, int(utility.normalize(p, 0, 90) * 127 + 128), 255)
         self.penSelected.append(QPen(color, 3, Qt.SolidLine))
Example #15
def decrypt(str_input):
    str_input = normalize(str_input)
    baconlist = chunks_of_bacon(str_input)
    bacon_dictionary = {
        "AAAAA":"A","AAAAB":"B","AAABA":"C","AAABB":"D",
        "AABAA":"E","AABAB":"F","AABBA":"G","AABBB":"H",
        "ABAAA":"I","ABAAB":"J","ABABA":"K","ABABB":"L",
        "ABBAA":"M","ABBAB":"N","ABBBA":"O","ABBBB":"P",
        "BAAAA":"Q","BAAAB":"R","BAABA":"S","BAABB":"T",
        "BABAA":"U","BABAB":"V","BABBA":"W","BABBB":"X",
        "BBAAA":"Y","BBAAB":"Z"}
    bring_home_the_bacon = ""
    for bacon in baconlist:
        bring_home_the_bacon += bacon_dictionary[bacon]
    return bring_home_the_bacon
Example #16
    def __init__(self, body_i, ai_bar, bi_bar, body_j, aj_bar, bj_bar, k,
                 theta_0, c, h):
        """
        ai_bar: axis about which the torque is applied to body i (unit vector)
        bi_bar: perpendicular to ai_bar and used with bj_bar to define rotation
                angle theta_ij
        
        aj_bar: axis about which the torque is applied to body j (unit vector)
        bj_bar: perpendicular to aj_bar and used with bi_bar to define rotation
                angle theta_ij
                
        k: spring constant of actuator
        theta_0: zero-tension angle
        c: damping coefficient
        h: function that describes the effects of an actuator (hydraulic, electric, etc.)
           must be of the form h(theta, theta_dot, t)
        t: time         
        
        """

        self.body_i = body_i
        self.body_j = body_j

        self.ai_bar = normalize(column(ai_bar))
        self.bi_bar = normalize(column(bi_bar))

        self.aj_bar = normalize(column(aj_bar))
        self.bj_bar = normalize(column(bj_bar))

        self.k = k
        self.theta_0 = theta_0
        self.c = c
        self.h = h

        self.theta_old = 0
        self.n = 0  # number of full revolutions
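column and normalize are assumed helpers here; column presumably reshapes a vector into a 3x1 column so the axes can be used in matrix algebra. A hypothetical sketch:

import numpy as np

def column(v):
    # hypothetical helper: reshape a length-3 vector into a 3x1 column
    return np.asarray(v, dtype=float).reshape(3, 1)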
Example #17
def get_normals(points):
    normals = []

    length = len(points)

    for i in range(0, length - 1):
        segment = [
            points[i + 1][0] - points[i][0], points[i + 1][1] - points[i][1]
        ]

        normal = [-segment[1], segment[0]]  # left normal
        normal = utility.normalize(normal)
        normals.append(normal)

    segment = [
        points[0][0] - points[length - 1][0],
        points[0][1] - points[length - 1][1]
    ]

    normal = [-segment[1], segment[0]]  # left normal
    normal = utility.normalize(normal)
    normals.append(normal)

    return normals
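The wrap-around segment duplicates the loop body; an equivalent sketch with modular indexing folds it back into the loop:

def get_normals(points):
    # same computation: the closing segment is handled by (i + 1) % n
    n = len(points)
    normals = []
    for i in range(n):
        j = (i + 1) % n
        segment = [points[j][0] - points[i][0], points[j][1] - points[i][1]]
        normals.append(utility.normalize([-segment[1], segment[0]]))  # left normal
    return normals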
Example #18
    def flee(self, target_pos, on_torus=True):

        panic_dis = 100.0 * 100.0

        if on_torus:
            dv = utility.dist_vec_on_torus(self.agent.pos, target_pos,
                                           game_state.WIDTH, game_state.HEIGHT)
            distance = utility.magnitude_square(dv)
        else:
            dv = self.agent.pos - target_pos
            distance = utility.magnitude_square(self.agent.pos - target_pos)

        if distance > panic_dis:
            return np.array([0.0, 0.0])

        desired_vel = self.agent.max_speed * utility.normalize(dv)
        return desired_vel - self.agent.vel
Example #19
def circle_circle_collision(a, b, scene):

    a_pos = scene.transformed_entity_pts[a.id][0]
    b_pos = scene.transformed_entity_pts[b.id][0]

    r = (a.render_component.bounding_radius() +
         b.render_component.bounding_radius())
    dis = math.sqrt((a_pos[0] - b_pos[0]) * (a_pos[0] - b_pos[0]) +
                    (a_pos[1] - b_pos[1]) * (a_pos[1] - b_pos[1]))

    if r < dis:
        return False

    dv = [b_pos[0] - a_pos[0], b_pos[1] - a_pos[1]]

    dv = utility.normalize(dv) * (r - dis)

    return dv
Example #20
def translate_title(name: str) -> str:
    name = utility.normalize(name)
    name = utility.fullwidth_to_halfwidth(name)
    name = utility.escape_markdown_symbol(name)
    # remove 【...】
    name = re.sub(r'【[^【】]*(電子|特典|OFF)[^【】]*】', '', name)
    name = re.sub(r'【(期間限定|)([^【】]+セット)】', r' \g<2>', name)
    name = name.strip()
    # '(N)' -> ' N'
    name = re.sub(r'\(([0-9]+)\)$', r' \g<1>', name)
    # ': N' -> ' N'
    name = re.sub(r': ([0-9]+)$', r' \g<1>', name)
    # 'N巻' -> 'N'
    name = re.sub(r'([0-9]+)巻$', r'\g<1>', name)
    # replace continuous space
    name = re.sub(r'\s+', ' ', name)
    # coin
    coin_match = re.match(r'BOOK☆WALKER 期間限定コイン (?P<coin>[0-9,]+)円分', name)
    if coin_match:
        name = '期間限定コイン {0}円分'.format(
            coin_match.group('coin').replace(',', ''))
    return name
Example #21
    def wander(self):

        self.wander_target += np.array([
            random.uniform(-1, 1) * self.wander_jitter,
            random.uniform(-1, 1) * self.wander_jitter
        ])
        self.wander_target = self.wander_radius * utility.normalize(
            self.wander_target)

        target_local = self.wander_target + np.array([self.wander_distance, 0])
        utility.rotate_point(target_local, self.agent.side, self.agent.up)

        target_world = self.agent.pos + target_local
        """
        circle_pos = np.array([self.wander_distance, 0])
        utility.rotate_point(circle_pos, self.agent.side, self.agent.heading)
        circle_pos += self.agent.pos

        pygame.draw.circle(engine.surface, color.RED, [int(circle_pos[0]), int(circle_pos[1])], self.wander_radius, 1)
        pygame.draw.circle(engine.surface, color.RED, [int(target_world[0]), int(target_world[1])], 3)
        """

        return target_world - self.agent.pos
Example #22
    def update(self, time_elapsed):

        if self.owner.move_component is None:
            return

        speed = self.owner.move_component.max_speed
        heading = self.owner.up
        right = -self.owner.side

        move_force = np.array([0.0, 0.0])

        side = np.array([
            self.owner.side[0] * self.max_turn_rate * self.turn,
            self.owner.side[1] * self.max_turn_rate * self.turn
        ])

        if self.move_left_right != 0.0:
            move_force[0] -= right[0] * self.move_left_right * speed
            move_force[1] -= right[1] * self.move_left_right * speed
            self.move_left_right = 0.0

        if self.move_backward_forward != 0.0:
            move_force[0] += heading[0] * self.move_backward_forward * speed
            move_force[1] += heading[1] * self.move_backward_forward * speed
            self.move_backward_forward = 0.0

        if self.turn != 0.0:
            self.owner.up[0] += side[0]
            self.owner.up[1] += side[1]
            self.owner.up = utility.normalize(self.owner.up)
            self.owner.side = utility.perpendicular(self.owner.up)
            self.turn = 0.0

        length = pyrr.vector3.length(move_force)

        if length > 0.1:
            self.owner.move_component.apply_force(move_force)
Example #23
    locations1, features1 = sift.siftFeature(img1)
    locations2, features2 = sift.siftFeature(img2)

    # use PCA to reduce dimensions
    numberOfFeaturesOne = locations1.shape[0]
    numberOfFeaturesTwo = locations2.shape[0]

    features = np.vstack((features1, features2))
    V, S, mean = pca.pca(features)

    pcaFeatures1 = pca.project(features1, V, 36)
    pcaFeatures2 = pca.project(features2, V, 36)

    # normalize features
    util.normalize(pcaFeatures1)
    util.normalize(pcaFeatures2)

    np.savetxt("pcafeature1", pcaFeatures1, delimiter="\t")
    np.savetxt("pcafeature2", pcaFeatures2, delimiter="\t")

    # interface with Java program to do matching
    rowX = pcaFeatures1.shape[0]
    rowY = pcaFeatures2.shape[0]
    column = pcaFeatures1.shape[1]

    matchCommand = "java match " +str(rowX)+ " " +str(rowY)+ " " +str(column)
    print matchCommand
    os.system(matchCommand)

    # plot according to match stored in "data" folder
Example #24
 def update_expert_distribution(self):
     k = 0.0001
     inventory = np.array([e.inventory for e in self.experts])
     Loss = np.exp(-k * np.abs(inventory))
     self.Expert_distribution = utility.normalize(Loss)
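Here (and in Example #6) utility.normalize is applied to a weight vector rather than a 2-D direction, so it is presumably the sum-to-one variant used for probability distributions; a sketch under that assumption:

import numpy as np

def normalize(weights):
    # assumed distribution-style normalize: scale non-negative
    # weights so they sum to 1
    weights = np.asarray(weights, dtype=float)
    return weights / weights.sum()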
Example #25
#
# cktboard_texture = "input\\cktboard.raw"
# img3 = MyImage("cktboard_texture", [365, 120], cktboard_texture)
# dimensions = [256, 256]
# img3.matrix = co_Occurrence(img3, dimensions)
# my_io.customised_matrix_write8(img3, dimensions)

flowers = "input\\flowers.raw"
img1 = MyImage("flowers", [400, 300], flowers)
flowers_template = "input\\flowers-template.raw"
img2 = MyImage("flowers_template", [42, 45], flowers_template)

img3 = MyImage("flowers_correlation_coefficient", [358, 255])
img3.matrix = CorrelationCoefficient.CC(img1, img2)
# my_io.write(img3, "int8")
img3.matrix = utility.normalize(img3.matrix, [358, 255])
# my_io.write(img3, "int8")
my_io.customised_matrix_write8(img3, [358, 255])

# lena = "input\\lena.raw"
# img3 = MyImage("lena_resampling", [512, 512], lena)
# img3.matrix = DU.down(img3)
# img3.matrix = DU.up(img3)
# my_io.write(img3, "int8")

# lena = "input\\lena_resampling2.raw"
# img3 = MyImage("lena_resampling", [256, 256], lena)
# # img3.matrix = DU.down(img3)
# img3.matrix = DU.up(img3)
# # my_io.write(img3, "int8")
# my_io.customised_matrix_write8(img3, [512, 512])
Example #26
    def DataGenBB(self, DataStrs, train_start, train_end):
        generateFunc = [
            "original", "scale", "rotate", "translate", "scaleAndTranslate",
            "brightnessAndContrast"
        ]
        # generateFunc = ["original", "scale", "rotate", "translate", "scaleAndTranslate"]

        InputData = np.zeros(
            [self.batch_size * len(generateFunc), self.imSize, self.imSize, 3],
            dtype=np.float32)
        # InputLabel = np.zeros([self.batch_size * len(generateFunc), 7], dtype = np.float32)
        InputLabel = np.zeros([self.batch_size * len(generateFunc), 3],
                              dtype=np.float32)

        InputNames = []
        count = 0
        for i in range(train_start, train_end):
            strLine = DataStrs[i]
            strCells = strLine.rstrip(' \n').split(' ')
            imgName = strCells[0]

            labels = np.array(strCells[1:]).astype(np.float)

            if len(labels) == 78:
                # print "switch to menpo39"
                labelsPTS = labels[:136].reshape([39, 2])
                self.ifMenpo39Data = True
            else:
                # print "not menpo39"
                labelsPTS = labels[:136].reshape([68, 2])
                self.ifMenpo39Data = False

            # if self.debug:
            #     print "imgName: ", imgName
            img = cv2.imread(imgName)

            if img is not None:
                # print "find image: ", imgName
                # print "img.shape: ", img.shape

                img = cv2.resize(img, (self.imSize, self.imSize))
                # print "img.shape: ", img.shape
                if self.ifMenpo39Data:
                    x, y = self.unpackLandmarks(labelsPTS)
                else:
                    x, y = ut.unpackLandmarks(labelsPTS, self.imSize)

                # newImg, newX, newY = img, x, y

                for index in range(len(generateFunc)):
                    method = generateFunc[index]
                    (w, h, _) = img.shape
                    # tag = random.choice(generateFunc)
                    if method == "resize":
                        newImg, newX, newY = ut.resize(img,
                                                       x,
                                                       y,
                                                       xMaxBound=w,
                                                       yMaxBound=h,
                                                       random=True)
                    elif method == "rotate":
                        newImg, newX, newY = ut.rotate(img, x, y, w=w, h=h)
                    elif method == "mirror":
                        newImg, newX, newY = ut.mirror(img, x, y, w=w, h=h)
                    elif method == "translate" or method == "scaleAndTranslate":
                        newImg, newX, newY = ut.translate(img, x, y, w=w, h=h)
                    elif method == "brightnessAndContrast":
                        newImg, newX, newY = ut.contrastBrightess(img, x, y)
                    elif method == "original":
                        newImg, newX, newY = img, x, y
                    elif method == "scale":
                        newImg, newX, newY = img, x, y
                    else:
                        raise "not existing function"

                    # if self.debug:
                    #     plotOriginal = ut.plotLandmarks(img, x, y, self.imSize, ifReturn = True)
                    #     plotNew = ut.plotLandmarks(newImg, newX, newY, self.imSize, ifReturn = True)

                    #     cv2.imwrite(self.outputDir + 'testOriginal' + str(count) + '.jpg', img)
                    #     cv2.imwrite(self.outputDir + 'testNew' + str(count) + '.jpg', newImg)
                    #     cv2.imwrite(self.outputDir + 'plotOriginal' + str(count) + '.jpg', plotOriginal)
                    #     cv2.imwrite(self.outputDir + 'plotNew' + str(count) + '.jpg', plotNew)

                    # print "before normalize: ", newX

                    # normX = ut.normalize(newX)
                    # normY = ut.normalize(newY)

                    # print "after normalize: ", newX
                    # print "after denormalize again: ", ut.deNormalize(newX)

                    # normXMin = min(normX)
                    # normYMin = min(normY)
                    # normXMax = max(normX)
                    # normYMax = max(normY)
                    # normXMean = (normXMax + normXMin)/2.0
                    # normYMean = (normYMax + normYMin)/2.0
                    # normEdge = max(normYMax - normYMin, normXMax - normXMin)
                    newXMin = min(newX)
                    newYMin = min(newY)
                    newXMax = max(newX)
                    newYMax = max(newY)
                    newXMean = (newXMax + newXMin) / 2.0
                    newYMean = (newYMax + newYMin) / 2.0
                    edge = max(newYMax - newYMin, newXMax - newXMin)

                    # if method == "scale":
                    #     cv2.imshow("originalImg", newImg)
                    #     cv2.waitKey(0)

                    if method == "scale" or method == "scaleAndTranslate":
                        newEdge = np.random.uniform(0.7, 0.9) * edge
                        newXMin = int(newXMean - newEdge / 2.0)
                        newXMax = int(newXMean + newEdge / 2.0)
                        newYMin = int(newYMean - newEdge / 2.0)
                        newYMax = int(newYMean + newEdge / 2.0)

                        newXMean = newXMean - newXMin
                        newYMean = newYMean - newYMin

                        # print "newXMin, newYMin, newXMax, newYMax: ", newXMin, newYMin, newXMax, newYMax

                        newImg = Image.fromarray(newImg.astype(np.uint8))
                        cropImg = newImg.crop(
                            (newXMin, newYMin, newXMax, newYMax))
                        newImg = np.array(cropImg)

                        # cv2.imshow("processing", newImg)
                        # cv2.waitKey(0)

                        w, h, _ = newImg.shape
                        edge = edge * self.imSize / w
                        newXMean = newXMean * self.imSize / w
                        newYMean = newYMean * self.imSize / h
                        newImg = cv2.resize(newImg, (self.imSize, self.imSize))

                    # print "newXMin: ", newXMin
                    # print "newYMin: ", newYMin
                    # print "newXMax: ", newXMax
                    # print "newYMax: ", newYMax
                    # print "newXMean: ", newXMean
                    # print "newYMean: ", newYMean
                    # print "newEdge: ", newEdge

                    # if method == "scale":
                    #     newImg = ut.plotTarget(newImg, [newXMean, newYMean, edge], ifSquareOnly = True, ifGreen = True)
                    #     cv2.imshow("newImg", newImg)
                    #     cv2.waitKey(0)

                    # if self.ifMenpo39Data == False:
                    print "imgName: ", imgName.split("/")[-2] + imgName.split(
                        "/")[-1].split(".")[0]
                    print "inputCheck/" + imgName.split(
                        "/")[-2] + imgName.split("/")[-1].split(".")[0] + str(
                            method) + str(count) + '.jpg'
                    cv2.imwrite(
                        "inputCheck/" + imgName.split("/")[-2] +
                        imgName.split("/")[-1].split(".")[0] + str(method) +
                        str(count) + '.jpg', newImg)

                    normX = ut.normalize(newX, self.imSize)
                    normY = ut.normalize(newY, self.imSize)
                    # normPTS = np.asarray(ut.packLandmarks(normX, normY))
                    normXMean, normYMean, normEdge = ut.normalize(
                        newXMean, self.imSize), ut.normalize(
                            newYMean,
                            self.imSize), ut.normalize(edge, self.imSize)
                    # print "newPTS: ", newPTS.shape

                    # print "ut.deNormalize(normXMin): ", ut.deNormalize(normXMin)
                    # print "ut.deNormalize(normYMin): ", ut.deNormalize(normYMin)
                    # print "ut.deNormalize(normXMax): ", ut.deNormalize(normXMax)
                    # print "ut.deNormalize(normYMax): ", ut.deNormalize(normYMax)
                    # print "ut.deNormalize(normXMean): ",ut.deNormalize(normXMean)
                    # print "ut.deNormalize(normYMean): ",ut.deNormalize(normYMean)
                    # print "ut.deNormalize(normEdge): ", ut.deNormalize(normEdge)

                    # print "method: ", method
                    # print "newImg.shape: ", newImg.shape

                    # print "len(InputData): ", len(InputData)
                    InputData[count, ...] = newImg
                    # labels = np.array([normPTS[27][0], normPTS[27][1], normPTS[8][0],
                    #     normPTS[8][1], normXMean, normYMean, normEdge])
                    labels = np.array([normXMean, normYMean, normEdge])
                    InputLabel[count, ...] = labels
                    InputNames.append(imgName)

                    # print "count: ", count
                    count += 1

            else:
                print "cannot : ", imgName

        return InputData, InputLabel, np.asarray(InputNames)
Example #27
def model_xor_snn():
  hrs_set_voltage = -4
  positive_bias = 1
  fixing_pulses = 37 * 10 ** 4
  current_application_time = 500
  spike = 1
  true = 20
  false = 0
  input_spikes_1 = [false, false, true, true]
  input_spikes_2 = [false, true, false, true]
  targets = [false, true, true, false]

  # Create a network with 2 input neurons, 2 hidden layer neurons
  # and 1 output neuron. The input neurons are simulated by their spikes.
  hidden_neuron_1 = spiking_neuron.SpikingNeuron()
  hidden_neuron_2 = spiking_neuron.SpikingNeuron()
  output_neuron = spiking_neuron.SpikingNeuron()

  # Create 6 synapses with input_synapse_ij representing the connection
  # between the input neuron i and the hidden neuron j, and hidden_synapse_k
  # representing the connection between the hidden neuron k and the output neuron.
  # The inhibitory synapses are input_synapse_12 and input_synapse_21.
  input_synapse_11 = memristor.Memristor()
  input_synapse_12 = memristor.Memristor()
  input_synapse_21 = memristor.Memristor()
  input_synapse_22 = memristor.Memristor()
  hidden_synapse_1 = memristor.Memristor()
  hidden_synapse_2 = memristor.Memristor()

  # Set all the synapses to a high resistance state.
  input_synapse_11.set_resistance(hrs_set_voltage)
  input_synapse_12.set_resistance(hrs_set_voltage)
  input_synapse_21.set_resistance(hrs_set_voltage)
  input_synapse_22.set_resistance(hrs_set_voltage)
  hidden_synapse_1.set_resistance(hrs_set_voltage)
  hidden_synapse_2.set_resistance(hrs_set_voltage)

  # Fix the resistance values in the synapses by applying consecutive
  # positive bias voltage pulses.
  for _ in range(fixing_pulses):
    input_synapse_11.apply_voltage(positive_bias)
    input_synapse_12.apply_voltage(positive_bias)
    input_synapse_21.apply_voltage(positive_bias)
    input_synapse_22.apply_voltage(positive_bias)
    hidden_synapse_1.apply_voltage(positive_bias)
    hidden_synapse_2.apply_voltage(positive_bias)

  c_11 = []
  c_12 = []
  c_21 = []
  c_22 = []
  hidden_spikes_1 = []
  hidden_spikes_2 = []
  z_1 = []
  z_2 = []
  output_spikes = []
  for spikes_1, spikes_2 in zip(input_spikes_1, input_spikes_2):
    # The normalized current c_ij passes through the input_synapse_ij.
    # The current in the inhibitory synapses is inverted. The input voltage
    # is 1 V if the input neuron fired as many pulses as the encoding of TRUE
    # or more, else it is 0 V.
    c_11.append(utility.normalize(utility.burst(spikes_1, true) / input_synapse_11.read_resistance()))
    c_12.append(utility.invert(utility.normalize(utility.burst(spikes_1, true) / input_synapse_12.read_resistance())))
    c_21.append(utility.invert(utility.normalize(utility.burst(spikes_2, true) / input_synapse_21.read_resistance())))
    c_22.append(utility.normalize(utility.burst(spikes_2, true) / input_synapse_22.read_resistance()))

    hidden_spikes_1.append(hidden_neuron_1.apply_current(c_11[-1] + c_21[-1],
                                                         current_application_time)[0].count(spike))
    hidden_spikes_2.append(hidden_neuron_2.apply_current(c_12[-1] + c_22[-1],
                                                         current_application_time)[0].count(spike))

    # The normalized current z_i passes through the hidden_synapse_i. The
    # input voltage is 1 V if the hidden neuron fired as many pulses as
    # the encoding of TRUE or more, else it is 0 V.
    z_1.append(utility.normalize(utility.burst(hidden_spikes_1[-1], true) / hidden_synapse_1.read_resistance()))
    z_2.append(utility.normalize(utility.burst(hidden_spikes_2[-1], true) / hidden_synapse_2.read_resistance()))

    output_spikes.append(output_neuron.apply_current(z_1[-1] + z_2[-1],
                                                     current_application_time)[0].count(spike))

    # The neurons are resting until the next input.
    hidden_neuron_1.rest()
    hidden_neuron_2.rest()
    output_neuron.rest()

  # Store the data into a data frame.
  data = pd.DataFrame()
  data["input_spikes_1"] = input_spikes_1
  data["input_spikes_2"] = input_spikes_2
  data["c_11"] = c_11
  data["c_12"] = c_12
  data["c_21"] = c_21
  data["c_22"] = c_22
  data["hidden_spikes_1"] = hidden_spikes_1
  data["hidden_spikes_2"] = hidden_spikes_2
  data["z_1"] = z_1
  data["z_2"] = z_2
  data["output_spikes"] = output_spikes
  data["target"] = targets
  data["current_application_time"] = [current_application_time] * 4

  print(data)
Example #28
def solve_xor_complex_noisy_snn(learning_pulses, epochs, output_to_csv=False):
  hrs_set_voltage = -4
  current_application_time = 500
  spike = 1
  training_voltage = 1
  true = 20
  false = 0
  input_spikes_1 = [false, false, true, true]
  input_spikes_2 = [false, true, false, true]
  targets = [false, true, true, false]

  # Create a network with 2 input neurons, 2 hidden layer neurons
  # and 1 output neuron. The input neurons are simulated by their spikes.
  hidden_neuron_1 = spiking_neuron.SpikingNeuron()
  hidden_neuron_2 = spiking_neuron.SpikingNeuron()
  output_neuron = spiking_neuron.SpikingNeuron()

  # Create 6 synapses with pos_input_synapse_ij representing the excitatory
  # connection between the input neuron i and the hidden neuron j, and
  # pos_hidden_synapse_k representing the excitatory connection between
  # the hidden neuron k and the output neuron.
  pos_input_synapse_11 = memristor.Memristor()
  pos_input_synapse_12 = memristor.Memristor()
  pos_input_synapse_21 = memristor.Memristor()
  pos_input_synapse_22 = memristor.Memristor()
  pos_hidden_synapse_1 = memristor.Memristor()
  pos_hidden_synapse_2 = memristor.Memristor()

  # Create 6 synapses with neg_input_synapse_ij representing the inhibitory
  # connection between the input neuron i and the hidden neuron j, and
  # neg_hidden_synapse_k representing the inhibitory connection between
  # the hidden neuron k and the output neuron.
  neg_input_synapse_11 = memristor.Memristor()
  neg_input_synapse_12 = memristor.Memristor()
  neg_input_synapse_21 = memristor.Memristor()
  neg_input_synapse_22 = memristor.Memristor()
  neg_hidden_synapse_1 = memristor.Memristor()
  neg_hidden_synapse_2 = memristor.Memristor()

  # Set all the synapses to a high resistance state.
  pos_input_synapse_11.set_resistance(hrs_set_voltage)
  pos_input_synapse_12.set_resistance(hrs_set_voltage)
  pos_input_synapse_21.set_resistance(hrs_set_voltage)
  pos_input_synapse_22.set_resistance(hrs_set_voltage)
  pos_hidden_synapse_1.set_resistance(hrs_set_voltage)
  pos_hidden_synapse_2.set_resistance(hrs_set_voltage)

  neg_input_synapse_11.set_resistance(hrs_set_voltage)
  neg_input_synapse_12.set_resistance(hrs_set_voltage)
  neg_input_synapse_21.set_resistance(hrs_set_voltage)
  neg_input_synapse_22.set_resistance(hrs_set_voltage)
  neg_hidden_synapse_1.set_resistance(hrs_set_voltage)
  neg_hidden_synapse_2.set_resistance(hrs_set_voltage)

  c_11 = []
  c_12 = []
  c_21 = []
  c_22 = []
  hidden_spikes_1 = []
  hidden_spikes_2 = []
  z_1 = []
  z_2 = []
  output_spikes = []
  errors = []
  squared_errors = []
  epoch_numbers = []
  for epoch_number in range(1, epochs + 1):
    for spikes_1, spikes_2, target in zip(input_spikes_1, input_spikes_2, targets):
      epoch_numbers.append(epoch_number)

      # The normalized noisy current c_ij passes through the input_synapse_ij. The input
      # voltage is 1 V if the input neuron fired as many pulses as the encoding of
      # TRUE or more, else it is 0 V.
      pos_c_11 = utility.add_noise(utility.normalize(utility.burst(spikes_1, true)
                                                     / pos_input_synapse_11.read_resistance()))
      pos_c_12 = utility.add_noise(utility.normalize(utility.burst(spikes_1, true)
                                                     / pos_input_synapse_12.read_resistance()))
      pos_c_21 = utility.add_noise(utility.normalize(utility.burst(spikes_2, true)
                                                     / pos_input_synapse_21.read_resistance()))
      pos_c_22 = utility.add_noise(utility.normalize(utility.burst(spikes_2, true)
                                                     / pos_input_synapse_22.read_resistance()))

      # The normalized noisy current neg_c_ij passes through the neg_input_synapse_ij.
      # The current is inverted to represent the inhibitory connection. The input
      # voltage is 1 V if the input neuron fired as many pulses as the encoding
      # of TRUE or more, else it is 0 V.
      neg_c_11 = utility.invert(utility.add_noise(utility.normalize(utility.burst(spikes_1, true)
                                                                    / neg_input_synapse_11.read_resistance())))
      neg_c_12 = utility.invert(utility.add_noise(utility.normalize(utility.burst(spikes_1, true)
                                                                    / neg_input_synapse_12.read_resistance())))
      neg_c_21 = utility.invert(utility.add_noise(utility.normalize(utility.burst(spikes_2, true)
                                                                    / neg_input_synapse_21.read_resistance())))
      neg_c_22 = utility.invert(utility.add_noise(utility.normalize(utility.burst(spikes_2, true)
                                                                    / neg_input_synapse_22.read_resistance())))

      c_11.append(pos_c_11 + neg_c_11)
      c_12.append(pos_c_12 + neg_c_12)
      c_21.append(pos_c_21 + neg_c_21)
      c_22.append(pos_c_22 + neg_c_22)

      hidden_spikes_1.append(hidden_neuron_1.apply_current(c_11[-1] + c_21[-1],
                                                           current_application_time)[0].count(spike))
      hidden_spikes_2.append(hidden_neuron_2.apply_current(c_12[-1] + c_22[-1],
                                                           current_application_time)[0].count(spike))

      # The normalized noisy current pos_z_i passes through the pos_hidden_synapse_i.
      # The input voltage is 1 V if the hidden neuron fired as many pulses as
      # the encoding of TRUE or more, else it is 0 V.
      pos_z_1 = utility.add_noise(utility.normalize(utility.burst(hidden_spikes_1[-1], true)
                                                    / pos_hidden_synapse_1.read_resistance()))
      pos_z_2 = utility.add_noise(utility.normalize(utility.burst(hidden_spikes_2[-1], true)
                                                    / pos_hidden_synapse_2.read_resistance()))

      # The normalized noisy current neg_z_i passes through the neg_hidden_synapse_i.
      # The current is inverted to represent the inhibitory connection. The
      # input voltage is 1 V if the hidden neuron fired as many pulses as the
      # encoding of TRUE or more, else it is 0 V.
      neg_z_1 = utility.invert(utility.add_noise(utility.normalize(utility.burst(hidden_spikes_1[-1], true)
                                                                   / neg_hidden_synapse_1.read_resistance())))
      neg_z_2 = utility.invert(utility.add_noise(utility.normalize(utility.burst(hidden_spikes_2[-1], true)
                                                                   / neg_hidden_synapse_2.read_resistance())))

      z_1.append(pos_z_1 + neg_z_1)
      z_2.append(pos_z_2 + neg_z_2)

      output_spikes.append(output_neuron.apply_current(z_1[-1] + z_2[-1],
                                                       current_application_time)[0].count(spike))

      errors.append(target - output_spikes[-1])
      squared_errors.append(errors[-1] ** 2)

      # Update the synapses based on the error.
      # If the output neuron should have fired more times and it did not,
      # then strengthen the excitatory hidden synapses of the hidden neurons
      # that fired as many pulses as the encoding of TRUE or more. Otherwise,
      # do the equivalent update for the input synapses of the input neurons
      # that fired as many pulses as the encoding of TRUE or more.
      if utility.is_false_negative(errors[-1]):
        if utility.burst(hidden_spikes_1[-1], true) or utility.burst(hidden_spikes_2[-1], true):
          if utility.burst(hidden_spikes_1[-1], true):
            for _ in range(learning_pulses):
              pos_hidden_synapse_1.apply_voltage(training_voltage)
          if utility.burst(hidden_spikes_2[-1], true):
            for _ in range(learning_pulses):
              pos_hidden_synapse_2.apply_voltage(training_voltage)
        else:
          if utility.burst(spikes_1, true):
            for _ in range(learning_pulses):
              pos_input_synapse_11.apply_voltage(training_voltage)
              pos_input_synapse_12.apply_voltage(training_voltage)
          if utility.burst(spikes_2, true):
            for _ in range(learning_pulses):
              pos_input_synapse_21.apply_voltage(training_voltage)
              pos_input_synapse_22.apply_voltage(training_voltage)
      # If the output neuron should not have fired and it did, then strengthen
      # the inhibitory hidden synapses of the hidden neurons that fired as many
      # pulses as the encoding of TRUE or more.
      elif utility.is_false_positive(errors[-1]):
        if utility.burst(hidden_spikes_1[-1], true):
          for _ in range(learning_pulses):
            neg_hidden_synapse_1.apply_voltage(training_voltage)
        if utility.burst(hidden_spikes_2[-1], true):
          for _ in range(learning_pulses):
            neg_hidden_synapse_2.apply_voltage(training_voltage)

      # The neurons are resting until the next input.
      hidden_neuron_1.rest()
      hidden_neuron_2.rest()
      output_neuron.rest()

  # Store the data into a data frame.
  data = pd.DataFrame()
  data["input_spikes_1"] = input_spikes_1 * epochs
  data["input_spikes_2"] = input_spikes_2 * epochs
  data["c_11"] = c_11
  data["c_12"] = c_12
  data["c_21"] = c_21
  data["c_22"] = c_22
  data["hidden_spikes_1"] = hidden_spikes_1
  data["hidden_spikes_2"] = hidden_spikes_2
  data["z_1"] = z_1
  data["z_2"] = z_2
  data["output_spikes"] = output_spikes
  data["target"] = targets * epochs
  data["error"] = errors
  data["squared_error"] = squared_errors
  data["epoch"] = epoch_numbers
  data["learning_pulses"] = [learning_pulses] * epochs * 4
  data["training_voltage"] = [training_voltage] * epochs * 4
  data["current_application_time"] = [current_application_time] * epochs * 4

  if output_to_csv:
    utility.save_data(data, "./output", "solve-xor-complex-noisy-snn")

  # Plot the MSE as a function of epoch.
  utility.plot_mse(data)
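Both XOR models above call utility.burst and utility.invert without showing them; from the comments ("1 V if the neuron fired as many pulses as the encoding of TRUE or more, else 0 V", and an inverted current on inhibitory synapses), plausible sketches are:

def burst(spike_count, true_encoding):
    # assumed: 1 V input if the neuron fired at least as many pulses
    # as the encoding of TRUE, else 0 V
    return 1 if spike_count >= true_encoding else 0

def invert(current):
    # assumed: flip the sign of the current on an inhibitory synapse
    return -current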
Example #29
# Build an array of N predictors, each holding Pi images
i = 0
while i < N:
    j = 0
    path1 = sys.path[0] + "\\orl_faces\\s" + str(i + 1) + "\\"  # first-level path: one folder per class
    ims = []  # temporary storage for this class's images
    # read one group of images into the list
    while j < Pi:
        # read a single image
        path = path1 + str(j + 1) + ".pgm"  # second-level path: one file per image
        im = Image.open(path)
        im = im.resize(downsampling_size, sampling_type)  # resample
        # append this image to the class's list
        ims.append(list(im.getdata()))
        # normalize
        normalize(ims[j])
        j += 1
    # convert ims into a predictor and append it to the array
    p = np.matrix(ims[0])  # start with the first image
    j = 1  # one image is already in p
    # stack the remaining images into the temporary predictor
    while j < Pi:
        p = np.vstack((p, ims[j]))
        j += 1
    # transpose the predictor and append it to the list
    predictor.append(p.T)
    i += 1

# create an N*Pj array to store the membership classes
inner = []
result = []
Example #30
datafiles = ['dataspam']
k = [1,3,5,7,9]
metrics = [sim_cosine, sim_pearson, euclidean]
r_method = ['uniform', 'distance']
n = [True, False]
#percentage = [10, 20, 30]
#k = [5]
#percentage = [10]
prod = product(datafiles, k, metrics, r_method, n)
for filename, k, metric, r_m, n in prod:
    if filename == 'datahouse':
        X, y = getXy(filename)
    else:
        X, y = getXy(filename, delimiter=',')
    if n:
        normalize(X)
        print 'data is normalized'
    else:
        print 'data is not normalized'
    reg = KNeighborClassifier(X, y, k, metric, r_method=r_m)
    e = Evaluater(reg, test_percentage = 30)
    print '\t', e.evaluate()
    print '\n'
Example #31
    def __init__(self, df, model=Models.PROPHET,
                 upsample_freq=None,
                 train_test_split_ratio=Constants.TRAIN_TEST_SPLIT_RATIO.value,
                 epochs=Constants.EPOCHS.value,
                 initial_epoch=Constants.INITIAL_EPOCH.value,
                 batch_size=Constants.BATCH_SIZE.value,
                 sliding_window_size_or_time_steps=Constants.SLIDING_WINDOW_SIZE_OR_TIME_STEPS.value,
                 do_shuffle=True):
        logging.info("resample: {}. future_prediction: {}, epochs: {}, batch_size: {},"
                     " window_size: {}, eurons: {}"
                     .format(Constants.RESAMPLING_FREQ.value
                             , Constants.SHIFT_IN_TIME_STEP_TO_PREDICT.value
                             , epochs
                             , batch_size
                             , sliding_window_size_or_time_steps
                             , Constants.NEURONS.value
                             ))
        if logging.getLogger().isEnabledFor(logging.INFO):
            explore_data(df)
        # first step is to create a timestamp column as index to turn it to a TimeSeries data
        df.index = pd.to_datetime(df[ColumnNames.DATE.value] + df[ColumnNames.TIME.value],
                                  format='%Y-%m-%d%H:%M:%S', errors='raise')
        if 'Unnamed: 0' in df.columns:
            df.drop('Unnamed: 0', axis=1, inplace=True)

        # keep a copy of original dataset for future comparison
        self.df_original = df.copy()

        # we interpolate temperature using prophet to use it in a multivariate forecast
        temperature = ColumnNames.TEMPERATURE.value
        interpolated_df = facebook_prophet_filter(df, temperature,
                                                  Constants.FORECASTED_TEMPERATURE_FILE.value)
        interpolated_df.index = df.index
        df[[temperature]] = interpolated_df[[ColumnNames.FORECAST.value]]

        # lets also interpolate missing kwh using facebook prophet (or we could simply drop them)

        # now turn to kwh and make the format compatible with prophet
        power = ColumnNames.POWER.value
        interpolated_df = facebook_prophet_filter(df, power,
                                                  Constants.FORECASTED_POWER_FILE.value)
        interpolated_df.index = df.index
        df[[power]] = interpolated_df[[ColumnNames.FORECAST.value]]

        df = df.rename(columns={power: ColumnNames.LABEL.value})
        df.drop(columns=[ColumnNames.DATE.value,
                         ColumnNames.TIME.value,
                         ColumnNames.DAY_OF_WEEK.value,
                         ColumnNames.MONTH.value],
                inplace=True
                )
        if upsample_freq is not None:
            df = df.resample(upsample_freq).mean()

        # for any regression or forecasting it is better to work with normalized data
        self.transformer = QuantileTransformer()  # handle outliers better than MinMaxScalar
        features = ColumnNames.FEATURES.value
        normalized = normalize(df, features, transformer=self.transformer)

        # we use the last part (after 12/1/2013) that doesn't have temperature for testing
        cutoff_date = Constants.CUTOFF_DATE.value
        self.df = normalized[normalized.index < cutoff_date]
        self.testing = normalized[normalized.index >= cutoff_date]

        self.df[ColumnNames.DATE_STAMP.value] = self.df.index
        self.df_blocked = None
        self.train_test_split_ratio = train_test_split_ratio
        self.model_type = model
        self.train_X, self.test_X, self.train_test_split_index = self.train_test_split(self.df[features])
        self.train_y, self.test_y, _ = self.train_test_split(self.df[ColumnNames.LABELS.value])
        self.model_fit = None
        self.epochs = epochs
        self.initial_epoch = initial_epoch
        self.batch_size = batch_size
        self.history = None
        # the following are defined in sliding_window
        self.do_shuffle = do_shuffle
        self.val_idx = None
        self.shuffled_X = None
        self.shuffled_y = None
        self.train = None
        self.label = None
        self.train_size = None
        self.val_size = None

        if logging.getLogger().isEnabledFor(logging.INFO):
            explore_data(self.df)
Example #32
    def identifyKeyFrame(self, SIFTFeatures, indices, threshold=0.15):
        if len(indices) in [1, 2]:
            return [indices[0]]

        # build up graph structure
        numberOfNodes = len(SIFTFeatures)
        graph = Graph(numberOfNodes)

        for i in range(numberOfNodes):
            for j in range(i+1, numberOfNodes, 1):

                one = SIFTFeatures[i]
                two = SIFTFeatures[j]

                # check whether one and two are near duplicate
                pcaFeatures1 = pca.project(one, self.V, 36)
                pcaFeatures2 = pca.project(two, self.V, 36)

                # normalize features
                util.normalize(pcaFeatures1)
                util.normalize(pcaFeatures2)

                np.savetxt("pcafeature1", pcaFeatures1, delimiter="\t")
                np.savetxt("pcafeature2", pcaFeatures2, delimiter="\t")

                # interface with Java program to do matching
                rowX = pcaFeatures1.shape[0]
                rowY = pcaFeatures2.shape[0]
                column = pcaFeatures1.shape[1]

                matchCommand = "java match " +str(rowX)+ " " +str(rowY)+ " " +str(column)
                print matchCommand
                os.system(matchCommand)

                # plot according to match stored in "data" folder
                matchFile = open("data/match", 'r')
                lines = matchFile.readlines()

                matchSize = len(lines)
                oneSize = one.shape[0]
                twoSize = two.shape[0]

                ratio = matchSize / float(min(oneSize, twoSize))

                if ratio > threshold:
                    graph.connect(i, j)

        # Find nodes with largest edges in each connected component
        connectedComponents = graph.connectedComponent()

        tempkeyFrames = []
        for component in connectedComponents:
            edges = []

            for node in component:
                edges.append(graph.getNumOfEdges(node))

            maxIndice = 0
            maxValue = edges[0]
            for i in range(1, len(edges), 1):
                if maxValue < edges[i]:
                    maxValue = edges[i]
                    maxIndice = i

            # random choose one if there are many within one component
            maxEdges = []
            for i in range(len(edges)):
                if edges[i] == maxValue:
                    maxEdges.append(i)

            if len(maxEdges) == 1:
                tempkeyFrames.append(component[maxIndice])
            else:
                maxEdgeSize = len(maxEdges)
                randomNumber = randint(0, maxEdgeSize - 1)
                tempkeyFrames.append(component[randomNumber])

        keyFrames = []
        for indice in tempkeyFrames:
            keyFrames.append(indices[indice])

        return keyFrames
Example #33
def encrypt(plaintext, key):
    return normalize(plaintext).translate(maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ', normalize(key)))
Example #34
def decrypt(cipher, key):
    return normalize(cipher).translate(
        maketrans(normalize(key), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'))
Example #35
def encrypt(plaintext, key):
    return normalize(plaintext).translate(
        maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ', normalize(key)))
Example #36
a = [1, 2, 3]
b = [4, 5, 6]
c = [7, 8, 9]

mt1 = np.array([a, b, c], dtype=int)  # neatly stores the lists in one ndarray, but every row must have the same length
print(mt1)
mt1.transpose()  # note: transpose() returns a view and does not modify mt1 in place
print("after transpose", mt1)

mt2 = np.array([[1.0, 2.0], [3.0, 4.0]]).transpose()
print(mt2)
mt2.transpose()
print(mt2.transpose())  # the transpose of this matrix; transpose() is not an in-place modification, so the original data is unchanged

mt3 = np.vstack((a, b))
print(mt3)
mt3 = np.vstack((mt3, c))
print(mt3)

mt0 = np.array(a)
print(mt0)

matrix1 = np.matrix([1, 2, 3])
matrix2 = np.matrix([3, 2, 3])
m = matrix1 - matrix2
print(np.sqrt(np.sum(np.square(m))))

list1 = [[1, 2, 3], [4, 5, 6]]
list1[1][2] = 0
utility.normalize(list1[0])
print(list1)
Example #37
    locations1, features1 = sift.siftFeature(img1)
    locations2, features2 = sift.siftFeature(img2)

    # use PCA to reduce dimensions
    numberOfFeaturesOne = locations1.shape[0]
    numberOfFeaturesTwo = locations2.shape[0]

    features = np.vstack((features1, features2))
    V, S, mean = pca.pca(features)

    pcaFeatures1 = pca.project(features1, V, 36)
    pcaFeatures2 = pca.project(features2, V, 36)

    # normalize features
    util.normalize(pcaFeatures1)
    util.normalize(pcaFeatures2)

    np.savetxt("pcafeature1", pcaFeatures1, delimiter="\t")
    np.savetxt("pcafeature2", pcaFeatures2, delimiter="\t")

    # interface with Java program to do matching
    rowX = pcaFeatures1.shape[0]
    rowY = pcaFeatures2.shape[0]
    column = pcaFeatures1.shape[1]

    matchCommand = "java match " + str(rowX) + " " + str(rowY) + " " + str(
        column)
    print matchCommand
    os.system(matchCommand)
Example #38
def decrypt(cipher, key):
    return normalize(cipher).translate(maketrans(normalize(key),'ABCDEFGHIJKLMNOPQRSTUVWXYZ'))
Example #39
    def DataGenBB(self, train_start, train_end, DataStrs=None):
        generateFunc = ["original"]
        # generateFunc = ["original", "resize", "rotate", "mirror", "translate", "brightnessAndContrast" ]

        InputData = np.zeros(
            [self.batch_size * len(generateFunc), self.imSize, self.imSize, 3],
            dtype=np.float32)
        # InputLabel = np.zeros([self.batch_size * len(generateFunc), 7], dtype = np.float32)
        InputLabel = np.zeros([self.batch_size * len(generateFunc), 3],
                              dtype=np.float32)
        InputNames = []
        # InputNames = np.zeros([self.batch_size * len(generateFunc), 1], dtype = np.float32)
        count = 0
        print "train_start,train_end: ", train_start, train_end

        # for i in range(train_start,train_end):
        i = train_start
        while i < train_end:
            if self.ifMenpo39DataSet:
                imgName = self.imgs[i]
                imgNameHeader = imgName.split('.')[0]
                index = imgNameHeader[imgNameHeader.find('e') + 1:]
                labelsPTS = np.loadtxt(self.PTSDir + 'pts' + index + ".txt")
                img = cv2.imread(self.ImgDir + imgName)
            elif self.ifpreProcessedSemifrontal:
                imgName = self.imgs[i]

                # print "imgName: ", imgName
                imgNameHeader = imgName.split('.')[0]
                print "self.ImgDir + imgName: ", self.ImgDir + imgName
                img = cv2.imread(self.ImgDir + imgName)
                print "self.labelDir + imgNameHeader + .txt: ", self.labelDir + imgNameHeader + ".txt"
                label = np.loadtxt(self.labelDir + imgNameHeader + ".txt")
            else:
                strLine = DataStrs[i]
                strCells = strLine.rstrip(' \n').split(' ')
                imgName = strCells[0]

                label = np.array(strCells[1:]).astype(np.float)
                labelsPTS = label[:136].reshape([68, 2])
                img = cv2.imread(imgName)

            if self.debug:
                cv2.imshow("original", img)
                cv2.waitKey(0)

            if img is not None:
                #     print "img.shape: ", img.shape

                img = cv2.resize(img, (self.imSize, self.imSize))

                #     (w, h, _) = img.shape

                if self.ifMenpo39DataSet:
                    # x, y = self.unpackLandmarks(labelsPTS)
                    x, y = None, None
                elif self.ifpreProcessedSemifrontal:
                    # x, y = None, None
                    # labels = [None, None, None]
                    newImg = img
            #     else:
            #         x, y = ut.unpackLandmarks(labelsPTS, self.imSize)

            # for index in range(len(generateFunc)):
            #     method = generateFunc[index]
            #     # tag = random.choice(generateFunc)
            #     if method == "resize":
            #         newImg, newX, newY = ut.resize(img, x, y, xMaxBound = w, yMaxBound = h, random = True)
            #     elif method == "rotate":
            #         newImg, newX, newY = ut.rotate(img, x, y, w = w, h = h)
            #     elif method == "mirror":
            #         newImg, newX, newY = ut.mirror(img, x, y, w = w, h = h)
            #     elif method == "translate":
            #         newImg, newX, newY = ut.translate(img, x, y, w = w, h = h)
            #     elif method == "brightnessAndContrast":
            #         newImg, newX, newY = ut.contrastBrightess(img, x, y)
            #     elif method == "original":
            #         newImg, newX, newY = img, x, y
            #     else:
            #         raise "not existing function"
                if self.ifMenpo39DataSet:
                    label = labelsPTS
                elif self.ifpreProcessedSemifrontal:
                    pass
                else:
                    newXMin = min(newX)
                    newYMin = min(newY)
                    newXMax = max(newX)
                    newYMax = max(newY)
                    newXMean = (newXMax + newXMin) / 2.0
                    newYMean = (newYMax + newYMin) / 2.0
                    newEdge = max(newYMax - newYMin, newXMax - newXMin)

                    normX = ut.normalize(newX, self.imSize)
                    normY = ut.normalize(newY, self.imSize)
                    normXMean, normYMean, normEdge = ut.normalize(
                        newXMean, self.imSize), ut.normalize(
                            newYMean,
                            self.imSize), ut.normalize(newEdge, self.imSize)
                    label = np.array([normXMean, normYMean, normEdge])

                InputData[count, ...] = newImg
                InputLabel[count, ...] = label
                InputNames.append(imgName)
                # InputNames[count,...] = imgName

                # print "InputNames.shape: ", len(InputNames)

            else:
                print "cannot : ", imgName

            print "count: ", count
            count += 1
            i += 1

        return InputData, InputLabel, np.asarray(InputNames)