Code example #1
def setWH(self, dt):
    # Convert the widget's centre and its background image's extent into
    # scaled window coordinates and store the min/max bounds as pixel ranges.
    x, y = director.window.width, director.window.height
    nmin = sc.scale(int(self.x - (self.bgImage.width / 2)),
                    int(self.y - (self.bgImage.height / 2)))
    nmax = sc.scale(int(self.x + (self.bgImage.width / 2)),
                    int(self.y + (self.bgImage.height / 2)))
    self.width_range = [int(nmin[0]), int(nmax[0])]
    self.height_range = [int(nmin[1]), int(nmax[1])]
Code example #2
def setWH(self, dt):
    # As in example #1, but positions are offset by the parent node and the
    # bounds come from the widget's own width/height.
    x, y = director.window.width, director.window.height
    nmin = sc.scale(int(self.parent.x + (self.x - (self.width / 2))),
                    int(self.parent.y + (self.y - (self.height / 2))))
    nmax = sc.scale(int(self.parent.x + (self.x + (self.width / 2))),
                    int(self.parent.y + (self.y + (self.height / 2))))
    self.width_range = [int(nmin[0]), int(nmax[0])]
    self.height_range = [int(nmin[1]), int(nmax[1])]
Code example #3
def setWH(self, dt):
    # As above, offset by (self.px, self.py); when a scroll manager is
    # attached, shift the vertical bounds by its current scroll offset.
    x, y = director.window.width, director.window.height
    nmin = sc.scale(int((self.px + self.x) - (self.bgImage.width / 2)),
                    int((self.py + self.y) - (self.bgImage.height / 2)))
    nmax = sc.scale(int((self.px + self.x) + (self.bgImage.width / 2)),
                    int((self.py + self.y) + (self.bgImage.height / 2)))
    if self.scrollManager is not None:
        scroll_y = self.scrollManager.get_children()[0].y
        nmin = [nmin[0], nmin[1] + scroll_y]
        nmax = [nmax[0], nmax[1] + scroll_y]
    self.width_range = [int(nmin[0]), int(nmax[0])]
    self.height_range = [int(nmin[1]), int(nmax[1])]
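All three setWH variants delegate the coordinate mapping to sc.scale, which is not included in these excerpts. Judging from the use of director.window, it appears to map design-space coordinates onto the current cocos2d window; a minimal sketch under that assumption (the reference resolution and module layout are hypothetical, not the project's) could be:

from cocos.director import director

# Hypothetical reference resolution the layouts were designed for.
DESIGN_W, DESIGN_H = 1280, 720

def scale(x, y):
    # Map design-space coordinates to the current window size (sketch only).
    win_w, win_h = director.window.width, director.window.height
    return x * win_w / DESIGN_W, y * win_h / DESIGN_H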
Code example #4
def normalize(allTriPolygones):
    # Translation: move the model's centroid to the origin.
    mp, transed_tripoints = translation.translate(allTriPolygones)
    '''
    # Rotation (disabled here)
    rotated_points = rotation.rotate(transed_tripoints)
    '''
    # Scaling: returns the final preprocessed model.
    # final_tripoints is a 2-D n x 3 array; each row is one triangle vertex.
    final_tripoints = scaling.scale(mp, transed_tripoints)

    return final_tripoints
Code example #5
File: gradient_descent.py  Project: tadasv/ml
def main():
    # Toy data set: three feature rows and their target values.
    inputs = [
        [1, 1, 4],
        [1, 2, 5],
        [1, 3, 6],
    ]
    output = [3, 2, 1]

    # Fit once on the raw features, then once on the scaled features.
    print("## NON SCALED ##")
    test(inputs, output)
    print("## SCALED ##")
    from scaling import scale
    scaled = scale(inputs)
    test(scaled, output)
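The scale function imported from the scaling module is not part of this listing. For gradient descent it is typically some per-column feature rescaling; a minimal sketch under that assumption (not the project's actual implementation) is:

def scale(inputs):
    # Column-wise min-max scaling to [0, 1]; constant columns map to 0.
    cols = list(zip(*inputs))
    lo = [min(c) for c in cols]
    hi = [max(c) for c in cols]
    return [
        [0.0 if hi[j] == lo[j] else (v - lo[j]) / (hi[j] - lo[j])
         for j, v in enumerate(row)]
        for row in inputs
    ]

Applied to the inputs above, this maps the second and third columns onto [0, 1] and zeroes the constant first column.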
Code example #6
def normalize(motion, translate='', rotate='', scale='', clean=True):
    """!
    Apply normalization to a motion.

    The input motion is not modified.

    @param motion numpy.array: The motion to normalize
    @param translate: The normalization for translating the motions
    @param rotate: The normalization for rotating the motions
    @param scale: The normalization for scaling the motions
    @param clean: Remove duplicate points and large jumps, default True
    @return: The normalized motion and the normalization parameters
    """
    out = motion
    translationRef = translation.translate(out[:, 1:4], translate)
    rotationRef = rotation.rotate(out[:, 1:8], rotate)
    scalingRef = scaling.scale(out[:, 1:4], scale)
    out, removedPoints = cleaning.clean(motion)
    return out, translationRef, rotationRef, scalingRef
Code example #7
File: main.py  Project: daix6/DIP
def test_scale(filename, dist):
  im = Image.open(filename)

  # Different target-size cases from the given problems.
  cases = []
  cases.append([(192, 128), (96, 64), (48, 32), (24, 16), (12, 8)])  # 1.
  cases.append([(300, 200)])  # 2.
  cases.append([(450, 300)])  # 3.
  cases.append([(500, 200)])  # 4.

  for n, case in enumerate(cases):
    print("Scaling case %d" % n)

    for size in case:
      out = scale(im, size)

      out_name = "scaling-%dx%d.png" % out.size
      out_path = os.path.join(dist, out_name)
      out.save(out_path)
      print("    Picture %s has been saved to the assets folder!" % out_name)
Code example #8
def normalize(allPolygones, name):
    # Translation: move the model's centroid to the origin.
    mp, transed_points = translation.translate(allPolygones)
    # Rotation.
    rotated_points = rotation.rotate(transed_points)
    # Scaling: returns the final preprocessed model.
    # final_points is a 2-D n x 3 array; each row is one triangle vertex.
    final_points = scaling.scale(mp, rotated_points)
    # final_points starts out as a list; convert it to a numpy.ndarray.
    final_points = np.array(final_points)

    #extractEigvect(final_points, name)
    #eigvectflat(final_points, name)

    # Test generating the 2-D projection histogram.
    # histogram.getHist(final_points)
    # Regroup the flat vertex array into triangles (three consecutive rows each).
    allTris = []
    for i in range(len(final_points) // 3):
        allTris.append(final_points[i * 3:i * 3 + 3])
    #allTris = np.array(allTris)
    # print 'type(allTris): ', type(allTris)
    histogram.getTriHist(allTris, final_points, name, 200, 200)
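Neither translation.translate nor scaling.scale is included in these listings. Going only by the comments above (centre the model on its centroid, then scale it into a fixed range), a self-contained sketch of that kind of normalization, with hypothetical names, might look like:

import numpy as np

def center_and_scale(points):
    # points: (n, 3) array of triangle vertices.
    pts = np.asarray(points, dtype=float)
    # Move the centroid to the origin.
    centered = pts - pts.mean(axis=0)
    # Scale uniformly so the model fits inside a unit sphere.
    radius = np.linalg.norm(centered, axis=1).max()
    return centered / radius if radius > 0 else centered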
Code example #9
tf.compat.v1.keras.backend.set_session(session)

parser = argparse.ArgumentParser()
parser.add_argument('-p', '--process', action='store_true')
options = parser.parse_args()

lighthouse_image_dir = "l"
other_image_dir = "o"

# Scale the data if flag exists
if options.process:
    shutil.rmtree("./" + lighthouse_image_dir)
    shutil.rmtree("./" + other_image_dir)
    os.mkdir(lighthouse_image_dir)
    os.mkdir(other_image_dir)
    scaling.scale("lighthouses", lighthouse_image_dir, (64, 64))
    scaling.scale("365", other_image_dir, (64, 64))

# Read images in as grayscale images and store as numpy arrays
# Label the classes as 1 for lighthouse, 0 for other
images = []
y = []
for file in os.listdir(lighthouse_image_dir):
    image = cv.imread(lighthouse_image_dir + "/" + file, cv.IMREAD_GRAYSCALE)
    images.append(image)
    y.append(1)

for file in os.listdir(other_image_dir):
    images.append(cv.imread(other_image_dir + "/" + file, cv.IMREAD_GRAYSCALE))
    y.append(0)
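The listing stops after the images and labels are collected. Before training a Keras model they would still need to be stacked into arrays and normalized; the step below is an assumed continuation, not part of the original file:

import numpy as np

# Stack the 64x64 grayscale images into a (N, 64, 64, 1) float array in [0, 1]
# and the labels into a vector, then shuffle them together.
X = np.asarray(images, dtype=np.float32).reshape(-1, 64, 64, 1) / 255.0
y = np.asarray(y, dtype=np.int32)
idx = np.random.permutation(len(X))
X, y = X[idx], y[idx]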
Code example #10
                lis1.append(i.text.strip())
        results['directions'] = lis1
        # Fill out this list comprehension and get each element's text
        #

        pattern = re.compile(r'(window.lazyModal\(\')(.*)\'\);')
        nutr_link = page_graph.find("script", text=pattern)

        nutr_link_url = pattern.search(nutr_link.text).group(2)
        page_html_nutr = requests.get(nutr_link_url)
        page_graph_nutr = BeautifulSoup(page_html_nutr.content, 'html.parser')
        # print(page_graph_nutr)
        for i in page_graph_nutr.find_all('span', {'class': 'nutrient-name'}):
            # print("Nutrients")
            # print(i.text)
            lis2.append(i.text)
        results['nutrients'] = lis2
        print("fin", type(results))
        print(results)
        return results


rf = RecipeFetcher()
# meat_lasagna = rf.search_recipes('meat lasagna')[0]
print("Please input URL")
url = input()
# print(type(meat_lasagna))
#
results = rf.scrape_recipe(url)
res = scaling.scale(results, 2, "down")
Code example #11
import prePlot
import plot
import readTRC
import RRA
import CMC
import directories
import JR
import scaling  # needed for scaling.scale() below

allDir = list(directories.main(directories))
paramsDir = allDir[1]
idResultsDir = allDir[7]
soResultsDir = allDir[8]
cmcResultsDir = allDir[10]
jrResultsDir = allDir[11]

scaling.scale()
# IK.inverseKinematics()
# ID.inverseDynamics()

# Static Optimization
"""
input parameters 
time - t0 & t1
"""
# t0 = 0
# t1 = 2
# SO.run(t0,t1)

# # readTRC.plot()
# RRA.reduceResidualActuators()
"""