def main():
    """Demo driver: show a splash image, then run display demos forever."""
    # create a connection to the display with size 8
    with matrixdisplay.MatrixDispaly(DDIM) as display:
        # Splash: show an image for five seconds.
        im = pil.Image.open('img/mario.jpg')
        display.draw_frame(to_pix(im))
        time.sleep(5)

        # Demo 1: Game of Life, restarted each time a run ends.
        # NOTE(review): this loop never exits, so everything below it is
        # unreachable dead code kept from earlier experiments — confirm.
        while True:
            game = gameoflife.GameOfLife(display, (179, 240, 0), 100)
            display.clear_screen()
            game.play()

        # Demo 2 (unreachable): animated crossing diagonal lines.
        fr = frame.Frame(DDIM)
        while True:
            for i in range(0, 16):
                draw_line(fr, (i, 0), (15 - i, 15), (250, 0, 10))
                draw_line(fr, (0, i), (15, 15 - i), (0, 0, 255))
                display.draw_frame(fr)
                #time.sleep(0.1)
                fr = frame.Frame(DDIM)
                #time.sleep(0.1)

        # Demo 3 (unreachable): cycle solid fill colours.
        t = 0.09
        while True:
            display.fill_screen((0, 255, 0))
            time.sleep(t)
            display.fill_screen((0, 255, 255))
            time.sleep(t)
            display.fill_screen((255, 255, 0))
            time.sleep(t)
            display.fill_screen((255, 255, 255))
            time.sleep(t)
def generate_frames(name_of_video, source_of_frames, params):
    """Run MTCNN face detection on each extracted frame image.

    Args:
        name_of_video: base name used to locate "<name>_frames/<name>NNN.jpg".
        source_of_frames: unused here; kept for signature parity with the
            other generate_frames variants.
        params: dict; params["frames"] is the number of frames to process.

    Returns:
        list of frame.Frame objects, one per video frame.
    """
    frames = []
    # PERF FIX: construct the detector once — the original rebuilt the MTCNN
    # model inside the loop, reloading weights for every single frame.
    detector = MTCNN()
    for counter in range(1, params["frames"] + 1):
        print("Frame", counter, "/", params["frames"])
        left = []
        right = []
        top = []
        bottom = []
        confidence = []
        image = pyplot.imread(
            name_of_video + "_frames/" + name_of_video + "{0:0=3d}.jpg".format(counter))
        faces = detector.detect_faces(image)
        for face in faces:
            # MTCNN boxes are (left, top, width, height); convert to edges.
            l, t, w, h = face['box']
            left.append(l)
            right.append(l + w)
            top.append(t)
            bottom.append(t + h)
            confidence.append(face['confidence'])
        frames.append(frame.Frame(left, right, top, bottom, confidence, counter))
    return frames
def generate_frames(name_of_video, source_of_frames, params):
    """Load per-frame face bounding boxes from pre-computed text files.

    Each line of "<name>_face_bounds_<source>/<name>NNN.txt" holds one box
    as (left, top, right, bottom, confidence); negative left/top values are
    clamped to zero.

    Returns a list of frame.Frame objects, one per video frame.
    """
    frames = []
    for counter in range(1, params["frames"] + 1):
        path = (name_of_video + "_face_bounds_" + source_of_frames + "/"
                + name_of_video + "{0:0=3d}.txt".format(counter))
        with open(path) as file:
            left = []
            right = []
            top = []
            bottom = []
            confidence = []
            for line in file:
                box = parse_line(line)
                # Clamp negative left/top coordinates to the image edge.
                left.append(max(0, int(box[0])))
                top.append(max(0, int(box[1])))
                right.append(int(box[2]))
                bottom.append(int(box[3]))
                confidence.append(float(box[4]))
            frames.append(frame.Frame(left, right, top, bottom,
                                      confidence, counter))
    return frames
def run(self):
    """Stream a scrolling three-band colour image to self.sender until 'q'.

    Builds a 600x600 RGB image split into red/green/blue horizontal bands,
    then scrolls the bands one row per iteration while sending each frame.
    """
    f = frame.Frame()
    img2 = numpy.zeros((600, 600, 3), numpy.uint8)
    # PERF FIX: paint the three 200-row bands with vectorized slice
    # assignment — the original filled 360,000 pixels one-by-one in
    # nested Python loops.
    img2[0:200, :] = (255, 0, 0)
    img2[200:400, :] = (0, 255, 0)
    img2[400:600, :] = (0, 0, 255)
    index = 0
    while True:
        cv2.imshow('parser', img2)
        f.from_array(index, img2)
        index = (index + 1) % 600
        # Advance each band's leading edge by one full row so the bands
        # appear to scroll downward (again vectorized per row).
        img2[(0 + index) % 600, :] = (255, 0, 0)
        img2[(200 + index) % 600, :] = (0, 255, 0)
        img2[(400 + index) % 600, :] = (0, 0, 255)
        self.sender.send_frame(f)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
def create_scene_on_click(b):
    """Button callback: build a new scene and install it as the current frame."""
    global current_frame
    # Re-arm the button so the next click regenerates the scene again.
    scene_buttons.get_button(1).on_release = create_scene_on_click
    scene = create_scene()
    if scene is None:
        # create_scene() returns None when the user exits from the loading screen.
        return
    still = create_still_surface(scene)
    current_frame = frame.Frame([still, scene_buttons],
                                [scene_buttons.button_list])
def computeMotionTechnicalFrame(self, aqui, segName, dictRef, method, options=None):
    """Compute the per-acquisition-frame motion of a segment's technical frame (TF).

    :param aqui: btk acquisition providing marker trajectories
    :param segName: name of the segment to process
    :param dictRef: technical-frame reference dictionary (not read in this body)
    :param method: enums.motionMethod member; only Sodervisk is implemented here
    :param options: extra options (unused)
    :raises Exception: if ``method`` is not Sodervisk
    """
    segPicked = self.getSegment(segName)
    # Reset any previously computed motion frames.
    segPicked.getReferential("TF").motion = []
    if method == enums.motionMethod.Sodervisk:
        tms = segPicked.m_tracking_markers
        for i in range(0, aqui.GetPointFrameNumber()):
            visibleMarkers = btkTools.getVisibleMarkersAtFrame(aqui, tms, i)
            # Construction of the input of the Sodervisk fit: the static
            # (calibration) marker cloud vs. the current-frame cloud.
            arrayStatic = np.zeros((len(visibleMarkers), 3))
            arrayDynamic = np.zeros((len(visibleMarkers), 3))
            j = 0
            for vm in visibleMarkers:
                arrayStatic[j, :] = segPicked.getReferential("TF").static.getNode_byLabel(vm).m_global
                arrayDynamic[j, :] = aqui.GetPoint(vm).GetValues()[i, :]
                j += 1
            # Least-squares rigid transform between the two marker clouds.
            Ropt, Lopt, RMSE, Am, Bm = motion.segmentalLeastSquare(arrayStatic, arrayDynamic)
            # Compose the fitted transform with the static TF pose.
            R = np.dot(Ropt, segPicked.getReferential("TF").static.getRotation())
            tOri = np.dot(Ropt, segPicked.getReferential("TF").static.getTranslation()) + Lopt

            cframe = frame.Frame()
            cframe.setRotation(R)
            cframe.setTranslation(tOri)
            cframe.m_axisX = R[:, 0]
            cframe.m_axisY = R[:, 1]
            cframe.m_axisZ = R[:, 2]
            segPicked.getReferential("TF").addMotionFrame(copy.deepcopy(cframe))
    else:
        raise Exception("[pyCGM2] : motion method doesn t exist")
def to_framelist(file_path, maxseq):
    """Pack a file's contents into Frame objects serialized as bytes.

    Lines are accumulated until at least ``lenlimit`` characters are
    buffered, then emitted as one Frame; sequence numbers wrap modulo
    ``maxseq``.

    Usage:
        import file2frame as f2f
        f2f.to_framelist(path, 10)

    Args:
        file_path (string): The file path.
        maxseq (int): Sequence numbers wrap around modulo this value.

    Returns:
        list: Frame objects serialized to bytes, covering the whole file.

    Raises:
        TypeError: If ``file_path`` does not name an existing file.
    """
    if not os.path.isfile(file_path):
        raise TypeError(file_path + " does not exist")
    lenlimit = 2000
    frame_list = []
    seq = 0
    data = ''
    with open(file_path) as f:
        for line in f:
            data += line
            if len(data) < lenlimit:
                continue
            frame_obj = frame.Frame(None, seq, None, data)
            frame_list.append(frame_obj.to_bytes())
            data = ''
            seq = (seq + 1) % maxseq
    # BUG FIX: the original discarded any trailing data shorter than
    # lenlimit, so the file's tail was silently lost. Flush the remainder.
    if data:
        frame_list.append(frame.Frame(None, seq, None, data).to_bytes())
    return frame_list
def to_pix(image):
    """Convert a PIL image to a DDIM x DDIM Frame of 8-bit RGB pixels."""
    rgb = image.resize((DDIM, DDIM)).convert('RGB')  # AA maybe bad
    pixels = frame.Frame(DDIM)  # 8 bit for 0-255 values of RGB
    for row in range(DDIM):
        for col in range(DDIM):
            # Indices are transposed between image and frame; per the
            # original note this renders correctly rotated.
            pixels.set_pixel(col, row, rgb.getpixel((row, col)))
    return pixels
def getFrame(self):
    """Return the frame this proc is running.

    @rtype: Frame
    @return: The frame this proc is running."""
    request = host_pb2.ProcGetFrameRequest(proc=self.data)
    response = self.stub.GetFrame(request, timeout=Cuebot.Timeout)
    return frame.Frame(response.frame)
def run(self, frame_rate=30):
    """Start the connection listener, then the frame ticker.

    :param frame_rate: frames per second passed through to frame.Frame
    """
    self.connection = connection.Connection(
        sock=self.server_socket,
        callback=self.handle_new_connection)
    self.connection.start()

    self.frame = frame.Frame(
        frame_rate=frame_rate,
        callback=self.callback_frame)
    self.frame.start()
def testIsStrike(testName, frameNumber, rolls, expectedStrike):
    """Apply rolls to a Frame and check isStrike(); print PASS or FAIL.

    NOTE(review): the PASS/FAIL prints were garbled in the original source
    ('PASS: '******'FAIL: ...'); reconstructed as a standard if/else matching
    the surviving FAIL-branch message format — confirm against the original.
    """
    aFrame = frame.Frame(frameNumber)
    for roll in rolls:
        aFrame.addRoll(roll)
    if aFrame.isStrike() == expectedStrike:
        print('PASS: ' + testName)
    else:
        print('FAIL: ' + testName + ' expected ' + str(expectedStrike)
              + ' but got ' + str(aFrame.isStrike()))
def createframe(self):
    """Create a fresh temporary frame; dump frame state when verbose."""
    self.temporary_frame = frame.Frame()
    if verbose:
        # Debug dump of the three frame scopes: global, temporary, local-top.
        print("GF:", self.global_frame.frame_variables)
        if self.temporary_frame:
            print("TF", self.temporary_frame.frame_variables)
        if self.frame_stack.frame_stack:
            print("LF-top:", self.frame_stack.top().frame_variables)
    return 0
def create_fractal_screen():
    """Build a demo frame showing a few fractal trees, a bush and a mountain."""
    floor = 600
    # Creation order preserved (create_mountain consumes random state).
    tree_a = create_tree(550, floor, 600)
    tree_b = create_bush(800, floor, 600)
    peak = create_mountain(250, floor, random.randrange(200, 500))

    bg = Surface_Drawable((WIN_WIDTH, WIN_HEIGHT))
    bg.fill(c.SKY)
    # Ground: dark-green strip from the floor line down to the bottom edge.
    pygame.draw.rect(bg, c.DARK_GREEN,
                     (0, floor, WIN_WIDTH, WIN_HEIGHT - floor))
    # Draw order: background first, then mountain, then the trees.
    return frame.Frame([bg, peak, tree_a, tree_b])
def testCorrectNumberOfRolls(testName, frameNumber, rolls, expectedNumberOfRolls):
    """Apply rolls to a Frame and check how many were recorded; print PASS/FAIL.

    NOTE(review): the PASS/FAIL prints were garbled in the original source;
    reconstructed as an if/else matching the surviving FAIL-branch message
    format — confirm against the original.
    """
    aFrame = frame.Frame(frameNumber)
    for roll in rolls:
        # addRoll's return value (whether the roll was accepted) is not
        # needed here; the recorded-roll count is checked instead.
        aFrame.addRoll(roll)
    if len(aFrame.rolls) == expectedNumberOfRolls:
        print('PASS: ' + testName)
    else:
        print('FAIL: ' + testName + ' expected ' + str(expectedNumberOfRolls)
              + ' but got ' + str(len(aFrame.rolls)))
def recv_frame(self):
    """Receive one frame: a control header on a TCP connection, then pixel
    chunks on the data socket, reassembled into a frame.Frame.

    Protocol as implemented here:
      1. Accept a control connection; read a packed header (frame_id, x, y, z).
      2. Ack with 'Ok', then collect data chunks until the control peer
         sends anything further on the control socket.
      3. Reassemble chunks in index order, zero-filling missing chunks and
         trimming the final partial chunk.

    :return: the reassembled frame.Frame
    """
    conn, addr = self.control_socket.accept()
    inputs = [conn, self.data_socket]
    with conn:
        print('Connected by', addr)
        data_control = conn.recv(cns.control_buffer_size)
        frame_id, x, y, z = frame.control_packer.unpack(data_control)
        # Index of the last expected chunk (the final chunk may be partial).
        estimated_packets = int(round(x * y * z / cns.chunk_size)) - 1
        h = {}  # chunk index -> unpacked chunk payload
        conn.send('Ok'.encode())
        loop_val = True
        while loop_val:
            readable, writable, exceptional = select.select(
                inputs, [], inputs)
            for s in readable:
                if s is self.data_socket:
                    # +100 bytes of headroom for the chunk header fields.
                    data_in = self.data_socket.recv(cns.chunk_size + 100)
                    data = cns.picture_chunk_packer.unpack(data_in)
                    #print(data[0])
                    h[data[0]] = data[1:]
                    #if(data[0]==estimated_packets):
                    #    print("out loop")
                    #    loop_val = 0
                    #    break
                if s is conn:
                    # Any traffic on the control socket ends the receive loop.
                    data = conn.recv(cns.control_buffer_size)
                    if data:
                        print("out loop")
                        loop_val = False
                        break
        serial_data = []
        for d in range(0, estimated_packets + 1):
            if d not in h:
                # Missing chunk: substitute zero bytes so offsets stay aligned.
                print("miss chunk {}".format(d))
                h[d] = bytearray()
                for sd in range(0, cns.chunk_size):
                    h[d].append(0)
            if d == estimated_packets:
                # Trim the last chunk to the true remainder of the payload.
                h[d] = h[d][0:(x * y * z) % cns.chunk_size]
            serial_data += h[d]
    f = frame.Frame()
    f.from_bytes(data_control, bytes(serial_data))
    print(f.to_string())
    return f
def getFrames(self, **options):
    """Returns the list of up to 1000 frames from within the job.

    frames = job.getFrames(show=["edu","beo"], user="someuser")
    frames = job.getFrames(show="edu", shot="bs.012")

    Allowed: offset, limit, states+, layers+, frameset, changedate

    @rtype: list<Frame>
    @return: List of frames"""
    criteria = FrameSearch.criteriaFromOptions(**options)
    request = job_pb2.JobGetFramesRequest(job=self.data, req=criteria)
    response = self.stub.GetFrames(request, timeout=Cuebot.Timeout)
    return [frame.Frame(frm) for frm in response.frames.frames]
def vypis_ramcov_hex(packets, p_name_by_val, p_val_by_name):
    """Print a per-frame protocol breakdown (output text is Slovak) and
    return the list of Frame objects built for Ethernet II frames.

    Also collects source IPv4 addresses and prints IP statistics at the end.
    """
    src_ip_addresses = []
    frame_objects = []
    i = 0
    for p in packets:
        i += 1
        print(f'rámec {i}')
        dlzka_ramca = p.packet_len
        # Wire length adds the 4-byte FCS; minimum transmitted size is 64 B.
        dlzka_po_mediu = (dlzka_ramca + 4) if (dlzka_ramca >= 60) else 64
        l2_protocol = get_l2_protocol_from_packet(p)
        src_mac, dst_mac = get_mac_addresses(p)
        l3_protocol = dst_ip = src_ip = l4_protocol = None
        print(f'dĺžka rámca poskytnutá pcap API - {dlzka_ramca} B')
        print(f'dĺžka rámca prenášaná po médiu - {dlzka_po_mediu} B')
        print(l2_protocol)
        print(f'Zdrojová MAC adresa:{group_by_two(src_mac)}')
        print(f'Cieľová MAC adresa:{group_by_two(dst_mac)}')
        if l2_protocol == 'Ethernet II':
            l3_protocol = get_l3_protocol_from_packet(p, p_name_by_val)
            if l3_protocol is not None:
                print(l3_protocol)
            if l3_protocol == 'IPv4':
                src_ip, dst_ip = get_ip_addresses(p)
                add_src_ip_to_list(src_ip, src_ip_addresses)
                l4_protocol = get_l4_protocol_from_ip_packet(
                    p, p_name_by_val)
                print(f'zdrojová IP adresa: {dec_ip_from_bytes(src_ip)}')
                print(f'cieľová IP adresa: {dec_ip_from_bytes(dst_ip)}')
                print(l4_protocol)
            # Frame objects are only built for Ethernet II frames.
            frame = f.Frame(i, dlzka_ramca, l2_protocol, dst_mac, src_mac,
                            l3_protocol, dst_ip, src_ip, l4_protocol, p.packet)
            frame_objects.append(frame)
        elif l2_protocol == 'LLC':
            l3_protocol = get_llc_l3_protocol_from_packet(p, p_name_by_val)
            if l3_protocol is not None:
                print(l3_protocol)
            if l3_protocol == 'SNAP':
                l4_protocol = get_l4_protocol_from_snap_packet(
                    p, p_name_by_val)
                if l4_protocol is not None:
                    print(l4_protocol)
        else:
            print('Unknown Protocol')
        print_packet_bytes(p)
        print('\n')
    ip_statistics(src_ip_addresses)
    return frame_objects
def prepareLevel(self):
    """Spawn the boss above the screen, pre-build its bullet patterns per
    row, and reset the level's frame timer and act counter."""
    # Boss starts off-screen (negative y) so it can fly in.
    x = 352
    y = -55
    self.boss = chara.makeBoss(x, y)
    self.boss_has_entered = False
    # One preparation routine per bullet row; row 0 presumably gets
    # missiles, the rest circles — TODO confirm against bullet_rows.
    prepOrder = [self.prepareMissiles, prepareCircle, prepareCircle, prepareCircle]
    for i in range(self.bullet_rows):
        if prepOrder[i] is not prepareCircle:
            prepOrder[i](i)
        else:
            # Circles spawn relative to the boss sprite's current position.
            prepareCircle(self.bullets_perRow[i],
                          (self.boss.rect.x + 48, self.boss.rect.y + 27), 0,
                          [self.bulletLists[i], self.boss.children, self.allSprites])
    addToGroups(self.boss, [self.enemyLists[0], self.allSprites])
    self.frame = frame.Frame(40)  # presumably 40 = frame rate — TODO confirm
    self.act = 0
def init_background(self):
    """Pre-render the isometric tile map onto the background surface."""
    self.background = gfx.Surface((800, 600))
    bg_frame = frm.Frame(self.background, 0, 0)
    bg_frame.X_OFFSET, bg_frame.Y_OFFSET = 20, 300
    for tile in self.model.tiles:
        if isinstance(tile, tiles.Enterance):
            continue  # entrance tiles are not baked into the background
        # Isometric projection of the tile's grid position.
        screen_x = tile.pos.x * 32 + tile.pos.y * 32 + bg_frame.X_OFFSET
        screen_y = -tile.pos.x * 16 + tile.pos.y * 16 + bg_frame.Y_OFFSET
        tile_surf = resman.get("game.tile%d_surf" % tile.type)
        tile_surf.nr = random.randint(0, 4)
        tile_surf.draw(bg_frame.surface, geo.Vec2D(screen_x, screen_y))
def computeMotionAnatomicalFrame(self, aqui, segName, dictAnatomic, options=None):
    """Compute per-frame motion of a segment's anatomical frame from its
    technical-frame motion and the static TF->anatomical relative rotation.

    :param aqui: btk acquisition
    :param segName: segment name
    :param dictAnatomic: anatomical definition; labels[3] names the origin node
    :param options: unused
    """
    segPicked = self.getSegment(segName)
    # Reset any previously computed motion frames.
    segPicked.anatomicalFrame.motion = []

    # Trajectory of the anatomical origin node, expressed via the technical frame.
    ndO = str(dictAnatomic[segName]['labels'][3])
    ptO = segPicked.getReferential("TF").getNodeTrajectory(ndO)

    csFrame = frame.Frame()
    for i in range(0, aqui.GetPointFrameNumber()):
        # Anatomical rotation = TF motion rotation composed with the
        # static relative anatomical matrix.
        R = np.dot(segPicked.getReferential("TF").motion[i].getRotation(),
                   segPicked.getReferential("TF").relativeMatrixAnatomic)
        csFrame.update(R, ptO)
        # deepcopy: csFrame is mutated in place on every iteration.
        segPicked.anatomicalFrame.addMotionFrame(copy.deepcopy(csFrame))
def run(self):
    """Consume frames, run Canny edge detection, forward a 3-channel result."""
    while True:
        incoming = self.receiver.recv_frame()
        # Single-channel edge map, replicated into all three channels.
        edges = cv2.Canny(incoming.get_data(), 100, 200)
        pict = cv2.merge([edges, edges, edges])
        outgoing = frame.Frame()
        outgoing.from_array(incoming.get_index(), pict)
        self.sender.send_frame(outgoing)
def get_frame(self):
    '''
    Returns frames from the live video stream, encoded as jpeg bytes

    jpeg bytes is the format used by the Flask server to display the live stream
    '''
    global previous_frame
    global step_images

    # Read the live feed
    success, image = self.live_stream.read()

    # Sample frames according to the sampling rate
    if self.frame_count % (int)(self.video_fps / self.sampling_rate) == 0:
        # Create a Frame Object
        # def manipulate_frame():
        frame_object = Frame.Frame(image)
        # Preprocess the frame for prediction
        frame_object.preprocess(112)
        # Generate the optical flow for the image
        # NOTE(review): previous_frame must be initialised elsewhere before the
        # first call, otherwise this raises NameError — confirm.
        frame_object.generate_optical_flow(previous_frame)
        # Predict the frame object's class here
        frame_object.predict_frame()
        self.frame_buffer.add_to_buffer(frame_object)
        # frame_thread = threading.Thread(target=manipulate_frame)
        # # t.daemon = True
        # frame_thread.start()

    ret, jpeg = cv2.imencode('.jpg', image)

    # Store the current Image as the previous image
    previous_frame = image

    # Increment frame_count
    self.frame_count += 1

    # Every 60 frames (after a warm-up of 90), also report the predicted step.
    if( self.frame_count % 60 == 0 and self.frame_count > 90 ):
        step_completed = self.frame_buffer.get_step_predicted()
        return [jpeg.tobytes(), step_completed]
        # self.action_steps.add_step(step_completed)

    return [jpeg.tobytes()]
def __init__(self, description):
    """Pull a memory object out of the debuggee for *description*.

    Selects the description's frame, then tries to evaluate the symbol:
    first via gdb.parse_and_eval on its name, falling back to reading the
    symbol's value in the selected frame. Finally derives a type name and
    an index (the object's address) for the pulled object.
    """
    super(MemoryPull, self).__init__(description)
    self._relativeName = description.relative_name
    self._frame = frame.Frame(description.frame)
    self._object = None
    with frame.Selector(self.frame.frame) as fs:
        sym = self.description.symbol
        if sym is not None:
            typ = sym.type
            # Only pull types we know how to render.
            if typ.code in {
                gdb.TYPE_CODE_PTR,
                gdb.TYPE_CODE_ARRAY,
                gdb.TYPE_CODE_STRUCT,
                gdb.TYPE_CODE_INT,
                gdb.TYPE_CODE_FUNC,
            }:
                try:
                    self._object = gdb.parse_and_eval(self.name)
                except TypeError:
                    # Fallback: read the symbol's value in the selected frame.
                    try:
                        self._object = sym.value(fs.frame.frame)
                        self._value = str(self.object)
                    except TypeError:
                        print("DEBUG: TypeError detected!")
        else:
            # No symbol: evaluate the name directly.
            try:
                self._object = gdb.parse_and_eval(self.name)
            except gdb.error as e:
                print("DEBUG:")
                traceback.print_exc()
                # pass
    # Derive a printable type name, preferring the symbol's declared type.
    if self.description.symbol and self.description.symbol.type:
        self._type_name = str(self.description.symbol.type)
    elif isinstance(self.object, gdb.Value):
        self._type_name = Pull.get_true_type_name(self.object.type)
    else:
        # TODO: This is for dev. Remove in production code.
        self._type_name = "void"
        # raise Exception("Untyped memory", self)
    # Index defaults to the pulled object's address.
    if self.index is None:
        if self.object is None:
            self._index = "?"
        else:
            self._index = str(self.object.address)
def __init__(self, height, width, mine_amount):
    """Build a height x width minesweeper board and wire up cell neighbours."""
    self.height = height
    self.width = width
    self.mine_amount = mine_amount
    self.lost = False
    self.won = False
    # Grid of cell objects, row-major.
    self.frame_array = []
    for y in range(height):
        self.frame_array.append([frame.Frame() for _ in range(width)])
    # Give every cell its list of adjacent cells.
    for y in range(height):
        for x in range(width):
            for dy, dx in around_frame:
                # Reject negative indices so the grid does not wrap around
                # (otherwise opposite edges would be treated as adjacent,
                # turning the board into a torus).
                if y + dy < 0 or x + dx < 0:
                    continue
                try:
                    self.frame_array[y][x]._neighbours.append(
                        self.frame_array[y + dy][x + dx])
                except IndexError:
                    # Off the bottom/right edge: no neighbour there.
                    pass
def _fetch(cls, description):
    """Look up the existing Memory record matching *description*.

    :raises Exception: if description is None
    :raises Memory.DuplicateAddress: if more than one record matches
    :raises Memory.NewObject: if no record matches (signals the caller to create one)
    :return: the single matching record
    """
    if description is None:
        raise Exception("Description required to fetch object!")
    execution = description.execution
    # NOTE(review): frameDescription is built but never used below — dead
    # local, or missing wiring into the query; confirm intent.
    frameDescription = descriptions.MemoryDescription("myframe",
        address=str(gdb.selected_frame()))
    # TODO: replace selected_frame call with something more flexible
    frm = frame.Frame(gdb.selected_frame())
    address = description.address
    memories = cls.objects(
        execution=execution,
        frame=str(frm),
        address=address
    )
    if len(memories) > 1:
        raise Memory.DuplicateAddress("Duplicate address for memory!")
    elif len(memories) == 0:
        raise Memory.NewObject("New object found")
    return memories[0]
def __init__(self, thread_id, name):
    """Initialise the data-collection thread: servo hardware, training-data
    state, and the camera stream — then start collecting immediately."""
    self.thread_id = thread_id
    self.name = name
    self.pwm = Device(0x40)  # setup PCA9685 servo driver device
    self.pwm.set_pwm_frequency(60)  # setup PCA9685 servo driver frequency
    self.steering_angle = 90  # set initial angle of servo for steering
    self.motor_angle = 133  # set initial angle of servo for motor

    # numpy data setup
    self.npy_file = 'datasets/dataset.npy'  # numpy file for storing training data
    self.left_val = 0  # [0,*,*]
    self.forward_val = 0  # [*,0,*]
    self.right_val = 0  # [*,*,0]
    self.training_data = []  # array for controller input data [left_val, motor_val, right_val]
    self.printed_length = False  # used to print length of training data

    self.stream = frame.Frame(1, 'SaveFrame')  # setup camera stream
    self.stream.start()  # start camera stream
    self.start()  # start data collection
def create_scene():
    """Generate a full scene (mountains plus foreground flora) with a loading bar.

    Returns a frame.Frame of all drawables, or None if the user exits from
    the loading screen mid-generation.
    """
    current_task = 0
    total_tasks = ((settings.mountain_end - settings.mountain_start)
                   / settings.mountain_frequency
                   + settings.foreground_end - settings.foreground_start)

    # Mountains, one per depth step.
    mountains = []
    for depth in range(settings.mountain_start, settings.mountain_end,
                       settings.mountain_frequency):
        mountains.append(create_mountain(random.randrange(WIN_WIDTH), depth,
                                         random.randrange(100, 800)))
        current_task += 1
        if loading_screen.load(current_task, total_tasks):
            return  # user exited from the loading screen

    # Trees, bushes, flowers — t is the y coordinate / depth.
    flora = []
    for t in range(settings.foreground_start, settings.foreground_end):
        if t > settings.secondary_foreground_start:
            if random.randrange(settings.bush_chance) == 0:  # bushes
                flora.append(create_bush(random.randrange(WIN_WIDTH), t, WIN_HEIGHT))
            if random.randrange(settings.flower_chance) == 0:  # flowers
                flora.append(create_flower(random.randrange(WIN_WIDTH), t, WIN_HEIGHT))
        if random.randrange(settings.tree_chance) == 0:  # trees
            flora.append(create_tree(random.randrange(WIN_WIDTH), t, WIN_HEIGHT))
        current_task += 1
        if loading_screen.load(current_task, total_tasks):
            return  # user exited from the loading screen

    bg = Surface_Drawable((WIN_WIDTH, WIN_HEIGHT))
    bg.fill(c.SKY)
    pygame.draw.rect(bg, c.DARK_GREEN,
                     (0, settings.mountain_start, WIN_WIDTH,
                      WIN_HEIGHT - settings.mountain_start))
    return frame.Frame([bg] + mountains + flora)
def advanceFrame(self):
    """Append a new Frame numbered one past the current frame count."""
    next_number = len(self.frames) + 1
    self.frames.append(frame.Frame(next_number))
import sys sys.path.append('..') import frame arr_tmp = [] for imgm in os.listdir('../templates'): arr_tmp.append(cv2.imread('../templates/' + imgm, 3)) vidcap = cv2.VideoCapture(0) sx = raw_input('male or female? ') os.chdir('../templates_ml/' + sx) with open('last.txt', 'r') as f: lst_ln = f.read() lst_ln = int(lst_ln) + 1 for i in range(0, 90): succ, img = vidcap.read() h, w, d = img.shape frame1 = frame.Frame(img, 640, 480, 3) if i % 10 == 0: for tmp in arr_tmp: threash = 0.95 if frame1.templater_(tmp, threash)[0]: b_r = frame1.templater_(tmp, threash)[2][1] t_l = frame1.templater_(tmp, threash)[2][0] print(t_l), print(b_r) fr = frame1.get_img() fr_new = fr[t_l[1]:b_r[1], t_l[0]:b_r[0]] break if i >= 40: cv2.imwrite('image_gg%d.PNG' % (lst_ln + i - 40), fr_new) cv2.imshow('recorder', img) if cv2.waitKey(1) & 0xFF == ord('q'):
def generate_frames(name_of_video, source_of_frames, params):
    """Build per-frame face boxes and limb polygons from OpenPose keypoint JSON.

    For every frame, reads the corresponding *_keypoints_with_face JSON file,
    derives a face bounding box from the two eye keypoints (scaled to the
    image size taken from the first extracted frame image), and builds one
    rectangle polygon per body-part pair.

    Args:
        name_of_video: base name locating the frame images and JSON files.
        source_of_frames: unused here; kept for signature parity with the
            other generate_frames variants.
        params: dict; params["frames"] is the number of frames to process.

    Returns:
        list of frame.Frame objects, one per video frame.
    """
    # Image size, taken from the first extracted frame image.
    rows, cols, _ = cv2.imread(name_of_video + "_frames/" + name_of_video + "001.jpg").shape

    # Keypoint indices of the body parts used to build limb polygons.
    body_parts = {"chest": 1, "left_shoulder": 2, "left_elbow": 3, "left_hand": 4,
                  "right_shoulder": 5, "right_elbow": 6, "right_hand": 7,
                  "pelvis": 8, "left_hip": 9, "left_knee": 10, "left_ankle": 11,
                  "right_hip": 12, "right_knee": 13, "right_ankle": 14}
    body_part_pairs = [("chest", "pelvis"), ("left_shoulder", "left_elbow"),
                       ("left_elbow", "left_hand"), ("right_shoulder", "right_elbow"),
                       ("right_elbow", "right_hand"), ("left_hip", "left_knee"),
                       ("left_knee", "left_ankle"), ("right_hip", "right_knee"),
                       ("right_knee", "right_ankle")]

    frames = []
    for counter in range(1, params["frames"] + 1):
        with open(name_of_video + "_keypoints_with_face/" + name_of_video
                  + "_{0:0=12d}_keypoints.json".format(counter - 1)) as file:
            data = json.load(file)
            left = []
            right = []
            top = []
            bottom = []
            confidence = []
            polygons = []
            # Renamed loop variable from `list` — the original shadowed the builtin.
            for person in data["people"]:
                keypoints = person["pose_keypoints_2d"] + person["face_keypoints_2d"]
                if getFaceConfidence(keypoints) > .80:
                    # Face bounding box based on the two eye keypoints (15/16);
                    # coordinates are normalised, so scale by image dimensions.
                    eyea_x = keypoints[15 * 3] * cols
                    eyea_y = keypoints[15 * 3 + 1] * rows
                    eyeb_x = keypoints[16 * 3] * cols
                    eyeb_y = keypoints[16 * 3 + 1] * rows
                    inter_eye_distance = eyeb_x - eyea_x
                    if inter_eye_distance > 1:
                        x1 = int(eyea_x - (.55 * inter_eye_distance))
                        x2 = int(eyeb_x + (.55 * inter_eye_distance))
                        y1 = int(eyea_y + (1.45 * inter_eye_distance))
                        y2 = int(eyea_y - (1 * inter_eye_distance))
                        # Clamp the box to the image bounds.
                        x1 = max(0, min(cols, x1))
                        x2 = max(0, min(cols, x2))
                        y1 = max(0, min(rows, y1))
                        y2 = max(0, min(rows, y2))
                        assert x1 != x2
                        assert y1 != y2
                        left.append(min(x1, x2))
                        right.append(max(x1, x2))
                        top.append(min(y1, y2))
                        bottom.append(max(y1, y2))
                        confidence.append(getFaceConfidence(keypoints))

                        # Create one rectangle polygon per limb (body-part pair).
                        polygons_list = [None for _ in range(len(body_part_pairs))]
                        for i, (parta, partb) in enumerate(body_part_pairs):
                            aindex = body_parts[parta] * 3
                            bindex = body_parts[partb] * 3
                            x1 = int(keypoints[aindex] * cols)
                            y1 = int(keypoints[aindex + 1] * rows)
                            x2 = int(keypoints[bindex] * cols)
                            y2 = int(keypoints[bindex + 1] * rows)
                            # BUG FIX: the original condition tested x1 and y2
                            # twice (`x1 != 0 and y2 != 0 and x1 != 0 and
                            # y2 != 0`) and never checked y1 or x2, letting
                            # half-missing endpoints through. Check all four.
                            if x1 != 0 and y1 != 0 and x2 != 0 and y2 != 0:
                                a = np.array((x1, y1))
                                b = np.array((x2, y2))
                                # Limb rectangle width proportional to its length.
                                width = .4 * np.linalg.norm(a - b)
                                points = generateRectCoords(x1, y1, x2, y2,
                                                            width, rows, cols)
                                polygons_list[i] = geometry.Polygon(points)
                        polygons.append(polygons_list)
            new_frame = frame.Frame(left, right, top, bottom, confidence, counter,
                                    polygons=polygons)
            frames.append(new_frame)
    return frames