def compare_image_lists(new_result, old_result, decimals):
    fns = []
    for _ in range(2):
        tmpfd, tmpname = tempfile.mkstemp(suffix=".png")
        os.close(tmpfd)
        fns.append(tmpname)
    num_images = len(old_result)
    assert num_images > 0
    for i in range(num_images):
        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[i])))
        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[i])))
        results = compare_images(fns[0], fns[1], 10 ** (-decimals))
        if results is not None:
            if os.environ.get("JENKINS_HOME") is not None:
                tempfiles = [
                    line.strip()
                    for line in results.split("\n")
                    if line.endswith(".png")
                ]
                for fn in tempfiles:
                    sys.stderr.write(f"\n[[ATTACHMENT|{fn}]]")
                sys.stderr.write("\n")
        assert_equal(results, None, results)
    for fn in fns:
        os.remove(fn)

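# A minimal sketch of a modern replacement for the np.loads round-trip used
# above. np.loads was a thin alias for pickle.loads, deprecated in NumPy 1.15
# and removed in later releases, so newer code serializes via np.save/np.load
# over an in-memory buffer instead. Function names here are illustrative.
import io
import zlib

import numpy as np

def dumps_compressed(arr: np.ndarray) -> bytes:
    """Serialize an array to compressed bytes (stands in for arr.dumps())."""
    buf = io.BytesIO()
    np.save(buf, arr)
    return zlib.compress(buf.getvalue())

def loads_compressed(blob: bytes) -> np.ndarray:
    """Inverse of dumps_compressed (stands in for np.loads(zlib.decompress(...)))."""
    return np.load(io.BytesIO(zlib.decompress(blob)))

# round trip
original = np.arange(12).reshape(3, 4)
assert np.array_equal(loads_compressed(dumps_compressed(original)), original)
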
def load_data(hypes):
    data_directory = 'working_dir/data/%s' % hypes['data_directory']
    # read data control dictionaries
    metadata = load_metadata(hypes)
    # read numpy arrays
    idx_q = np.loads(storage.get('%s/idx_q.npy' % data_directory))
    idx_a = np.loads(storage.get('%s/idx_a.npy' % data_directory))
    (trainX, trainY), (testX, testY), (validX, validY) = split_dataset(idx_q, idx_a)
    trainX = trainX.tolist()
    trainY = trainY.tolist()
    testX = testX.tolist()
    testY = testY.tolist()
    validX = validX.tolist()
    validY = validY.tolist()
    trainX = tl.prepro.remove_pad_sequences(trainX)
    trainY = tl.prepro.remove_pad_sequences(trainY)
    validX = tl.prepro.remove_pad_sequences(validX)
    validY = tl.prepro.remove_pad_sequences(validY)
    testX = tl.prepro.remove_pad_sequences(testX)
    testY = tl.prepro.remove_pad_sequences(testY)
    return metadata, trainX, trainY, testX, testY, validX, validY

def load(self, filename):
    dirname = os.path.dirname(inspect.stack()[-1].filename)
    filename = os.path.join(dirname, filename)

    with gzip.open(filename, mode="rb") as fp:
        pickled_data = fp.read()

    data = pickle.loads(pickled_data)

    # todo: version check

    image_list = data.get("image")
    if image_list:
        for i in range(RENDERER_IMAGE_COUNT - 1):
            pyxel.image(i).data[:, :] = np.loads(image_list[i])

    tilemap_list = data.get("tilemap")
    if tilemap_list:
        for i in range(RENDERER_TILEMAP_COUNT):
            pyxel.tilemap(i).data[:, :] = np.loads(tilemap_list[i])

    sound_list = data.get("sound")
    if sound_list:
        for i in range(AUDIO_SOUND_COUNT):
            src = sound_list[i]
            dest = pyxel.sound(i)
            dest.note = src.note
            dest.tone = src.tone
            dest.volume = src.volume
            dest.effect = src.effect
            dest.speed = src.speed

def k_means_distance(self, centers, result_name=None):
    """Computes the distance between each row and each of the given center
    vectors for k-means."""
    if centers.shape[1] != self.__cols:
        raise BaseException('Dimensions of matrix and centers do not match')
    if result_name is None:
        result_name = MatrixFactory.getRandomMatrixName()
    redwrap = RedisWrapper(self.context.redis_master, self.context.key_manager)
    prefix = 'dist(' + self.__name + ',' + centers.name() + ')'
    dist_job = kmeans_jobs.KMeansDistanceJob(self.context, self, centers, prefix)
    parts = dist_job.run()
    for p in range(len(parts)):
        part_name = parts[p]
        m = self.context.redis_master.lpop(part_name)
        total = None  # renamed from `sum` to avoid shadowing the built-in
        while m is not None:
            if total is None:
                total = numpy.loads(m)
            else:
                total += numpy.loads(m)
            m = self.context.redis_master.lpop(part_name)
        self.context.redis_master.delete(part_name)
        redwrap.create_block(
            self.context.key_manager.get_block_name(result_name, p, 0),
            numpy.sqrt(total))
    res = Matrix(self.__rows, centers.shape[0], result_name, self.context)
    return res

def run(self):
    video_input = self.get_input("videoInput")
    video_input_resized = self.get_input("videoInputResized")
    gif_data_output = self.get_output("gifData")
    script = FrameAnalyzer()
    while self.running():
        # read frames - full scale and thumb
        frame_obj = video_input.read()
        frame_obj_resized = video_input_resized.read()
        frame = np.loads(frame_obj)
        frame_resized = np.loads(frame_obj_resized)
        with self.filters_lock:
            script.change_settings(
                self.get_parameter("max_gif_length"),
                self.get_parameter("min_gif_length"),
                self.get_parameter("min_time_between_gifs"),
                self.get_parameter("max_acceptable_distance")
            )
        loop_data = script(frame, frame_resized)
        if loop_data and len(loop_data) == 4:
            file_path, w, h, frames_count = loop_data
            self.__send_to_next_service(gif_data_output, file_path, w, h, frames_count)
            self.__push_notification()

def process(dataframe, stage, output_file):
    # produce data for next stage
    try:
        config_gpu()
        detector = Detector()
        print("start gen: %s!!!!" % output_file)
        result = pd.DataFrame(columns=COLUMNS)
        for idx, ano in dataframe.iterrows():
            img_path = ano.file_name
            # py3 requires encoding='bytes' to unpickle py2-era arrays
            gboxes = np.loads(ano.boundbox, encoding='bytes') \
                if version_info.major >= 3 else np.loads(ano.boundbox)
            keypoints = np.loads(ano.keypoints, encoding='bytes') \
                if version_info.major >= 3 else np.loads(ano.keypoints)
            img = cv2.imread(img_path)
            height, width = img.shape[:-1]
            candis = detector.predict(img, stage) or []
            input_size = {"pnet": 12, "rnet": 24, "onet": 48}.get(stage)
            fp_df = mining_fp_box(img_path, img, candis, gboxes, keypoints, input_size)
            fn_df = mining_fn_box(img_path, img, candis, gboxes, keypoints, input_size)
            result = pd.concat([result, fp_df, fn_df], ignore_index=True)
            if idx % 100 == 0:
                print("idx: %s" % idx)
        result.to_feather(output_file)
    except Exception as ee:
        print("!!!!!%s---%s" % (process.__name__, ee))
        print(ano.boundbox, ano.keypoints)
    print("end file %s" % output_file)

def image_transform(idx, row, input_size=12, is_training=True):
    if row.crop_image:
        input_img = np.loads(row.crop_image, encoding='bytes') \
            if version_info.major >= 3 else np.loads(row.crop_image)
    else:
        img = cv2.imread(row.file_name)
        cropped = np.loads(row.cropped, encoding='bytes')
        x1, y1, x2, y2 = [int(x) for x in cropped.tolist()]
        input_img = cv2.resize(img[y1:y2, x1:x2, :], (input_size, input_size))
    btype, normbox, norm_points = row.btype, trans_numpy(row.normbox), trans_numpy(row.norm_points)
    btype = np.array([btype])
    if is_training:
        input_img, normbox, norm_points = image_enforcing(input_img, normbox, norm_points)
    # 0: class, 1-4: boundbox, 5-19: keypoints
    result = np.concatenate((btype, normbox, norm_points))
    return input_img, result

def _reconstruct(self):
    self.vertices = np.loads(self._vert_bin)
    self.edges = np.loads(self._edge_bin)
    self._dirty_edges = False
    self._dirty_matrix = True
    self._ad_matrix = None
    self.costs = None
    self.simple = self.check_simple()

def socketcomm(port, pipec, flags, uarr, dat_size=156):
    """Callback function to deal with incoming tcp communication.

    pipec, pipeu and pipesoc are pipe objects
    pipec: describes position of camera
    pipeu: describes orientation of camera.
        sends (True, data) for good data
        sends (False,.......) for bad data or loop not running
    pipesoc: fill socket objects and send to __main__
    """
    # Flags
    sockets = []
    waiting_for_data = True
    # initialise socket object
    serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    serversocket.bind((_TCP_SOCKET_HOST, port))
    serversocket.listen(1)
    serversocket.setblocking(0)
    # add server socket to list of sockets
    sockets.append(serversocket)
    # create epoll object
    _epoll = select.epoll()
    # register interest in read events on the server socket
    _epoll.register(serversocket.fileno(), select.EPOLLIN)
    while waiting_for_data:
        events = _epoll.poll(1)
        for fileno, event in events:
            if fileno == serversocket.fileno():
                # initialise client socket object
                clientsocket, clientaddr = serversocket.accept()
                clientsocket.setblocking(0)
                # register client socket on epoll
                _epoll.register(clientsocket.fileno(), select.EPOLLIN)
                sockets.append(clientsocket)
            elif event & select.EPOLLIN:
                # read event on client socket
                content = clientsocket.recv(dat_size)
                if content[0] == "u":
                    # pipeu.send((True, True, np.loads(content[1:])))
                    with uarr.get_lock():
                        uarr.get_obj()[:3] = np.loads(content[1:]).ravel()
                        flags.value = 2
                elif content[0] == "d":
                    # pipeu.send((False, True))
                    flags.value = 1
                elif content[0] == "c":
                    pipec.send(np.loads(content[1:]))
                elif content[0] == "e":
                    # pipeu.send((False, False))
                    flags.value = 0
                    waiting_for_data = False
                    for i in sockets:
                        _epoll.unregister(i.fileno())
            elif event & select.EPOLLHUP:
                for i in sockets:
                    _epoll.unregister(i.fileno())
                    i.close()

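# A hedged sketch of the peer socketcomm expects: one command character
# ('u', 'c', 'd' or 'e'), followed for 'u'/'c' by a pickled array that the
# server recovers with np.loads(content[1:]). Host, port and the payload are
# illustrative assumptions; the snippet itself is Python 2-era byte handling.
import pickle
import socket

import numpy as np

def send_command(host, port, cmd, arr=None):
    payload = cmd.encode() + (pickle.dumps(arr) if arr is not None else b'')
    with socket.create_connection((host, port)) as sock:
        sock.sendall(payload)

# usage (assumed endpoint):
# send_command('127.0.0.1', 5005, 'u', np.array([0.1, 0.2, 0.3]))
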
def get_correlation(self):
    pkt = self._request('')  # raises NoCorrelation if none ready
    self.logger.debug('received: %r' % pkt)
    data = pkt[self._header_size:]  # should be 3 arrays and 2 floats
    corr_time, left, right, current, total = self._header_struct.unpack(pkt[:self._header_size])
    lagss, visibss, fitss, m, c = self.unpacker.unpack(data)
    return (
        corr_time, left, right, current, total,  # header information
        loads(lagss), loads(visibss), loads(fitss), m, c  # data
    )

def _convert_mem_to_transition(mem):
    next_state = mem["next_state"].data if mem["next_state"] is not None else None
    return Transition(
        state=torch.as_tensor(np.loads(mem["state"].data)),
        action=torch.tensor([[mem["action"]]], dtype=torch.long),
        reward=torch.tensor([[mem["reward"]]], dtype=torch.float),
        next_state=torch.as_tensor(np.loads(next_state)) if next_state is not None else None,
    )

def load_model(self, filename, delimiter='<DELIMITER>'):
    """Load model parameters from the file provided."""
    try:
        infile = open(filename, 'r')
    except IOError:
        print("Could not open filename '" + filename + "'.")
        return

    # Get the number of units in each layer
    data = infile.read()
    data = data.split(delimiter)
    self.num_visible = int(data[0])
    self.num_hidden = int(data[1])
    self.num_rnn = int(data[2])

    # Read each field and convert to weight matrices
    self.Whv = np.loads(data[3])
    self.Wuh = np.loads(data[4])
    self.Wuv = np.loads(data[5])
    self.Wuu = np.loads(data[6])
    self.Wvu = np.loads(data[7])
    self.bias_visible = np.loads(data[8])
    self.bias_hidden = np.loads(data[9])
    self.bias_rnn = np.loads(data[10])
    self.initial_rnn = np.loads(data[11])
    infile.close()

def solve_linear_equation(arr1, arr2, arr3):
    try:
        arr1 = np.loads(arr1.encode())
        arr2 = np.loads(arr2.encode())
        arr3 = np.loads(arr3.encode())
    except Exception:
        return "invalid parameters"
    unknown_data = np.array([arr1, arr2])
    const_data = np.array([arr3[0], arr3[1]])
    result = np.linalg.solve(unknown_data, const_data)
    return result

def imgloader(labelq, dataq, batch_sz, pc_id, sharedmem):
    try:
        env = lmdb.open('testDB', readonly=True)
        miniidx = 0
        sendctr = 0
        random.seed(pc_id)
        np.random.seed(np.int64(pc_id))
        curmem = 0
        mylock = sharedmem[curmem].get_lock()
        while True:
            label = np.zeros([batch_sz, 10, 64, 80], np.float32)
            data = np.zeros([batch_sz, 3, im_sz[0], im_sz[1]], np.float32)
            temp = np.zeros([64, 80, 10], np.float32)
            j = 0
            while j < batch_sz:
                flip = random.randint(1, 10) % 2
                start = time.time()
                im = []
                if miniidx > 800:
                    miniidx = 0
                with env.begin() as txn:
                    str_id = "{:08}".format(miniidx)
                    raw_datum = txn.get(str(str_id))
                    im = np.loads(raw_datum)
                with env.begin() as txn:
                    str_id = "{:08}".format(miniidx + 1)
                    raw_datum = txn.get(str_id)
                    temp = np.loads(raw_datum)
                start = time.time()
                if flip:
                    im = im[:, ::-1, :]
                    temp = temp[:, ::-1, :]
                    temp[:, :, 0:5] *= -1.0
                data[j, 0:3, :, :] = prep_image(im, im_sz).transpose(2, 0, 1)
                label[j, :, :, :] = (temp.transpose(2, 0, 1)).copy() * 1000
                j = j + 1
                miniidx = miniidx + 2
            buf = np.frombuffer(sharedmem[curmem].get_obj(),
                                dtype=np.float32).reshape([batch_sz, 3, im_sz[0], im_sz[1]])
            buf[:, :, :, :] = data
            dataq.put((curmem, label), timeout=6000)
            del mylock
            curmem = (curmem + 1) % len(sharedmem)
            mylock = sharedmem[curmem].get_lock()
    except Exception as e:
        tup2 = "".join(traceback.format_exception(*sys.exc_info()))
        dataq.put(tup2)
        raise

def _do_numeric(self, value, path):
    if PY_VER > 2:
        data = value['data']
        if isinstance(data, str):
            data = value['data'].encode('utf-8')
        junk = gunzip_string(base64.decodebytes(data))
        result = numpy.loads(junk, encoding='bytes')
    else:
        junk = gunzip_string(value['data'].decode('base64'))
        result = numpy.loads(junk)
    self._numeric[value['id']] = (path, result)
    self._obj_cache[value['id']] = result
    return result

def convert_numarray(s):
    try:
        # assume data is zipped
        uz = zlib.decompress(s)
        if six.PY2:
            return np.loads(uz)
        else:
            return np.loads(uz, encoding='bytes')
    except Exception:
        # fall back and just try unpickling
        return pickle.loads(s)

def socket_cb1(socket, val):
    global u1, c1, running1, fr1, dat1
    if val[0] == 'u':
        u1 = np.loads(val[1:])
        fr1 += 1
        dat1 = True
    elif val[0] == 'c':
        c1 = np.loads(val[1:])
    elif val[0] == 'd':
        dat1 = False
    elif val[0] == 'e':
        dat1 = False
        running1 = False

def socket_cb2(socket, val):
    global u2, c2, running2, fr2, dat2
    if val[0] == 'u':
        u2 = np.loads(val[1:])
        fr2 += 1
        dat2 = True
    elif val[0] == 'c':
        c2 = np.loads(val[1:])
    elif val[0] == 'd':
        dat2 = False
    elif val[0] == 'e':
        dat2 = False
        running2 = False

def get_correlation(self):
    cmd = BYTE.pack(128)
    size, err, pkt = self._request(cmd)
    if err:
        raise NoCorrelations
    self.logger.debug('received: %r' % pkt)
    header_struct = BEE2CorrelationProvider._header_struct
    data = pkt[header_struct.size:]  # should be 3 arrays and 2 floats
    corr_time, left, right, current, total = header_struct.unpack(pkt[:header_struct.size])
    lagss, visibss, fitss, m, c = self.unpacker.unpack(data)
    return (
        corr_time, left, right, current, total,  # header information
        loads(lagss), loads(visibss), loads(fitss), m, c  # data
    )

def load(self, filename):
    dirname = os.path.dirname(inspect.stack()[-1].filename)
    filename = os.path.join(dirname, filename)

    with gzip.open(filename, mode="rb") as fp:
        pickled_data = fp.read()

    data = pickle.loads(pickled_data)

    # todo: version check

    image_list = data.get("image")
    if image_list:
        for i in range(RENDERER_IMAGE_COUNT - 1):
            pyxel.image(i).data[:, :] = np.loads(image_list[i])

    tilemap_list = data.get("tilemap")
    if tilemap_list:
        if type(tilemap_list[0]) is tuple:
            for i in range(RENDERER_TILEMAP_COUNT):
                tilemap = pyxel.tilemap(i)
                tilemap.data[:, :] = np.loads(tilemap_list[i][0])
                tilemap.refimg = tilemap_list[i][1]
        else:
            # todo: delete this block in the future
            for i in range(RENDERER_TILEMAP_COUNT):
                pyxel.tilemap(i).data[:, :] = np.loads(tilemap_list[i])

    sound_list = data.get("sound")
    if sound_list:
        for i in range(AUDIO_SOUND_COUNT - 1):
            src = sound_list[i]
            dest = pyxel.sound(i)
            dest.note[:] = src.note
            dest.tone[:] = src.tone
            dest.volume[:] = src.volume
            dest.effect[:] = src.effect
            dest.speed = src.speed

    music_list = data.get("music")
    if music_list:
        for i in range(AUDIO_MUSIC_COUNT - 1):
            src = music_list[i]
            dest = pyxel.music(i)
            dest.ch0[:] = src.ch0
            dest.ch1[:] = src.ch1
            dest.ch2[:] = src.ch2
            dest.ch3[:] = src.ch3

def load(self, filename):
    with gzip.open(filename, mode="rb") as fp:
        pickled_data = fp.read()

    data = pickle.loads(pickled_data)

    # todo: version check

    image = data["image"]
    for i in range(RENDERER_IMAGE_COUNT - 1):
        self._module.image(i).data[:, :] = np.loads(image[i])

    tilemap = data["tilemap"]
    for i in range(RENDERER_TILEMAP_COUNT):
        self._module.tilemap(i).data[:, :] = np.loads(tilemap[i])

def show_frame(self, input_connector, video_frame_label):
    obj = input_connector.read()
    frame = np.loads(obj)  # load the frame into a NumPy object
    img = Image.fromarray(frame)
    imgTk = ImageTk.PhotoImage(image=img)
    video_frame_label.imgTk = imgTk
    video_frame_label.configure(image=imgTk)

def identify_students_in_pic(students, picture, StudentObject):
    image = face_recognition.load_image_file(picture)
    encodings = face_recognition.face_encodings(image)
    present_student_ids = []
    stud_results = []
    for encoding in encodings:
        for student in students:
            student_pic_encodings = np.loads(student.face_encodings)
            results = face_recognition.compare_faces([encoding], student_pic_encodings)
            if results[0]:
                if student.id not in present_student_ids:
                    present_student_ids.append(student.id)
                    stud_results.append(
                        StudentObject(student.id, student.name, student.batch.id,
                                      student.reg_id, student.profile, True))
    for student in students:
        if student.id not in present_student_ids:
            stud_results.append(
                StudentObject(student.id, student.name, student.batch.id,
                              student.reg_id, student.profile, False))
    return stud_results

def run(self):
    video_input = self.get_input("videoInput")
    video_output = self.get_output("videoOutput")
    objects_input = self.get_input("objectsInput")
    debug_output = self.get_output("debugOutput")
    with open('config.json') as data_file:
        data = json.load(data_file)
    classes = data["conf"]["classes"]
    frame = None
    while self.running():  # main service loop (main thread handling the video stream)
        frame_obj = video_input.read()  # read data from the input interface
        frame = np.loads(frame_obj)  # load the frame into a NumPy object
        objects = objects_input.read()
        size_objects = len(objects)
        for i in range(size_objects):
            o = objects[i]
            object_class = "class" + str(o[3])
            color = classes[object_class]
            cv2.rectangle(frame, (o[0], o[1]), (o[0] + o[2], o[1] + o[2]),
                          (color[0], color[1], color[2]), 3)
        video_output.send(frame.dumps())
        debug_output.send([])

def clientthread(conn, L):
    # Infinite loop so that the function does not terminate and the thread
    # does not end.
    while True:
        # Read the 4-byte big-endian length prefix
        buf = b''
        while len(buf) < 4:
            buf += conn.recv(4 - len(buf))
        length = struct.unpack('>I', buf)[0]
        # Read exactly `length` bytes of payload
        data = b''
        l = length
        while l > 0:
            d = conn.recv(l)
            l -= len(d)
            data += d
        if not data:
            break
        M = np.loads(data)  # HERE IS AN ERROR
        if i == 1:  # note: `i` is never defined in this snippet
            L = M
        else:
            L += M
    # t0 = time.time()
    # data_out = pickle.dumps(L)
    # print("done in %fs" % (time.time() - t0))
    # conn.sendall(data_out)
    conn.close()
    return L

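# A hedged sketch of the sender side that matches clientthread's framing: a
# 4-byte big-endian length prefix followed by the pickled array (the wire
# format np.loads expects). The host/port and the helper name are assumptions,
# not taken from the snippet above.
import pickle
import socket
import struct

import numpy as np

def send_array(sock: socket.socket, arr: np.ndarray) -> None:
    payload = pickle.dumps(arr)
    sock.sendall(struct.pack('>I', len(payload)))  # length prefix
    sock.sendall(payload)                          # then the payload itself

# usage (assumed endpoint):
# with socket.create_connection(('127.0.0.1', 5000)) as sock:
#     send_array(sock, np.eye(3))
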
def loadDictionary(self):
    with open('dictionary500_1.txt', 'r') as infile:
        data = np.loads(infile.read())
    self.name_dict = data[0]
    self.bov_helper.clf = data[1]
    self.bov_helper.kmeans_obj = data[2]
    self.bov_helper.scale = data[3]

def network_input(current_image: np.ndarray, port: int) -> None:
    """Creates a socket for listening for received drone image frames.

    Args:
        current_image (np.ndarray): Cross-thread image data.
        port (int): TCP port a socket to be created on for listening.
    """
    print('ImageThread > network_input')
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as rsock:
        rsock.bind(('0.0.0.0', port))
        print('Socket bound')
        rsock.listen()
        print('Socket listening')
        conn, addr = rsock.accept()
        with conn:
            print('Image input established')
            while True:
                loading_image = True
                data = b''
                while loading_image:
                    part = conn.recv(4096)
                    if len(part) == 0:
                        break
                    data += part
                    ack = str(len(part))
                    conn.sendall(ack.encode())  # send number of bytes read for part
                current_image = np.loads(data)
                print(current_image)
                conn.sendall(b'ACK')

def run(self):  # main service method
    threading.Thread(target=self.watch_settings).start()  # start the thread handling the control stream
    video_input = self.get_input("videoInput")  # input interface object
    video_output_master = self.get_output("videoOutputMaster")  # output interface object
    video_output_output = self.get_output("videoOutputOutput")  # output interface object
    while self.running():  # main service loop
        frame_obj = video_input.read()  # read data from the input interface
        frame = np.loads(frame_obj)  # load the frame into a NumPy object
        frame = cv2.flip(frame, 1)  # flip the frame horizontally
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)  # restore natural colors
        frame = frame.dumps()
        video_output_output.send(frame)  # send the image to the output
        video_output_master.send(frame)  # send the image to the master for further processing

def getOrigFrame(self, b_cvt_color=False):
    """Returns origFrame as a numpy object, instead of a serialized string."""
    img = np.loads(self.serial_origFrame)
    if b_cvt_color:
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img

def data_from_pipeline(data, shape=None, dtype=None):
    if len(shape) == 0:
        return numpy.array(data, dtype=dtype)
    else:
        a = numpy.array([numpy.loads(x) for x in data], dtype=dtype)
        a.shape = (-1,) + shape
        return a

def save_to_mat_file(init_data, outfile_name):
    def get_list(vocab_dict):
        l = np.zeros((len(vocab_dict),), dtype=np.object)
        for w, i in vocab_dict.iteritems():
            l[i] = w
        return l

    laff = lambda nm: np.loads(init_data[nm])
    tag_vocab = get_list(init_data["tag_vocab"])
    word_vocab = get_list(init_data["word_vocab"])
    with open(outfile_name, "wb") as f:
        savemat(f,
                dict(word_context_size=init_data["word_context_size"],
                     embedding_size=init_data["embedding_size"],
                     objective_type=init_data["objective_type"],
                     param_reg_type=init_data["param_reg_type"],
                     param_reg_weight=init_data["param_reg_weight"],
                     tag_vocab=tag_vocab,
                     word_vocab=word_vocab,
                     tagemb=laff("na_tag_emb"),
                     wordemb=laff("na_word_emb"),
                     T1=laff("nam_T1"),
                     T2=laff("nam_T2"),
                     Tt1=laff("nam_Ttld1"),
                     Tt2=laff("nam_Ttld2"),
                     W=laff("nat_W"),
                     Wt=laff("nat_Wtld"),
                     S=laff("na_S_emb")),
                oned_as='column',
                format='5',
                appendmat=False)

def init_flann_index(self):
    self.index_vectors = []
    self.index_ids = []
    for _id, v in self.known_vectors.items():
        self.index_vectors.append(np.loads(base64.b64decode(v)))
        self.index_ids.append(_id)
    self.build_index(self.index_vectors)

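# A hedged sketch of the inverse operation implied above: storing a vector as
# a base64-encoded pickle so that np.loads(base64.b64decode(v)) can recover
# it. Treating known_vectors as a plain dict keyed by id is an assumption.
import base64
import pickle

import numpy as np

vec = np.random.rand(128).astype(np.float32)
known_vectors = {'some-id': base64.b64encode(pickle.dumps(vec))}
restored = pickle.loads(base64.b64decode(known_vectors['some-id']))
assert np.array_equal(restored, vec)
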
def _map_message_to_signal(self, channel, data):
    if type(channel) == bytes:
        channel = channel.decode('ascii')

    frame_signal_map = {
        'hics:webcam:frame': 'webcam_picture_received',
        'hics:framegrabber:frame': 'hypcam_picture_received',
    }
    changed_signal_map = {
        'hics:camera': 'camera_changed',
        'hics:scanner': 'scanner_changed',
        'hics:scanner:state': 'scanner_changed',
        'hics:focus:state': 'focus_changed',
    }
    value_signal_map = {
        'hics:plugin:announce': 'plugin_announce_received',
        'hics:plugin:notification': 'plugin_notification_received',
    }

    if channel in frame_signal_map.keys():
        if self._is_connected(frame_signal_map[channel]):
            getattr(self, frame_signal_map[channel]).emit(numpy.loads(data))
        return True
    elif channel in changed_signal_map.keys():
        getattr(self, changed_signal_map[channel]).emit()
    elif channel in value_signal_map.keys():
        if type(data) == bytes:
            d = data.decode('ascii')
        else:
            d = data
        getattr(self, value_signal_map[channel]).emit(d)
    else:
        return False

def k_means_recalc(cmd_ctx):
    m = _get_matrix_block(cmd_ctx, cmd_ctx.cmdArgs[0])
    d = _get_matrix_block(cmd_ctx, cmd_ctx.cmdArgs[1])
    result_prefix = cmd_ctx.cmdArgs[2]
    # Only count if prefix is given
    counter_prefix = None
    if len(cmd_ctx.cmdArgs) > 3:
        counter_prefix = cmd_ctx.cmdArgs[3]
    result = {}
    mincols = numpy.argmin(d, axis=1)
    # Find the nearest center for each record and add the record to the
    # center's sum. Also count how many records are nearest to each center.
    rowcount = 0
    for col in mincols:
        if counter_prefix is not None:
            cmd_ctx.redis_master.incr(counter_prefix + str(col))
        if col in result:
            result[col] += m[rowcount]
        else:
            result[col] = m[rowcount]
        rowcount += 1
    for key in result.keys():
        k = result_prefix + str(key)
        tmp = cmd_ctx.redis_master.lpop(k)
        if tmp is None:
            cmd_ctx.redis_master.rpush(k, result[key].dumps())
        else:
            cmd_ctx.redis_master.rpush(k, (result[key] + numpy.loads(tmp)).dumps())

def detect():
    img = np.loads(request.data, encoding='latin1')
    landmarks, seg_mask = process_img(img)
    payload = {'landmarks': landmarks.tolist(), 'seg_mask': seg_mask.tolist()}
    if payload is None:
        return jsonify([])
    return jsonify(payload)

def get_segmask(bbox, img):
    margin = 50
    if (bbox[1] < margin) or (bbox[0] < margin) or \
            ((bbox[1] + bbox[3] + margin) > img.shape[0]) or \
            ((bbox[0] + bbox[2] + margin) > img.shape[1]):
        margin = 0
    print('Margin =', margin)
    img_bboxed = img[bbox[1] - margin:bbox[1] + bbox[3] + margin,
                     bbox[0] - margin:bbox[0] + bbox[2] + margin, :]
    desired_size = np.max(img_bboxed.shape)
    old_size = img_bboxed.shape[:2]  # old_size is in (height, width) format
    ratio = float(desired_size) / max(old_size)
    new_size = tuple([int(x * ratio) for x in old_size])
    print('bboxedshape', img_bboxed.shape)
    print('new_size', new_size)
    img_bboxed = cv2.resize(img_bboxed, (new_size[1], new_size[0]))
    delta_w = desired_size - new_size[1]
    delta_h = desired_size - new_size[0]
    top, bottom = delta_h // 2, delta_h - (delta_h // 2)
    left, right = delta_w // 2, delta_w - (delta_w // 2)
    color = [0, 0, 0]
    new_im = cv2.copyMakeBorder(img_bboxed, top, bottom, left, right,
                                cv2.BORDER_CONSTANT, value=color)
    output = requests.get('http://127.0.0.1:9999/', data=np.array(new_im).dumps())
    seg_mask = np.loads(output.content, encoding='latin1').astype(np.uint8)
    seg_mask = seg_mask[left:seg_mask.shape[0] - right, top:seg_mask.shape[1] - bottom]
    seg_mask = cv2.resize(seg_mask, (old_size[1], old_size[0]))
    seg_mask = cv2.copyMakeBorder(seg_mask,
                                  bbox[1] - margin,
                                  img.shape[0] - (bbox[1] + bbox[3] + margin),
                                  bbox[0] - margin,
                                  img.shape[1] - (bbox[0] + bbox[2] + margin),
                                  cv2.BORDER_CONSTANT, value=color)
    return seg_mask

def find(self, img_path):
    target_img = face_recognition.load_image_file(img_path)
    target_encoding = face_recognition.face_encodings(target_img)
    if len(target_encoding) == 0:
        return 'Unknown', None
    target_encoding = target_encoding[0]
    min_dist = float('inf')
    # defaults, in case no stored encoding is found
    most_similar_name, most_similar_img_path = 'Unknown', None
    for row in self.related_people:
        query = "SELECT path, encoding FROM images WHERE id = %s"
        self.cur.execute(query, (row[0],))
        for img in self.cur:
            if img[1] is None:
                continue
            encoding = np.loads(img[1])
            distance = face_recognition.face_distance([encoding], target_encoding)
            if distance < min_dist:
                most_similar_img_path = img[0]
                most_similar_name = row[2]
                min_dist = distance
    return most_similar_name, most_similar_img_path

def check(self):
    word_errors = 0
    file_errors = 0
    t0 = time.time()
    self.get_tree()
    files_read = 0
    for f in self.tree:
        try:
            arr = np.loads(f)
            files_read += 1
        except Exception:
            file_errors += 1
            continue  # skip the comparison when the read failed
        word_errors += np.sum(arr == self.chunk)
    t1 = time.time()
    speed = self.sweeplen / (1e6 * (t1 - t0))
    msg = "\nread %.2f TB at %.2f MB/s" % (files_read * self.flen, speed)
    msg += "\n\t\t failed file reads = %i\n" % file_errors
    msg += "\n\t\t word errors = %i\n\n" % word_errors
    logging.info(msg)

def run(self):  # main service method
    video_input = self.get_input("videoInput")  # input interface object
    video_output = self.get_output("videoOutput")
    input_start = True
    out = None
    while self.running():  # main service loop
        try:
            frame_obj = video_input.read()  # read data from the input interface
            video_output.send(frame_obj)
        except Exception as e:
            video_input.close()
            if out is not None:
                out.release()
            break
        if input_start:
            video_format = self.get_parameter("videoFormat")
            print(video_format)
            fourcc = cv2.VideoWriter_fourcc(*"XVID")
            out = cv2.VideoWriter(
                "output.avi", fourcc, video_format[0],
                (int(video_format[1]), int(video_format[2]))
            )
            input_start = False
        frame = np.loads(frame_obj)  # load the frame into a NumPy object
        out.write(frame)

def run(self):  # main service method
    threading.Thread(target=self.watch_settings).start()  # start the thread handling the control stream
    video_input = self.get_input("videoInput")  # input interface object
    video_output = self.get_output("videoOutput")  # output interface object
    while self.running():  # main service loop
        frame_obj = video_input.read()  # read data from the input interface
        frame = np.loads(frame_obj)  # load the frame into a NumPy object
        with self.service_lock:  # thread lock
            resize_coeff = self.resize_coeff
        frame = cv2.resize(frame, None, fx=resize_coeff, fy=resize_coeff,
                           interpolation=cv2.INTER_AREA)
        video_output.send(frame.dumps())  # send the frame through the output interface

def loadDictionary(self, trainset):
    with open('dictionary{}.txt'.format(trainset), 'r') as infile:
        data = np.loads(infile.read())
    self.name_dict = data[0]
    self.bov_helper.clf = data[1]
    self.bov_helper.kmeans_obj = data[2]
    self.bov_helper.scale = data[3]

def socket_cb(socket, val):
    global u2, running1, fr1
    if val[0] == 's':
        u2 = np.loads(val[1:])
        fr1 += 1
    elif val[0] == 'e':
        running1 = False

def socket_cb2(socket, val):
    global u1, running2, fr2
    if val[0] == 's':
        u1 = np.loads(val[1:])
        fr2 += 1
    elif val[0] == 'e':
        running2 = False

def run(self):  # main service method
    video_input = self.get_input("videoInput")  # input interface object
    video_output = self.get_output("videoOutput")  # output interface object
    while self.running():  # main service loop
        try:
            frame_obj = video_input.read()  # read data from the input interface
        except Exception as e:
            video_input.close()
            video_output.close()
            break
        frame = np.loads(frame_obj)  # load the frame into a NumPy object
        with self.filters_lock:  # thread lock
            current_filters = self.get_parameter("filtersOn")  # read the "filtersOn" parameter
        if 1 in current_filters:  # check whether filter 1 is enabled
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # apply OpenCV's COLOR_BGR2GRAY filter
            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)  # so the result can still be saved as a color image
        if 2 in current_filters:
            frame = cv2.blur(frame, (7, 7))
        if 3 in current_filters:
            frame = cv2.GaussianBlur(frame, (5, 5), 0)
        if 4 in current_filters:
            frame = cv2.medianBlur(frame, 9)  # kernel size must be odd
        video_output.send(frame.dumps())  # send the frame through the output interface

def data_from_bytes(data, shape=None, dtype=None):
    if isinstance(data, redis.client.Pipeline):
        return functools.partial(data_from_pipeline, shape=shape, dtype=dtype)
    try:
        return numpy.loads(data)
    except cPickle.UnpicklingError:
        return float(data)

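# A hedged sketch of the producer side implied by data_from_bytes: arrays go
# into Redis as pickled bytes (which numpy.loads undoes), bare floats as text
# (which hits the float(data) fallback). The key names and a default
# localhost Redis instance are illustrative assumptions.
import pickle

import numpy as np
import redis

def store_examples(r: redis.Redis) -> None:
    r.set('vector:1', pickle.dumps(np.linspace(0.0, 1.0, 5)))  # unpickled by numpy.loads
    r.set('scalar:1', '3.14')                                  # falls back to float(data)
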
def import_ConvergenceTest(fn):
    with open(fn) as f:
        l = f.read()
    c = eval(l)
    for mode in c.result:
        for arr in c.result[mode]:
            c.result[mode][arr] = np.loads(c.result[mode][arr])
    return c

def read_and_show_frame(self, input_connector, video_frame_label):
    while self.running():
        with self.service_lock:
            obj = input_connector.read()
        frame = np.loads(obj)  # load the frame into a NumPy object
        img = Image.fromarray(frame)
        imgTk = ImageTk.PhotoImage(image=img)
        video_frame_label.imgTk = imgTk
        video_frame_label.configure(image=imgTk)

def read_video(self):
    video_input = self.get_input("videoInput")  # input interface object
    while self.running():  # main service loop
        frame_obj = video_input.read()  # read data from the input interface
        frame = np.loads(frame_obj)  # load the frame into a NumPy object
        frame = self.process_frame(frame)
        frame = frame.dumps()
        with self.service_lock:
            self.video_frame = frame

def deserialize(self, key, value, flags):
    if flags == 1:  # str
        return value
    if flags == 2:  # ndarray
        # return np.loads(zlib.decompress(value))
        return np.loads(value)
    if flags == 3:  # other
        return json.loads(value)
    raise TypeError("Unknown flags for value: %d" % flags)

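# A matching serializer sketch for the deserialize method above, following
# the pymemcache-style (value, flags) convention. The flag values mirror the
# ones the snippet checks; treating them as a serde pair is an assumption.
import json
import pickle

import numpy as np

def serialize(key, value):
    if isinstance(value, str):
        return value, 1                  # flags == 1: str
    if isinstance(value, np.ndarray):
        return pickle.dumps(value), 2    # flags == 2: ndarray, undone by np.loads
    return json.dumps(value), 3          # flags == 3: anything JSON-serializable
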
def test_months_in_units_calculation(self):
    rd = self.test_data.get_rd('clt_month_units')
    calc = [{'func': 'mean', 'name': 'mean'}]
    calc_grouping = ['month']
    ops = ocgis.OcgOperations(dataset=rd, calc=calc, calc_grouping=calc_grouping)
    ret = ops.execute()
    # expected bounds: one [1979-MM-16, 1988-MM-16] datetime pair per month,
    # stored below as a pickled numpy object array
    actual = '\x80\x02cnumpy.core.multiarray\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x0cK\x02\x86cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x03U\x01|NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK?tb\x89]q\x06(cdatetime\ndatetime\nq\x07U\n\x07\xbb\x01\x10\x00\x00\x00\x00\x00\x00\x85Rq\x08h\x07U\n\x07\xc4\x01\x10\x00\x00\x00\x00\x00\x00\x85Rq\th\x07U\n\x07\xbb\x02\x10\x00\x00\x00\x00\x00\x00\x85Rq\nh\x07U\n\x07\xc4\x02\x10\x00\x00\x00\x00\x00\x00\x85Rq\x0bh\x07U\n\x07\xbb\x03\x10\x00\x00\x00\x00\x00\x00\x85Rq\x0ch\x07U\n\x07\xc4\x03\x10\x00\x00\x00\x00\x00\x00\x85Rq\rh\x07U\n\x07\xbb\x04\x10\x00\x00\x00\x00\x00\x00\x85Rq\x0eh\x07U\n\x07\xc4\x04\x10\x00\x00\x00\x00\x00\x00\x85Rq\x0fh\x07U\n\x07\xbb\x05\x10\x00\x00\x00\x00\x00\x00\x85Rq\x10h\x07U\n\x07\xc4\x05\x10\x00\x00\x00\x00\x00\x00\x85Rq\x11h\x07U\n\x07\xbb\x06\x10\x00\x00\x00\x00\x00\x00\x85Rq\x12h\x07U\n\x07\xc4\x06\x10\x00\x00\x00\x00\x00\x00\x85Rq\x13h\x07U\n\x07\xbb\x07\x10\x00\x00\x00\x00\x00\x00\x85Rq\x14h\x07U\n\x07\xc4\x07\x10\x00\x00\x00\x00\x00\x00\x85Rq\x15h\x07U\n\x07\xbb\x08\x10\x00\x00\x00\x00\x00\x00\x85Rq\x16h\x07U\n\x07\xc4\x08\x10\x00\x00\x00\x00\x00\x00\x85Rq\x17h\x07U\n\x07\xbb\t\x10\x00\x00\x00\x00\x00\x00\x85Rq\x18h\x07U\n\x07\xc4\t\x10\x00\x00\x00\x00\x00\x00\x85Rq\x19h\x07U\n\x07\xbb\n\x10\x00\x00\x00\x00\x00\x00\x85Rq\x1ah\x07U\n\x07\xc4\n\x10\x00\x00\x00\x00\x00\x00\x85Rq\x1bh\x07U\n\x07\xbb\x0b\x10\x00\x00\x00\x00\x00\x00\x85Rq\x1ch\x07U\n\x07\xc4\x0b\x10\x00\x00\x00\x00\x00\x00\x85Rq\x1dh\x07U\n\x07\xbb\x0c\x10\x00\x00\x00\x00\x00\x00\x85Rq\x1eh\x07U\n\x07\xc4\x0c\x10\x00\x00\x00\x00\x00\x00\x85Rq\x1fetb.'
    actual = np.loads(actual)
    self.assertNumpyAll(ret[1]['clt'].temporal.bounds_datetime, actual)

def run(self):
    video_input = self.get_input("videoInput")
    out1 = self.get_output("out1")
    out2 = self.get_output("out2")
    while self.running():
        frame_obj = video_input.read()
        frame = np.loads(frame_obj)
        out1.send(frame.dumps())
        out2.send(frame.dumps())

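# The streaming services above all exchange frames as pickled ndarrays: the
# sender calls frame.dumps(), the receiver calls np.loads(frame_obj). A
# minimal self-contained round trip of that convention, written with plain
# pickle as it behaves on pre-1.17 NumPy:
import pickle

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # a blank BGR frame
wire = pickle.dumps(frame)                       # what frame.dumps() produced
restored = pickle.loads(wire)                    # what np.loads(frame_obj) does
assert np.array_equal(restored, frame)
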
def run(self):
    if self.observer:
        self.socket.sendto("I can see you.", self.add)
        while not self.close:
            sData = ""
            try:
                data = self.socket.recv(self.buffer)
                if not data:
                    continue
                if data[0] != "b":
                    # print("wrong type index.")
                    continue
                i = 1
                while i < self.buffer and data[i] != "_":
                    i += 1
                nb_packet_string = data[1:i]
                if nb_packet_string.isdigit():
                    nb_packet = int(nb_packet_string)
                else:
                    # print("wrong index.")
                    continue
                sData += data[i + 1:]
                for packet in range(1, nb_packet):
                    data, _ = self.socket.recvfrom(self.buffer)  # 262144 # 8192
                    if data[0] != "c":
                        # print("wrong type index continue")
                        continue
                    i = 1
                    while i < self.buffer and data[i] != "_":
                        i += 1
                    no_packet_string = data[1:i]
                    if no_packet_string.isdigit():
                        no_packet = int(no_packet_string)
                        # if no_packet != packet:
                        #     print("Wrong no packet : %d" % packet)
                    else:
                        # print("wrong index continue.")
                        continue
                    sData += data[i + 1:]
                self.observer(np.loads(sData))
            except Exception as e:
                if type(e) is not exceptions.EOFError:
                    if not self.close:
                        logger.error("udp observer : %s", e)
    else:
        logger.error("self.observer is None.")

def talker(TCP_IP, TCP_PORT):
    # assumed signature: connect as a TCP client to (TCP_IP, TCP_PORT)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((TCP_IP, TCP_PORT))
    while not rospy.is_shutdown():
        data = s.recv(BUFFER_SIZE)
        if data:
            deserialize = np.loads(data)
            rospy.loginfo(deserialize)
            pos_publisher.publish(deserialize)
            s.send(b'1')  # acknowledge receipt
        rate.sleep()
    s.close()

def SafeSimpleStack(seq):
    '''Vertically stack sequences of numpy record arrays.
    Avoids some of the problems of numpy.v_stack.
    '''
    names = uniqify(ListUnion([list(s.dtype.names) for s in seq
                               if s.dtype.names is not None]))
    formats = [max([s.dtype[att] for s in seq
                    if s.dtype.names is not None and att in s.dtype.names]).str
               for att in names]
    D = numpy.rec.fromarrays(
        [ListUnion([s[att].tolist()
                    if (s.dtype.names is not None and att in s.dtype.names)
                    else [nullvalue(format)] * len(s)
                    for s in seq])
         for (att, format) in zip(names, formats)],
        names=names)
    D = D.dumps()
    return numpy.loads(D)

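# The final dumps/loads pair in SafeSimpleStack is a pickle round-trip used to
# produce a clean, contiguous copy of the record array. A self-contained
# sketch of that idiom with plain pickle, since ndarray.dumps and np.loads are
# gone from modern NumPy:
import pickle

import numpy

rec = numpy.rec.fromarrays([[1, 2], [3.0, 4.0]], names=['a', 'b'])
clean = pickle.loads(pickle.dumps(rec))  # same effect as numpy.loads(rec.dumps())
assert (clean == rec).all()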