import demo

def main():
    # Test the GSL binding: evaluate the Bessel function J0 at x = 5.0.
    print("my_sf_bessel_J0(5.0)", demo.my_sf_bessel_J0(5.0))
    # print(demo.test())
def segmentation(path, sliced_path):
    test_set = DataSet(4, 0.5, path)
    test_loader = DataLoader(dataset=test_set, batch_size=1,
                             num_workers=1, shuffle=False)
    model = torch.load('/home/sxchongya/unet_pytorch/output/model-625-1.pth').to(device)
    test(model, test_loader, sliced_path)
def segmentation(path, sliced_path):
    test_set = DataSet(4, 0.5, path)
    test_loader = DataLoader(dataset=test_set, batch_size=1,
                             num_workers=1, shuffle=False)
    model = UNet(1, [32, 48, 64, 96, 128], 4, net_mode='3d',
                 conv_block=ResBlock).to(device)
    model.load_state_dict(torch.load('/home/sxchongya/unet_pytorch/output/model-628-3.pth'))
    # model.load_state_dict(torch.load('/home/sxchongya/unet_pytorch/output/model-625-1.pth'))
    # model = torch.load('/home/sxchongya/test_002/demo/static/model-625-1.pth').to(device)
    print('----------------------------')
    test(model, test_loader, sliced_path)
def NER(words):
    entities_name, entities_type, entities_location = [], [], []
    result = evaluate_line(words)
    for entity in result['entities']:
        entities_name.append(entity['word'])
        entities_type.append(entity['type'])
        # (start, end) character offsets of the entity in the input text.
        entities_location.append((entity['start'], entity['end']))
        if entity['type'] == 'VER':
            t, eachline = VER(entity['word'])
            if t == 1:
                entity['similarity'] = ['Find']
            elif t == 2:
                entity['similarity'] = [eachline]
            elif t == 0:
                entity['similarity'] = ['Lost']
        else:
            t = lookfordict(entity['word'])
            if t == 1:
                entity['similarity'] = ['Find']
            elif t == 2:
                # Fall back to a Pinyin lookup for words missing from the dictionary.
                pyy, t = test(entity['word'])
                if t == 3:
                    entity['similarity'] = ['Lost']
                elif t == 4:
                    entity['similarity'] = [[pyy]]
    return entities_name, entities_type, entities_location
## for loops
colors = ["red", "orange", "yellow", "green", "blue", "purple"]
for color in colors:
    print("my favorite color is %s" % color)

## functions
import math

def distance(x1, y1, x2, y2):
    return math.sqrt((x2 - x1)**2 + (y2 - y1)**2)

print(distance(1, 3, 6, 8))

## modules
import demo
print(demo.test())
print(demo.test2())

#######################################
### Make a function called area that takes arguments x and shape,
# where x is either the base of an equilateral triangle,
# the diameter of a circle, or the side of a square,
# and shape is either 'triangle', 'square' or 'circle'.
# The function should return the area of that shape.
# Use a for loop with your function to find the area
# of a triangle, square and circle where x = 5.
# Also try to find the area of a rectangle with this loop.

## Make a function called sum_of_squares that takes two lists of equal length
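# The area exercise above is fully specified, so here is a minimal sketch of
# one possible solution (the function name, the shape labels, and x = 5 all
# come from the exercise comments; everything else is an illustrative choice):
import math

def area(x, shape):
    # x is the triangle base, the circle diameter, or the square side.
    if shape == 'triangle':
        return (math.sqrt(3) / 4) * x**2   # equilateral triangle of base x
    elif shape == 'square':
        return x**2
    elif shape == 'circle':
        return math.pi * (x / 2)**2        # x is the diameter, so radius = x / 2

for shape in ['triangle', 'square', 'circle']:
    print(shape, area(5, shape))

# Note that the rectangle case cannot fit this loop as written: a rectangle's
# area needs a second, independent dimension, so area() would need an extra
# argument before the loop could cover it.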
from demo import test
from demo.disp import display

test()
display()
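# These two imports only resolve if demo is a package containing a disp
# submodule. A hypothetical layout that would satisfy them is sketched below;
# the function bodies are placeholders, not the real project code.

# demo/__init__.py (hypothetical)
def test():
    print("demo.test called")

# demo/disp.py (hypothetical)
def display():
    print("demo.disp.display called")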
def get_person_pose_(obs, paths, path_to_images):
    count = 0
    output = './PoseEstimation/AlphaPose/AlphaPose-pytorch/examples/demo/'
    # 17 2D joints with a confidence score would give feature_size = 17 * 3;
    # here the feature vector is 10 joint/limb angles instead (8 + 2 below).
    feature_size = 10

    # Walk every (person, frame) entry in the observations.
    for i in range(len(obs)):
        for person in range(obs[i].shape[0]):
            for frame in range(obs[i].shape[1]):
                count += 1
                # image = cv2.imread(path_to_images + os.path.splitext(os.path.basename(paths[i]))[0]
                #                    + "/" + str(int(obs[i][person][frame][1])) + ".png")
                print(path_to_images + os.path.splitext(os.path.basename(paths[i]))[0]
                      + "/" + str(int(obs[i][person][frame][1])) + ".png")
                # height_, width_, _ = image.shape
                # x1 = int(obs[i][person][frame][2] * width_)
                # y1 = int(obs[i][person][frame][3] * height_)
                # x2 = x1 + int(obs[i][person][frame][4] * width_)
                # y2 = y1 + int(obs[i][person][frame][5] * height_)
                # cropped_person = cv2.resize(image[y1:y2, x1:x2], (64, 128))
                outfile = output + '%s.jpg' % str(count)
                # cv2.imwrite(outfile, cropped_person)

    # Extract poses for each person.
    keypoints = demo.test()
    final_pose = np.zeros((count, feature_size))
    for i in range(len(keypoints)):
        img_name = keypoints[i].get('imgname')
        index = int(os.path.splitext(img_name)[0])
        if len(keypoints[i].get('result')) > 0:
            angles = []
            pose = keypoints[i].get('result')[0].get('keypoints')
            # (A disabled variant above normalized and flattened the raw
            # keypoints with their confidence scores instead of using angles.)
            # COCO joint order: 0 Nose, 1 LEye, 2 REye, 3 LEar, 4 REar,
            # 5 LShoulder, 6 RShoulder, 7 LElbow, 8 RElbow, 9 LWrist, 10 RWrist,
            # 11 LHip, 12 RHip, 13 LKnee, 14 RKnee, 15 LAnkle, 16 RAnkle.

            # Angle of each bone segment against the horizontal axis.
            angle_between_nodes = [(5, 7), (6, 8), (7, 9), (8, 10),
                                   (11, 13), (13, 15), (12, 14), (14, 16)]
            for pair in angle_between_nodes:
                x1, y1 = pose[pair[0], 0], pose[pair[0], 1]
                x2, y2 = pose[pair[1], 0], pose[pair[1], 1]
                vector = (x2 - x1, y2 - y1)
                dot_product = np.dot(vector, (1, 0))
                norm = np.linalg.norm(vector)
                angle = np.arccos(dot_product / norm)
                if np.isnan(angle):
                    angle = 0
                angles.append(angle)

            # Angle between opposite limbs: left leg vs. right leg, left arm vs. right arm.
            angle_between_limbs = [((11, 15), (12, 16)), ((5, 9), (6, 10))]
            for limb_pair in angle_between_limbs:
                x1, y1 = pose[limb_pair[0][0], 0], pose[limb_pair[0][0], 1]
                x2, y2 = pose[limb_pair[0][1], 0], pose[limb_pair[0][1], 1]
                vector1 = (x2 - x1, y2 - y1)
                x3, y3 = pose[limb_pair[1][0], 0], pose[limb_pair[1][0], 1]
                x4, y4 = pose[limb_pair[1][1], 0], pose[limb_pair[1][1], 1]
                vector2 = (x4 - x3, y4 - y3)
                dot_product = np.dot(vector1, vector2)
                norm1 = np.linalg.norm(vector1)
                norm2 = np.linalg.norm(vector2)
                angle = np.arccos(dot_product / norm1 / norm2)
                if np.isnan(angle):
                    angle = 0
                angles.append(angle)

            final_pose[index - 1] = angles

    final_pose = np.reshape(final_pose,
                            [int(count / obs[0].shape[1]), obs[0].shape[1], feature_size])
    return final_pose
def mousePressEvent(self, ev):
    if QT5:
        pos = self.transformPos(ev.pos())
    else:
        pos = self.transformPos(ev.posF())
    if ev.button() == QtCore.Qt.LeftButton:
        if self.drawing():
            if self.current:
                # Add point to existing shape.
                if self.createMode == 'polygon':
                    self.current.addPoint(self.line[1])
                    self.line[0] = self.current[-1]
                    if self.current.isClosed():
                        self.finalise()
                elif self.createMode in ['rectangle', 'line']:
                    assert len(self.current.points) == 1
                    self.current.points = self.line.points
                    self.finalise()
                elif self.createMode == 'auto':
                    assert len(self.current.points) == 1
                    self.current.points = self.line.points
                    # Crop the region of interest and run the segmentation demo on it.
                    roi = self.line.auto_segment()
                    img = io.imread(self.filename)
                    row = np.arange(roi[1], roi[3])
                    col = np.arange(roi[0], roi[2])
                    roi_image = img[row]
                    roi_image = roi_image[:, col]
                    io.imsave('/devdata/Label2/labelme/output.png', roi_image)
                    # demo.main()
                    det = demo.test()
                    j_det = 0
                    for i in row:
                        self.pre_mask[i, col] = det[j_det, :]
                        j_det += 1
                    np.save('/devdata/Label2/labelme/mask.npy', self.pre_mask)
                    assert self.current
                    self.current.close()
                    self.shapes.append(self.current)
                    self.storeShapes()
                    self.current = None
                    self.setHiding(False)
                    self.newautoShape.emit()
                    self.update()
            elif not self.outOfPixmap(pos):
                # Create new shape.
                self.current = Shape()
                self.current.addPoint(pos)
                if self.createMode == 'point':
                    self.finalise()
                else:
                    self.line.points = [pos, pos]
                    self.setHiding()
                    self.drawingPolygon.emit(True)
                self.update()
        else:
            self.selectShapePoint(pos)
            self.prevPoint = pos
            self.repaint()
    elif ev.button() == QtCore.Qt.RightButton and self.editing():
        self.selectShapePoint(pos)
        self.prevPoint = pos
        self.repaint()
d_lstm = data
# Reshape for the CNN.
data = data.reshape((data.shape[0], data.shape[1], 64, 64, 1))
if MODEL_FILE == '':
    print('insert model..')
    exit()
print('Data Shape...', data.shape)

## predict data...
import demo
# model = demo.stacked_lstm_ae(8, 4096, 'relu', 32, 'sgd', 0.2)
# model = demo.mode_cnnlstm3()       # cnn_lstm_dense
# model = demo.model_cnnlstm(256)    # cnn_lstm, with encoding added
model = demo.test()                  # cnn_lstm, with encoding added
print(model.summary())
model.load_weights(MODEL_FILE)

# Truncate the network at the bottleneck layer and use it as the encoder.
from tensorflow.python.keras.models import Model
model = Model(inputs=model.inputs, outputs=model.get_layer("bottleneck").output)
data = model.predict(data)
print('data shape...', data.shape)

# data = data.reshape(data.shape[0], data.shape[1] * data.shape[2])
# Reshape.
data = data.reshape(data.shape[1], data.shape[0])
ds = Dataset_transformations(data.T, 1000, data.shape)
if os.path.exists(PREFIX + CONFIG_NAME + '.zip'):
    clust_obj = dataset_utils.load_single(PREFIX + CONFIG_NAME + '.zip')
else:
def test_hello():
    ret = demo.test()
    assert ret == 42
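# For this pytest-style test to pass, demo.test() must return 42. A minimal
# stub that satisfies it (a hypothetical demo.py, purely for illustration):
def test():
    return 42
# With that stub on the import path, running `pytest -q` in the test's
# directory collects test_hello and the assertion passes.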
def dete1(request):
    import demo
    # Run the demo model on the most recently uploaded file, then redirect.
    files = File.objects.all().order_by('create_time').last()
    a = files.file_name
    demo.test(a)
    return HttpResponseRedirect('/result')
import wasmtime

import demo

demo.test()