def extractRoi(image, winSize, stepSize):
    """Generate candidate windows over `image` restricted to a color-mask ROI.

    First yields `(weight_map, mask_scale)` from `roiMask`, then, for every
    pyramid level, yields `((x, y, winW, winH), scale, window)` tuples where
    `window` is a crop of the resized level image.

    NOTE(review): `roiMask`, `pyramid`, `boundingRects` and `sliding_window`
    are project helpers not visible here — their exact contracts (e.g. the
    coordinate space of `boundingRects` output) are assumed, not verified.
    """
    # hue boundaries
    colors = [
        (15, 30)  # orange-yellow
    ]
    mask, weight_map, mask_scale = roiMask(image, colors)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # First yielded item is metadata, not a window — callers must consume it.
    yield weight_map, mask_scale
    for resized in pyramid(image, winSize):
        # Ratio between the original image height and this pyramid level.
        scale = image.shape[0] / resized.shape[0]
        for x, y, w, h in boundingRects(mask_scale, contours):
            # Map the bounding rect from original-image coordinates into
            # this pyramid level's coordinates.
            x /= scale
            y /= scale
            w /= scale
            h /= scale
            # Rect center, clamped to the level's extents.
            center = (min(x + w / 2, resized.shape[1]),
                      min(y + h / 2, resized.shape[0]))
            if w > winSize[0] or h > winSize[1]:
                # Rect larger than one window: slide a window across it.
                for x, y, window in sliding_window(
                        resized, (int(x), int(y), int(w), int(h)),
                        stepSize, winSize):
                    yield ((x, y, winSize[0], winSize[1]), scale, window)
            else:
                # Rect fits in one window: emit a single window centered on
                # the rect (clamped to stay inside the image).
                x = max(0, int(center[0] - winSize[0] / 2))
                y = max(0, int(center[1] - winSize[1] / 2))
                window = resized[y:y + winSize[1], x:x + winSize[0]]
                yield ((x, y, winSize[0], winSize[1]), scale, window)
def jolt_differences(ratings: List[int]) -> List[int]:
    """Return the differences between consecutive ratings in ascending order.

    Fix: the original called `ratings.sort()`, mutating the caller's list as
    a side effect; we now sort a copy and leave the argument untouched.
    The pairwise difference is computed with a plain zip over adjacent
    elements instead of `util.sliding_window`.

    :param ratings: joltage ratings, in any order; may be empty.
    :return: list of adjacent differences of the sorted ratings
             (empty for 0 or 1 input elements).
    """
    ordered = sorted(ratings)
    # zip(ordered, ordered[1:]) naturally yields nothing for len < 2,
    # so empty and single-element inputs fall out correctly.
    return [b - a for a, b in zip(ordered, ordered[1:])]
def main():
    """Sliding-window object localization demo.

    Builds an image pyramid over an input photo, collects fixed-size ROIs via
    a sliding window (with a live cv2 preview), classifies every ROI with a
    pretrained ResNet, keeps the boxes whose prediction matches the modal
    class, and draws the NMS-filtered boxes on the original image.

    NOTE(review): `util`, `ResNet` and `DataSet` are project modules/classes
    not visible here; their exact semantics are assumed from usage.
    """
    # ************************************ DATA ***************************************************
    padroniza_imagem = 300            # target width after the initial resize
    tamanho_da_entrada = (224, 224)   # network input size
    arquivo = "./imagens/raposa.jpg"  # input image path
    cor = (0, 255, 0)                 # BGR color used for drawn rectangles
    # Preprocessing / augmentation operations
    composicao_de_transformacao = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    # ************************************* NETWORK ************************************************
    modelo = ResNet(1000, True)
    modelo.eval()
    # Open the image
    imagem_original = np.asarray(Image.open(arquivo))
    imagem = imagem_original.copy()
    # Get the image dimensions and resize to a standard width, preserving
    # the aspect ratio.
    (H, W) = imagem.shape[:2]
    r = padroniza_imagem / W
    dim_final = (padroniza_imagem, int(H * r))
    imagem = cv2.resize(imagem, dim_final, interpolation=cv2.INTER_AREA)
    # Region-of-interest window size
    ROI = (150, 150)  # (H, W)
    # Lists of regions of interest (rois) and their coordinates (coods)
    rois = []
    coods = []
    # Run the image-pyramid function
    for nova_imagem in util.image_pyramid(imagem, escala=1.2):
        # Scale factor between the original image and this pyramid level
        # NOTE(review): uses the pre-resize width W, not the resized width —
        # presumably intentional so boxes map back to `imagem_original`.
        fator_escalar = W / float(nova_imagem.shape[1])
        # Run the sliding-window operation
        for (x, y, regiao) in util.sliding_window(nova_imagem, size=ROI, stride=8):
            # Early-exit condition: press "q" in the preview window
            key = cv2.waitKey(1) & 0xFF
            if (key == ord("q")):
                break
            # Skip partial windows at the image border
            if regiao.shape[0] != ROI[0] or regiao.shape[1] != ROI[1]:
                continue
            # ROI coordinates relative to the original image
            x_r = int(x * fator_escalar)
            w_r = int(fator_escalar * ROI[1])
            y_r = int(y * fator_escalar)
            h_r = int(fator_escalar * ROI[0])
            # Resize the ROI to the network input size
            roi = cv2.resize(regiao, tamanho_da_entrada)
            roi = np.asarray(roi)
            rois.append(roi)
            # Store the coordinates as (x1, y1, x2, y2)
            coods.append((x_r, y_r, x_r + w_r, y_r + h_r))
            # Draw the current window position on a copy of the level image
            copia = nova_imagem.copy()
            cv2.rectangle(copia, (x, y), (x + ROI[1], y + ROI[0]), cor, 2)
            # Show the result (RGB -> BGR channel flip for cv2 display)
            cv2.imshow("Janela", copia[:, :, ::-1])
            # Slow down the loop for visualization
            time.sleep(0.01)
    # Close all open windows
    cv2.destroyAllWindows()
    #rois = np.array(rois, dtype="float32")
    # Transform into a torch dataset; batch_size == dataset size, so the
    # loader yields a single batch containing every ROI.
    dataset = DataSet(rois, coods, composicao_de_transformacao)
    size = len(dataset)
    train_loader = torch.utils.data.DataLoader(dataset=dataset,
                                               shuffle=True,
                                               batch_size=size)
    print("Cópias: ", size)
    with torch.no_grad():
        for _, (X, y) in enumerate(train_loader):
            # Classify every ROI copy in one forward pass
            resultado = modelo.forward(X)
            # Best class per image
            confs, indices_dos_melhores_resultados = torch.max(resultado, 1)
            # Modal (most frequent) predicted class across all ROIs
            classe, _ = torch.mode(indices_dos_melhores_resultados.flatten(), -1)
            # Mask of ROIs whose prediction matches the modal class
            mascara = [
                True if item == classe else False
                for item in indices_dos_melhores_resultados
            ]
            # Box selection
            # NOTE(review): `coods[i]` assumes loader order matches `coods`
            # order, but the DataLoader uses shuffle=True — verify.
            boxes = []
            for i in range(size):
                if mascara[i] == True:
                    boxes.append(coods[i])
            # Non-max suppression over the selected boxes
            boxes = util.non_max_suppression(np.asarray(boxes), overlapThresh=0.3)
            copia = imagem_original.copy()
            for (x1, y1, x2, y2) in boxes:
                cv2.rectangle(copia, (x1, y1), (x2, y2), cor, 2)
            cv2.imshow("Final", copia[:, :, ::-1])
            cv2.waitKey(0)
            cv2.destroyAllWindows()
# Negative-example mining for the 12-net cascade stage.
# NOTE(review): this fragment appears to be mid-script — the per-scale NMS
# that `scale_cur` / `scale_box` / `suppressed` set up continues past the
# visible end of this chunk.
neg_db_sz = 0
neg_db = [0 for _ in range(1000)]
for nid, img_name in enumerate(neg_file_list):
    img = Image.open(param.neg_dir + img_name)
    # Check if grayscale: a gray PIL image has a 2-D array shape, which
    # differs from param.input_channel (presumably 3 — verify); if so,
    # replicate the single channel three times to fake RGB.
    if len(np.shape(img)) != param.input_channel:
        img = np.asarray(img)
        img = np.reshape(img, (np.shape(img)[0], np.shape(img)[1], 1))
        img = np.concatenate((img, img, img), axis=2)
        img = Image.fromarray(img)
    # 12-net detection pass.
    # box layout: xmin, ymin, xmax, ymax, score, cropped_img, scale
    neg_box = util.sliding_window(img, param.thr_12, net_12, input_12_node)
    # 12-calib: batch every detected crop into one array and run the
    # calibration network over it in a single eval.
    neg_db_tmp = np.zeros((len(neg_box), param.img_size_12, param.img_size_12,
                           param.input_channel), np.float32)
    for id_, box in enumerate(neg_box):
        # box[5] is the cropped image (see layout comment above)
        neg_db_tmp[id_, :] = util.img2array(box[5], param.img_size_12)
    calib_result = net_12_calib.prediction.eval(
        feed_dict={input_12_node: neg_db_tmp})
    # Adjust box coordinates with the calibration-net output
    neg_box = util.calib_box(neg_box, calib_result, img)
    # NMS for each scale — accumulators for the suppression pass below
    scale_cur = 0
    scale_box = []
    suppressed = []
def read(self):
    """Load the OPPORTUNITY dataset, window it into feature sequences, and
    pickle the result.

    Populates `self.train_data` (per-class lists of feature-sequence arrays,
    null-class samples removed) and `self.test_data` (one
    `(feature_frames, labels)` tuple per test file), then dumps both to the
    files 'trainFile' and 'testFile'.

    NOTE(review): `DataReader`, `sliding_window`, `Constants` and
    `calculate_features` are project names not visible here; their exact
    contracts are assumed from usage.
    """
    self.train_data = []
    self.test_data = []
    # self test data is a list of tuples. each tuple corresponds to a single file
    # each tuple contains a list of numpy arrays and contains a numpy array containing sample by sample
    # predictions
    self.validation_data = []
    # (label id, gesture name) pairs for the OPPORTUNITY gesture classes
    self.class_list = [
        (0, 'Other'), (406516, 'Open Door 1'), (406517, 'Open Door 2'),
        (404516, 'Close Door 1'), (404517, 'Close Door 2'),
        (406520, 'Open Fridge'), (404520, 'Close Fridge'),
        (406505, 'Open Dishwasher'), (404505, 'Close Dishwasher'),
        (406519, 'Open Drawer 1'), (404519, 'Close Drawer 1'),
        (406511, 'Open Drawer 2'), (404511, 'Close Drawer 2'),
        (406508, 'Open Drawer 3'), (404508, 'Close Drawer 3'),
        (408512, 'Clean Table'), (407521, 'Drink from Cup'),
        (405506, 'Toggle Switch')
    ]
    data_reader = DataReader("/home/vishvak/LargeScaleFeatureLearning/OpportunityUCIDataset/dataset")
    train_raw_data_frames = data_reader.data["training"]
    # populating training data sequences
    # iterating through training files
    for data_frame in train_raw_data_frames:
        train_labels = data_frame[:, -1]
        # removing the label column
        train_raw_data = data_frame[:, :-1]
        # Drop null-class (label 0) samples
        mask = train_labels != 0
        train_labels = train_labels[mask]
        train_raw_data = train_raw_data[mask,:]
        # here we have removed the null data
        # we need to get the sequences corresponding to the other classes
        # and append them to our data list
        class_labels = np.unique(train_labels)
        if len(self.train_data) == 0:
            # initialize the per-class list (one sublist per class)
            for _ in range(class_labels.shape[0]):
                self.train_data.append([])
        # indices_dict maps class label -> list of (start, end) index pairs
        # — assumed from usage of self.get_indices; verify against its impl
        indices_dict = self.get_indices(train_labels)
        for i,class_label in enumerate(class_labels):
            train_data_inner_list = []
            # add numpy array corresponding to sequences to this list
            for index in indices_dict[class_label]:
                raw_data = train_raw_data[index[0]:index[1],:]
                # we need to convert this into feature vector sequences
                feats,_ = sliding_window(raw_data, train_labels[index[0]:index[1]],
                                         Constants.sliding_window_size,
                                         Constants.overlap, calculate_features)
                # sliding_window presumably returns None-filled output when
                # the segment is shorter than the window — skip those
                if np.all(feats != None):
                    train_data_inner_list.append(feats)
                else:
                    print('small window' + 'class label : {}'.format(class_label))
            self.train_data[i] = self.train_data[i] + train_data_inner_list
    test_raw_data_frames = data_reader.data["test"]
    # populating testing data sequences
    # iterating through test files
    for data_frame in test_raw_data_frames:
        test_labels = data_frame[:, -1]
        # removing the label column
        test_raw_data = data_frame[:, :-1]
        # class_labels = np.unique(test_labels)
        # for i,class_label in enumerate(class_labels):
        #     self.class_list[class_label] = i
        # if len(self.test_data) == 0:
        #     # initialize the list
        #     for _ in range(class_labels.shape[0]):
        #         self.test_data.append([])
        # add numpy array corresponding to sequences to this list
        test_inner_list = []
        # Slide a fixed-size frame over the test file (null class kept here,
        # unlike training) and window each frame into features
        r = 0
        while (r + Constants.testing_frame_size) < test_raw_data.shape[0]:
            raw_data = test_raw_data[r:r+Constants.testing_frame_size,:]
            raw_labels = test_labels[r:r+Constants.testing_frame_size]
            feats,_ = sliding_window(raw_data, raw_labels,
                                     Constants.testing_sliding_window_size,
                                     Constants.testing_overlap, calculate_features)
            test_inner_list.append(feats)
            r += Constants.testing_frame_slide_samples
        '''@TODO we are missing the last few samples. these generally woudnt matter
        but think about better ways of doing it '''
        self.test_data.append((test_inner_list,test_labels))
    # Persist both splits
    # NOTE(review): plain open/close without try-with; a `with` block would
    # be safer, left unchanged here
    train_data_file = open('trainFile','wb')
    test_data_file = open('testFile','wb')
    pickle.dump(self.train_data,train_data_file)
    pickle.dump(self.test_data,test_data_file)
    train_data_file.close()
    test_data_file.close()
def strategy_all_traces(traces):
    """Print the 3 most common n-grams across all traces, for n = 1..19.

    Each trace is joined into a single string and `util.sliding_window`
    extracts its length-`size` substrings; the counts are pooled over every
    trace before printing.

    Fix: the original used the Python-2-only `print` statement; with a
    single argument, `print(...)` produces identical output under Python 2
    and is valid Python 3.
    """
    for size in range(1, 20):
        ngrams = []
        for trace in traces:
            ngrams += util.sliding_window(''.join(trace), size)
        print(collections.Counter(ngrams).most_common(3))
def trace_to_dict(t, n):
    """Return a dict mapping each n-gram of trace `t` to its occurrence count.

    The trace is padded with '[' and ']' sentinels before windowing, so
    grams anchored at the start/end of the trace are distinguishable.
    """
    padded = ['['] + t + [']']
    counts = collections.defaultdict(int)
    for gram in util.sliding_window(padded, n):
        counts[tuple(gram)] += 1
    # Return a plain dict so missing keys raise instead of auto-inserting.
    return dict(counts)