def inference(loader, path='data/Model.pth'):
    """Run the trained Net over `loader`, returning (accuracy, predictions, confusion matrix).

    Parameters
    ----------
    loader : iterable yielding 3-item samples; items expose `.flag`, `.truelab`,
        `.y` and support `.to(device)` (project-specific graph/data objects —
        exact type not visible from here).
    path : str
        Checkpoint file loaded into a fresh `Net`.

    NOTE(review): `labels` is read but never defined in this function — it is
    presumably a module-level mapping from `truelab` values to class indices
    (0..4, matching the 5x5 confusion matrix); confirm against the rest of
    the module.
    NOTE(review): the returned accuracy is `correct / len(loader)`, which is
    only a true accuracy if every loader item contributes exactly one sample
    (batch size 1) — verify.
    NOTE(review): `predictions` mixes plain int 0 (flag branch) with numpy
    arrays (model branch); callers must handle both.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = Net().to(device)
    model.load_state_dict(torch.load(path))
    model.eval()
    # 5-class confusion matrix: rows = predicted class, cols = true class
    # (see the `confmatrix[pred[0]][yval]` update below).
    confmatrix = np.zeros((5, 5))
    correct = 0
    predictions = []
    for i in loader:
        if i[0].flag == 1 and i[1].flag == 1:
            # Both inputs flagged: skip the model and predict class 0 directly.
            if labels[i[0].truelab[0]] == 0:
                confmatrix[0][0] += 1
                correct += 1
            else:
                confmatrix[0][labels[i[0].truelab[0]]] += 1
            predictions.append(0)
        else:
            data16 = i[0].to(device)
            data20 = i[1].to(device)
            dc = i[2].to(device)
            with torch.no_grad():
                out = model(data16, data20, dc)
            yval = labels[data16.truelab[0]]
            # argmax over class dimension; `.detach()` is redundant under
            # no_grad but harmless.
            pred = out.max(dim=1)[1].cpu().detach().numpy()
            correct += out.max(dim=1)[1].eq(data16.y).sum().item()
            predictions.append(pred)
            confmatrix[pred[0]][yval] += 1
    return correct / len(loader), predictions, confmatrix
class Old_model_Veluator():
    """Move evaluator backed by an older trained value network.

    Scores every legal move of a chess position with the network and can
    play one greedy step.  Requires the project-level `Net` and `State`
    classes and a python-chess-style board (push/pop/legal_moves/san/turn).
    """

    def __init__(self, prams_path):
        """Load network weights from `prams_path` onto the CPU.

        The `map_location` lambda keeps tensors on CPU even if the
        checkpoint was saved from a GPU.
        """
        self.net = Net()
        self.net.load_state_dict(
            torch.load(prams_path, map_location=lambda x, loc: x))
        self.net.eval()

    def eval(self, board):
        """Return a {move: network_value} dict over all legal moves.

        Each candidate move is pushed, the resulting position serialized
        and scored, then the move is popped so `board` is left unchanged.
        """
        outputs = {}
        for m in board.legal_moves:
            board.push(m)
            # `features` (was `input`, which shadowed the builtin): batch of 1.
            features = torch.unsqueeze(
                torch.from_numpy(State(board).serialize()).float(), 0)
            outputs[m] = self.net(features).data.numpy()[0][0]
            board.pop()  # restore the original position
        return outputs

    def play_one_step(self, board, verbose=False):
        """Play the best-scoring move on `board`; return (board, san[, values]).

        BUG FIX: the original called `v.eval(board)` — a module-level global —
        instead of this instance's own evaluator; now uses `self.eval(board)`.
        Sorting is descending when `board.turn` is truthy (White to move),
        ascending otherwise, so each side picks its best value.
        """
        qa = self.eval(board)
        best_m = sorted(qa.items(), key=lambda x: x[1],
                        reverse=board.turn)[0][0]
        san_m = board.san(best_m)
        board.push(best_m)
        if verbose:
            return board, san_m, qa.values()
        return board, san_m
class Veluator():
    """Position evaluator backed by the trained value network.

    Generalization: the checkpoint path is now a parameter with the original
    hard-coded value as its default, so existing `Veluator()` callers are
    unaffected while tests/alternative checkpoints become possible.
    """

    def __init__(self, path='nets/values.pth'):
        # map_location forces checkpoint tensors onto the CPU regardless of
        # the device they were saved from.
        self.net = Net()
        self.net.load_state_dict(
            torch.load(path, map_location=lambda x, loc: x))
        self.net.eval()

    def eval(self, board):
        """Return a {move: network_value} dict over all legal moves of `board`.

        Pushes each candidate move, scores the resulting position via the
        project `State` serializer, then pops — `board` is left unchanged.
        """
        outputs = {}
        for m in board.legal_moves:
            board.push(m)
            # `features` (was `input`, which shadowed the builtin): batch of 1.
            features = torch.unsqueeze(
                torch.from_numpy(State(board).serialize()).float(), 0)
            outputs[m] = self.net(features).data.numpy()[0][0]
            board.pop()  # undo the trial move
        return outputs
# --- Hardware + model setup (runs at import time) ---------------------------
trained_model = "tipper_final.model"
num_classes = 2
# BCM pin numbering is used below; the trailing comments give the physical
# header pin for each BCM channel.
solenoid_pin = 23  # Pin #16
green_led_pin = 25  # Pin 22.
red_led_pin = 8  # Pin 24.
GPIO.setmode(GPIO.BCM)
GPIO.setup(solenoid_pin, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(green_led_pin, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(red_led_pin, GPIO.OUT, initial=GPIO.LOW)
# Load the saved model.
checkpoint = torch.load(trained_model)
model = Net(num_classes=num_classes)
model.load_state_dict(checkpoint)
model.eval()
# Same normalization as at training time, presumably — TODO confirm these
# (0.5, 0.5, 0.5) stats match the training pipeline.
transformation = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])


def predict_image_class(image):
    """Classify `image` with the module-level `model` (body continues past this chunk)."""
    # Preprocess the image.
    image_tensor = transformation(image).float()
    # Add an extra batch dimension since pytorch treats all images as batches.
    image_tensor = image_tensor.unsqueeze_(0)
    # NOTE(review): `.cuda()` is not in-place — this result is discarded, so
    # the tensor stays on CPU; should be `image_tensor = image_tensor.cuda()`.
    # Left unchanged here (rest of this function is outside the visible chunk).
    image_tensor.cuda()
# --- Two-classifier setup: object detector + gesture detector ---------------
# NOTE(review): `trained_model_objects`, `trained_model_gestures` and
# `num_classes_objects` are referenced but defined outside this chunk.
num_classes_gestures = 2
# Class-index -> label lists; order must match the training label encoding —
# TODO confirm against the training script.
objs = []
objs.append("Google")
objs.append("Lamp")
objs.append("Nothing")
gestures = []
gestures.append("Wave")
gestures.append("Nothing")
# Load the saved models.
checkpoint_objects = torch.load(trained_model_objects)
model_objects = Net(num_classes=num_classes_objects)
model_objects.load_state_dict(checkpoint_objects)
model_objects.eval()
checkpoint_gestures = torch.load(trained_model_gestures)
model_gestures = Net(num_classes=num_classes_gestures)
model_gestures.load_state_dict(checkpoint_gestures)
model_gestures.eval()
# Shared preprocessing for both classifiers; presumably mirrors the
# training-time normalization — verify.
transformation = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])


def predict_image_class(image, classifier_type):
    """Classify `image` with the model selected by `classifier_type` (body continues past this chunk)."""
    # Preprocess the image.
    image_tensor = transformation(image).float()
class Generator:
    """Character-level product-name generator conditioned on category + ingredients.

    Wraps a trained LSTM `Net` plus torchtext fields, and offers greedy
    generation (`generate_name`) and beam search (`beam_search`).

    Fixes applied in this revision:
    * removed a duplicate, identical definition of `get_random_category`;
    * `beam_search` now expands each candidate string `k` (the original
      always re-scored `init_char`, so every beam got the same next-char
      distribution at every step);
    * `get_wired_char_list` now collects characters (`char[0]`), matching
      `get_fine_char_list` — the original stored whole (char, count) tuples,
      which would break downstream string joins;
    * dead locals (`counter`, `final_prob`, unused `bsz`) dropped.
    """

    def __init__(self):
        # torchtext fields for product names, categories and ingredients.
        self.product_field = Utils.load_field('product')
        self.category_field = Utils.load_field('category')
        self.ingredients_field = Utils.load_field('ingredients')
        # Build the model (sets self.Model).
        self.model_initialize()
        # Optimizer is only needed for checkpoint loading.
        self.optimizer = optim.Adam(self.Model.parameters(), lr=1e-3,
                                    weight_decay=1e-3)
        # Path to the best checkpoint, relative to this file.
        self.best_model_path = (os.path.dirname(os.path.abspath(__file__))
                                + "/model" + "/best.pth.tar")
        # (char, frequency) pairs, presumably sorted by frequency descending
        # — TODO confirm the pickle's ordering.
        self.init_char_list = Utils.load_pickle('init_char_list')
        self.fine_char_list = self.get_fine_char_list()
        self.wired_char_list = self.get_wired_char_list()

    def get_fine_char_list(self):
        """Return the characters from the top ~30% of `init_char_list`.

        `init_char_list` holds (char, count) pairs, e.g. [('a', 24134), ...].
        """
        top_ = int(len(self.init_char_list) * 0.3)
        fine_char_list = []
        for n, char in enumerate(self.init_char_list):
            fine_char_list.append(char[0])
            if n == top_:
                break
        return fine_char_list

    def get_wired_char_list(self):
        """Return the characters from the bottom ~30% of `init_char_list`.

        FIX: extract `[0]` (the character) from each (char, count) pair; the
        original appended the whole tuple, unlike `get_fine_char_list`.
        """
        top_ = int(len(self.init_char_list) * 0.3)
        wired_char_list = []
        for n in range(top_):
            # Walk from the end of the list: -1, -2, -3, ...
            wired_char_list.append(self.init_char_list[n * (-1) - 1][0])
        return wired_char_list

    def set_all_inputs(self, category, ingredients_list, init_char):
        """Set all conditioning inputs explicitly and seed the output buffer."""
        self.category = category
        self.init_char = init_char
        self.ingredients_list = ingredients_list
        self.generated_name = []
        self.generated_name.append(self.init_char)

    def initialize_all_inputs(self):
        """Reset category, start char, ingredients and the output buffer."""
        self.category = None
        self.init_char = None
        self.ingredients_list = None
        self.generated_name = []

    def initialize_category_ingredients(self):
        """Reset only the category and ingredient list."""
        self.category = None
        self.ingredients_list = None

    def randomize_all_inputs(self):
        """Pick a random category, start char and ingredient list."""
        self.category = self.get_random_category()
        self.init_char = self.get_random_char()
        self.ingredients_list = self.get_random_igd()
        # '정제수' (purified water) is always present in the formulation.
        self.ingredients_list.append('정제수')
        self.generated_name = [self.init_char]

    def randomize_partial_inputs(self, category=None, ingredients_list=None,
                                 init_char=None, mode='fine'):
        """Fill in any missing inputs randomly; explicit arguments win.

        `mode` selects the start-char pool when `init_char` is None:
        'fine' draws from the frequent chars, 'wired' from the rare ones.
        """
        if category is not None:
            self.category = category
        else:
            self.category = self.get_random_category()
        if ingredients_list is not None:
            self.ingredients_list = ingredients_list
            self.ingredients_list.append('정제수')
        else:
            self.ingredients_list = self.get_random_igd()
            self.ingredients_list.append('정제수')
        if init_char is not None:
            self.init_char = init_char
        else:
            if mode == 'fine':
                self.init_char = self.get_fine_char()
            elif mode == 'wired':
                self.init_char = self.get_wired_char()
        self.generated_name = [self.init_char]

    def model_initialize(self):
        """Construct the LSTM generator network on DEVICE (sets self.Model)."""
        self.Model = Net(vocab_size=len(self.product_field.vocab.stoi),
                         embedding_dim=256,
                         # -1: presumably excludes a pad/unk slot — verify.
                         nb_category=len(self.category_field.vocab.stoi) - 1,
                         nb_ingredients=len(self.ingredients_field.vocab.stoi) - 1,
                         lstm_nb_layers=3,
                         lstm_hidden_dim=256,
                         fc_out=128,
                         dropout_p=0.5,
                         ).to(DEVICE)
        print("Model Initialized")

    def load_best_model(self):
        """Load the best checkpoint into self.Model."""
        self.Model = Utils.load_checkpoint(self.best_model_path, self.Model,
                                           self.optimizer)
        print("LOAD BEST MODEL")

    def get_random_category(self):
        """Return a random category from the category vocabulary."""
        return random.choice(list(self.category_field.vocab.stoi.keys()))

    def get_random_igd(self):
        """Return 20-49 random ingredients from the ingredient vocabulary."""
        box = list(self.ingredients_field.vocab.stoi.keys())
        return random.sample(box, random.randrange(20, 50))

    def get_random_char(self):
        """Return a random character from the product vocabulary."""
        return random.choice(list(self.product_field.vocab.stoi.keys()))

    def get_fine_char(self):
        """Return a random frequent start character."""
        return random.choice(self.fine_char_list)

    def get_wired_char(self):
        """Return a random rare start character."""
        return random.choice(self.wired_char_list)

    def generate_next_char(self, string):
        """Return the next-character probability distribution after `string`.

        Conditions on `self.category` and `self.ingredients_list` (must be
        set beforehand, e.g. via `randomize_*` or `set_all_inputs`).
        Example: ingredients_list = ['정제수', '쌀추출물'], category = '스킨케어'.
        """
        category_tensor = self.category_field.process(
            [self.category]).float().to(DEVICE)
        ingredients_tensor = self.ingredients_field.process(
            [self.ingredients_list]).float().to(DEVICE)
        # Disable padding so the tensor length equals the prompt length;
        # drop the trailing token appended by the field ([:, :-1]).
        self.product_field.fix_length = None
        first_char_tensor = self.product_field.process(
            [string.upper()])[:, :-1].to(DEVICE)
        _, first_char_tensor_length = first_char_tensor.size()
        self.Model.eval()
        with torch.no_grad():
            hidden = self.Model.init_hidden(1)  # batch_size = 1
            # Feed the prompt one character at a time, threading the hidden
            # state; only the final step's output distribution is used.
            for step in range(first_char_tensor_length):
                outputs, hidden = self.Model(category_tensor,
                                             ingredients_tensor,
                                             first_char_tensor[:, step],
                                             hidden)
            probabilities = F.softmax(outputs, 1)
        return probabilities.squeeze()

    def generate_name(self):
        """Greedily extend `self.generated_name` until '<eos>'; return the name."""
        while self.generated_name[-1] != "<eos>":
            current_str = "".join(self.generated_name)
            probabilities = self.generate_next_char(current_str)
            max_idx = probabilities.argmax().item()
            # '<pad>' tokens render as spaces in the final name.
            next_chr = self.product_field.vocab.itos[max_idx].replace(
                "<pad>", " ")
            self.generated_name.append(next_chr)
        # Drop the trailing '<eos>' marker.
        return "".join(self.generated_name[:-1])

    def clean_beam_basket(self, basket, beam_width):
        """Return a copy of `basket` keeping only the `beam_width` highest-scored keys."""
        _tmp_basket = basket.copy()
        to_remove = sorted(_tmp_basket.items(), key=lambda x: x[1],
                           reverse=True)[beam_width:]
        for item in to_remove:
            _tmp_basket.pop(item[0])
        return _tmp_basket

    def beam_search(self, beam_width, init_char, category, ingredients,
                    alpha=0.7):
        """Beam-search `beam_width` candidate names starting from `init_char`.

        Scores are length-normalized log-probabilities (normalization
        exponent `alpha`). Returns the final candidate strings (without
        '<eos>', capitalized).

        NOTE(review): `category` and `ingredients` parameters are accepted
        but unused — conditioning comes from `self.category` /
        `self.ingredients_list`, which must be set beforehand.
        """
        beam_basket = OrderedDict()
        beam_basket[init_char] = 0.0
        while True:
            # Keep only the top beam_width candidates.
            beam_basket = self.clean_beam_basket(beam_basket, beam_width)
            # Stop once every surviving candidate has terminated with <eos>.
            eos_cnt = 0
            print("eos_cnt : ", eos_cnt)
            for k in beam_basket.keys():
                if "<eos>" in k:
                    eos_cnt += 1
            if eos_cnt == beam_width:
                break
            # Expand every unfinished candidate by one character.
            new_entries = {}
            to_remove = []
            for k in beam_basket.keys():
                if "<eos>" not in k:
                    # BUG FIX: expand from the candidate `k`, not from
                    # `init_char` (the original re-scored the start char
                    # every step, so all beams shared one distribution).
                    probabilities = self.generate_next_char(k)
                    for ix, prob in enumerate(probabilities):
                        new_k = k + self.product_field.vocab.itos[ix]
                        added_probability = (beam_basket[k]
                                             + torch.log(prob).item())
                        # Length normalization (k has no '<eos>' here, so
                        # stripping it is a no-op kept for safety).
                        len_k = len(k.replace("<eos>", ""))
                        normalized_probability = (
                            (1 / (len_k ** alpha)) * added_probability)
                        new_entries[new_k] = normalized_probability
                    to_remove.append(k)
            # Replace the expanded parents with their children.
            for k in to_remove:
                beam_basket.pop(k)
            for k, v in new_entries.items():
                beam_basket[k] = v
        final_list = []
        for k, v in beam_basket.items():
            refined_k = k.replace("<eos>", "").capitalize()
            final_list.append(refined_k)
        return final_list
def main():
    """Train on CIFAR-10, then report overall and per-class test accuracy.

    Fixes:
    * `dataiter.next()` -> `next(dataiter)`: the Py2-style `.next()` method
      does not exist on Python 3 iterators (and was removed from DataLoader
      iterators), so the original raised AttributeError.
    * The per-class loop now iterates the actual batch size instead of a
      hard-coded 4, so a final partial batch cannot index out of range.
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])  # Normalize(mean, std) per channel

    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                            download=True,
                                            transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                              shuffle=True, num_workers=2)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=True,
                                           transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                             shuffle=False, num_workers=2)
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog',
               'horse', 'ship', 'truck')

    # Grab one random test batch (FIX: use the builtin next()).
    dataiter = iter(testloader)
    images, labels = next(dataiter)

    last_saved_model = train(trainloader, SAVE_DIR)
    # NOTE(review): the checkpoint returned by train() is immediately
    # overwritten with a hard-coded path — confirm which is intended.
    last_saved_model = SAVE_DIR / "epochs_2_iter_12000.pth"
    model = Net()
    model.load_state_dict(torch.load(last_saved_model))
    model.eval()

    # Overall accuracy over the 10k test images.
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print(
        f'Accuracy of the network on the 10000 test images: {100 * correct / total} %'
    )

    # Per-class accuracy.
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            outputs = model(images)
            _, predicted = torch.max(outputs, 1)
            c = (predicted == labels).squeeze()
            # FIX: use the real batch size rather than assuming 4.
            for i in range(labels.size(0)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1
    for i in range(10):
        print(
            f'Accuracy of {classes[i]:.5s} : {100 * class_correct[i] / class_total[i]:.2f} %'
        )
# --- Bootstrap evaluation over resampled patients ---------------------------
# NOTE(review): `hp`, `patients`, `num_patients`, `row_ids`, `data`,
# `get_trainloader`, `net` and `device` are all defined outside this chunk;
# ppv_vec / npv_vec are allocated but never filled within the visible code,
# so this fragment is presumably truncated.
ppv_vec = np.zeros(hp.bootstrap_samples)
npv_vec = np.zeros(hp.bootstrap_samples)
for sample in range(hp.bootstrap_samples):
    print('Bootstrap sample {}'.format(sample))
    # Test data: resample patients with replacement, then map them back to
    # row indices for the test loader.
    sample_patients = patients.sample(n=num_patients, replace=True)
    idx = np.squeeze(row_ids.loc[sample_patients].values)
    testloader, _, _ = get_trainloader(data, 'TEST', shuffle=False, idx=idx)
    # evaluate on test data
    net.eval()
    # Accumulators for predictions and ground truth (kept on CPU).
    label_pred = torch.Tensor([])
    label_test = torch.Tensor([])
    with torch.no_grad():
        for i, (stat, dp, cp, dp_t, cp_t,
                label_batch) in enumerate(tqdm(testloader), 0):
            # move to GPU if available
            stat = stat.to(device)
            dp = dp.to(device)
            cp = cp.to(device)
            dp_t = dp_t.to(device)
            cp_t = cp_t.to(device)
            # Forward pass; the second return value is discarded here.
            label_pred_batch, _ = net(stat, dp, cp, dp_t, cp_t)
            label_pred = torch.cat((label_pred, label_pred_batch.cpu()))
            label_test = torch.cat((label_test, label_batch))