def loop_func(netType, root_path):
    """Load a trained model and its recorded training trajectory, then replay
    the trajectory through a PD + learned-dynamics controller as a test run.

    :param netType: network variant name; also the subdirectory of
        ``root_path`` where the model and trajectory files are stored.
    :param root_path: root directory containing the per-network result folders.
    """
    save_path = path.join(root_path, netType)
    load_path = save_path

    # Trajectory parameters (amplitudes A, frequencies w, offsets b) recorded
    # at training time; allow_pickle because they are stored as object arrays.
    a = np.load(path.join(load_path, 'trainTrajectory.npz'), allow_pickle=True)
    A_list_list = a['arr_0'].tolist()
    w_list_list = a['arr_1'].tolist()
    b_list_list = a['arr_2'].tolist()

    device = torch.device('cpu')
    model = get_model(netType, device)
    model, input_scaler, output_scaler = load_model(path.join(load_path, 'model'), netType, model)

    # Combine a PD feedback controller with the learned dynamics feedforward.
    dynamic_controller = Dynamic_Controller(model, input_scaler, output_scaler)
    pd_controller = PD_Controller()
    pd_dynamic_controller = PD_Dynamic_Controller(pd_controller, dynamic_controller)

    traj = ValinaCosTraj(A_list_list=A_list_list, w_list_list=w_list_list, b_list_list=b_list_list)
    # Replay the trajectory with the combined controller; results and figures
    # are saved under save_path.
    q_dict, qdot_dict, qddot_dict, a_dict, _ = runTrajectory(
        pd_dynamic_controller, traj, sampleNum=2000, savePath=save_path,
        saveFig=True, sim_hz=100, isShowPlot=True, isRender=False,
        saveName='testTrajectory', isReturnAllForce=True, isPlotPredictVel=True)
    print("finish test script!")
def train(model, train_loader, valid_loader, loss_fn, optimizer, early_stopping,
          save_path, max_training_epoch, goal_loss, is_plot=True):
    """Run a standard train/validate loop with early stopping.

    Trains until ``max_training_epoch`` epochs elapse, the validation loss
    drops to ``goal_loss`` or below, or ``early_stopping`` triggers.  After
    the loop, the best checkpoint ('checkpoint.pt', written by
    ``early_stopping``) is loaded back into ``model`` and the file removed.
    Optionally plots the loss curves and saves them to save_path/trainLoss.png.
    """
    avg_train_losses = []  # average training loss per epoch
    avg_valid_losses = []  # average validation loss per epoch
    for t in range(max_training_epoch):
        train_losses = []
        valid_losses = []
        start_time = time.time()
        for feature, target in train_loader:
            target_hat = model(feature)
            loss = loss_fn(target_hat, target)
            optimizer.zero_grad()  # clear gradients for next train
            loss.backward()        # backpropagation, compute gradients
            optimizer.step()       # apply gradients
            train_losses.append(loss.item())
        for feature, target in valid_loader:
            # validation: forward pass only, no gradient updates
            target_hat = model(feature)
            loss = loss_fn(target_hat, target)
            valid_losses.append(loss.item())
        train_loss = np.average(train_losses)
        valid_loss = np.average(valid_losses)
        avg_train_losses.append(train_loss)
        avg_valid_losses.append(valid_loss)
        ellapse_time = time.time() - start_time
        print('Epoch', t, ': Train Loss is ', train_loss, 'Validate Loss is', valid_loss,
              ", One Interation take ", int(ellapse_time), 'seconds')
        if valid_loss <= goal_loss:
            print("Reach goal loss, valid_loss=", valid_loss, '< goal loss=', goal_loss)
            break
        early_stopping(valid_loss, model)
        if early_stopping.early_stop:
            print("Early stopping at Epoch")
            # update the data with checkpoint
            break
    # Reload the best model saved by early_stopping, then clean up the file.
    model, _, _ = load_model('.', 'checkpoint', model)
    remove('checkpoint.pt')
    ### plot the train loss and validate loss curves
    if is_plot:
        fig = plt.figure(figsize=(10, 8))
        plt.plot(range(1, len(avg_train_losses) + 1), avg_train_losses, label='Training Loss')
        plt.plot(range(1, len(avg_valid_losses) + 1), avg_valid_losses, label='Validation Loss')
        # find position of lowest validation loss
        minposs = avg_valid_losses.index(min(avg_valid_losses)) + 1
        plt.axvline(minposs, linestyle='--', color='r', label='Early Stopping Checkpoint')
        plt.xlabel('epochs')
        plt.ylabel('loss')
        # BUG FIX: the upper y-limit previously compared avg_valid_losses with
        # itself; use both curves so neither one is clipped.
        plt.ylim(0, max(max(avg_train_losses), max(avg_valid_losses)))  # consistent scale
        plt.xlim(0, len(avg_train_losses) + 1)  # consistent scale
        plt.grid(True)
        plt.legend()
        plt.tight_layout()
        plt.show()
        fig.savefig(path.join(save_path, 'trainLoss.png'))
avg_valid_losses.append(valid_loss) ellapse_time = time.time() - start_time print('Epoch', t, ': Train Loss is ', train_loss, 'Validate Loss is', valid_loss, ", One Interation take ", ellapse_time) if valid_loss <= goal_loss: print("Reach goal loss, valid_loss=", valid_loss, '< goal loss=', goal_loss) break early_stopping(valid_loss, model) if early_stopping.early_stop: print("Early stopping at Epoch") # update the data with checkpoint break model, _, _ = load_model('.', 'checkpoint', model) remove('checkpoint.pt') save_model(file_path, file_name, model, input_scaler=dataset.input_scaler, output_scaler=dataset.output_scaler) ### plot the train loss and validate loss curves if is_plot: fig = plt.figure(figsize=(10, 8)) plt.plot(range(1, len(avg_train_losses) + 1), avg_train_losses, label='Training Loss') plt.plot(range(1,
import torch
from Net import *
from loadModel import get_model, load_model
import time

# ROS effort-command and joint-state topics for the chosen dVRK MTM arm.
# NOTE(review): rospy, JointState and dvrk are used below but their imports are
# not visible in this chunk — confirm they are imported elsewhere in the file.
MTM_ARM = 'MTMR'
pub_topic = '/dvrk/' + MTM_ARM + '/set_effort_joint'
sub_topic = '/dvrk/' + MTM_ARM + '/state_joint_current'
use_net = 'SinNet'
D = 5  # NOTE(review): presumably the number of joints/DOF — confirm with get_model
device = 'cpu'

# Load the trained controller network and its input/output scalers, and pin
# everything to the CPU for the real-time control loop.
model = get_model('MTM', use_net, D, device=device)
model, input_scaler, output_scaler = load_model('.', 'test_Controller', model)
model = model.to('cpu')

# ROS node, publisher and arm handle.
pub = rospy.Publisher(pub_topic, JointState, queue_size=15)
rospy.init_node(MTM_ARM + 'controller', anonymous=True)
rate = rospy.Rate(10)  # 10hz
mtm_arm = dvrk.mtm(MTM_ARM)
count = 0

def callback(data):
    # Subscriber callback; only the global declarations are visible in this
    # chunk — the body continues beyond this view.
    global use_net
    global D
    global input_scaler
    global output_scaler
import numpy as np
from PIL import ImageTk, Image
import librosa
import os

# imports for the GUI
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from tkinter.filedialog import askopenfilename

# Root window must exist before any other tkinter objects are created.
window = tk.Tk()

from loadModel import load_model

# Load the model once at startup.
model = load_model()

# Window properties: fixed 400x400, custom icon, not resizable beyond maxsize.
window.title("MusiFile")
window.geometry('400x400')
window.iconbitmap("./clef.ico")
window.maxsize(width=400, height=400)

# Main green frame filling the whole window.
frame1 = tk.Frame(window, bg='#67B93E')
frame1.pack(fill='both', expand='yes')
#-------------------------#
#
#lbl1 = tk.Label(frame1, image=photo)
#lbl1.photo = photo
#lbl1.pack()
from loadModel import load_model
from imagenet_utils import preprocess_input
from keras.models import Model
from keras.preprocessing import image
import numpy as np

# Classifier loaded once at import time and reused for every prediction.
vehicle_classifier = load_model()


def test_type(img_path):
    """Classify the image at ``img_path`` and return "Bike" or "Car"."""
    # Renamed local from `image` to avoid shadowing the keras `image` module.
    img_batch = img_preprocess(img_path)
    pred = vehicle_classifier.predict(img_batch)
    return decode_predictions(pred)


def img_preprocess(img_path):
    """Load an image file as a preprocessed (1, 224, 224, 3) batch array."""
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)  # add the batch dimension
    return preprocess_input(x)


def decode_predictions(pred):
    """Map the highest-scoring class index to its label (0 -> Bike, else Car)."""
    max_val = np.argmax(pred)
    if max_val == 0:
        return "Bike"
    else:
        return "Car"
def multiTask_train(modelList, train_loaderList, valid_loaderList, optimizer, loss_fn,
                    early_stopping, max_training_epoch, is_plot=True):
    """Jointly train one model per task with a shared optimizer and loss.

    Each epoch iterates every task's train loader (with gradient updates) and
    then every task's validation loader (forward pass only).  Early stopping
    monitors the pooled validation loss; afterwards the best checkpoint is
    loaded back into the model list and 'checkpoint.pt' is removed.

    :returns: ``modelList`` with the early-stopping checkpoint restored.
    """
    avg_train_losses = []  # average training loss per epoch
    avg_valid_losses = []  # average validation loss per epoch
    task_num = len(modelList)
    for t in range(max_training_epoch):
        train_losses = []
        valid_losses = []
        for i in range(task_num):
            for feature, target in train_loaderList[i]:
                model = modelList[i]
                target_hat = model(feature)
                loss = loss_fn(target_hat, target)
                optimizer.zero_grad()  # clear gradients for next train
                loss.backward()        # backpropagation, compute gradients
                optimizer.step()       # apply gradients
                train_losses.append(loss.item())
        for i in range(task_num):
            for feature, target in valid_loaderList[i]:
                # validation: forward pass only, no gradient updates
                model = modelList[i]
                target_hat = model(feature)
                loss = loss_fn(target_hat, target)
                valid_losses.append(loss.item())
        train_loss = np.average(train_losses)
        valid_loss = np.average(valid_losses)
        avg_train_losses.append(train_loss)
        avg_valid_losses.append(valid_loss)
        print('Epoch', t, ': Train Loss is ', train_loss, 'Validate Loss is', valid_loss)
        early_stopping(valid_loss, modelList)
        if early_stopping.early_stop:
            print("Early stopping at Epoch")
            break
    # Reload the best models saved by early_stopping, then clean up.
    # BUG FIX: the checkpoint was previously also torch.load()-ed into an
    # unused variable right before deletion; load_model already restores it.
    modelList, _, _ = load_model('.', 'checkpoint', modelList)
    remove('checkpoint.pt')
    # plot
    if is_plot:
        fig = plt.figure(figsize=(10, 8))
        plt.plot(range(1, len(avg_train_losses) + 1), avg_train_losses, label='Training Loss')
        plt.plot(range(1, len(avg_valid_losses) + 1), avg_valid_losses, label='Validation Loss')
        # find position of lowest validation loss
        minposs = avg_valid_losses.index(min(avg_valid_losses)) + 1
        plt.axvline(minposs, linestyle='--', color='r', label='Early Stopping Checkpoint')
        plt.xlabel('epochs')
        plt.ylabel('loss')
        # BUG FIX: the upper y-limit previously compared avg_valid_losses with
        # itself; use both curves so neither one is clipped.
        plt.ylim(0, max(max(avg_train_losses), max(avg_valid_losses)))  # consistent scale
        plt.xlim(0, len(avg_train_losses) + 1)  # consistent scale
        plt.grid(True)
        plt.legend()
        plt.tight_layout()
        plt.show()
        # fig.savefig(pjoin('data','LogNet',model_file_name+'.png'), bbox_inches='tight')
    return modelList