def main():
    hparams = HParams()
    model_train.train(hparams)
    string = '待翻译文本'  # sample input: "text to be translated"
    translation = model_infer.translate(hparams, string)
    print(translation)
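HParams is not defined in this snippet; a minimal hypothetical container, assuming it only needs to carry settings shared by model_train.train and model_infer.translate:

# Hypothetical stand-in for the HParams used above; all field names and
# values are assumptions, not taken from the source.
class HParams:
    def __init__(self):
        self.src_lang = 'zh'      # assumed source language of the sample text
        self.tgt_lang = 'en'      # assumed target language
        self.batch_size = 64
        self.num_epochs = 10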
def model_train():
    # time.sleep(30)
    train()
    print("Model trained successfully!")
    send_status()
    send_model()
    # get_agg_model()
    return "Model trained successfully!"
def model_train():
    print("Received request")
    # time.sleep(10)
    train()
    print("Model trained successfully!")
    send_status()
    send_model()
    # get_agg_model()
    return "Model trained successfully!"
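The two handlers above call send_status() and send_model(), which are not shown. A hedged sketch of what such federated-client helpers typically look like; the aggregator URL, payload format, and weights file name are assumptions, not taken from the source:

import requests

AGGREGATOR_URL = 'http://localhost:8000'   # hypothetical aggregator address

def send_status():
    # Report training completion to the aggregator (payload format assumed).
    requests.post(AGGREGATOR_URL + '/status',
                  json={'client': 1, 'state': 'trained'})

def send_model():
    # Upload the locally trained weights (file name assumed).
    with open('model.h5', 'rb') as f:
        requests.post(AGGREGATOR_URL + '/model', files={'model': f})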
def train(self, train_covariates, train_response, learning_rate=0.03,
          iterations_number=1000, skip_side_values=False,
          iterations_b4_skip=5000, mistake_to_skip=5, double_learn=False,
          additional_rand_learn=False):
    model_train.train(self, train_covariates, train_response, learning_rate,
                      iterations_number, skip_side_values, iterations_b4_skip,
                      mistake_to_skip, double_learn, additional_rand_learn)
def model_train():
    if not os.path.isdir(cwd + '/static'):
        os.mkdir(cwd + '/static')
    y, z = train()
    accuracy = y["accuracy"]
    loss = y["loss"]
    val_accuracy = y["val_accuracy"]
    val_loss = y["val_loss"]
    N = len(loss)
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(np.arange(0, N), loss, label="train_loss")
    plt.plot(np.arange(0, N), accuracy, label="train_acc")
    plt.plot(np.arange(0, N), val_loss, label="val_loss")
    plt.plot(np.arange(0, N), val_accuracy, label="val_acc")
    plt.title("Training Loss and Accuracy for Federated Client 1")
    plt.xlabel("Epochs")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="center right")
    plt.savefig(cwd + "/static/plot1.jpg")
    image = [i for i in os.listdir('static') if i.endswith('.jpg')][0]
    return render_template('train.html', epoch=len(loss), loss=loss,
                           accuracy=accuracy, val_loss=val_loss,
                           val_accuracy=val_accuracy, name=z, user_image=image)
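The view above unpacks y, z = train() and indexes y like a Keras-style history dict. A hedged sketch of the contract it relies on; build_model, x_train, y_train, x_val, and y_val are hypothetical placeholders, not from the source:

def train():
    # Returns (history dict with accuracy/loss/val_accuracy/val_loss, name).
    model = build_model()                       # hypothetical model factory
    history = model.fit(x_train, y_train,
                        validation_data=(x_val, y_val),
                        epochs=10)
    return history.history, 'client1_model'    # name value is an assumption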
                    help='Print debug info')
subparsers = parser.add_subparsers(dest='command')
training = subparsers.add_parser(
    'train', help="Trains the model, '-nc' skip cleaning & '-ni' skip IDF")
training.add_argument('-nc', action='store_true',
                      help="Skips the Data Cleaning Process")
training.add_argument('-d', type=str, help="Enter the name of the dataset")
training.add_argument('-t', type=int, help="Enter the rows to be truncated")
prediction = subparsers.add_parser(
    'predict', help="Predicts the output, 'main.py -P number_of_tags url'")
prediction.add_argument('n', type=int)
prediction.add_argument('url', type=str)
args = parser.parse_args()
if args.command == 'train':
    # Check against None explicitly: a truthiness test would mis-route -t 0.
    if args.t is not None and args.d is not None:
        train(args.nc, args.d, args.t)
    elif args.t is None and args.d is None:
        train(args.nc, 'articles.csv', -1)
    elif args.t is None:
        train(args.nc, args.d, -1)
    elif args.d is None:
        train(args.nc, 'articles.csv', args.t)
elif args.command == 'predict':
    predict(args.n, args.url)
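The four-way branch above just encodes two independent defaults; an equivalent, flatter dispatch (same defaults, 'articles.csv' and -1):

if args.command == 'train':
    dataset = args.d if args.d is not None else 'articles.csv'
    truncate = args.t if args.t is not None else -1
    train(args.nc, dataset, truncate)
elif args.command == 'predict':
    predict(args.n, args.url)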
                        default=64)
    parser.add_argument('--gen_search_num', type=int, help='gen_search_num',
                        default=512)
    parser.add_argument('--gen_mean', type=int, help='noise mean', default=0)
    parser.add_argument('--gen_std', type=int, help='noise variance', default=1)
    input_args = parser.parse_args()
    print(input_args)
    return input_args


if __name__ == '__main__':
    args = parse_argvs()
    # train_path = args.train_path
    # test_path = args.test_path
    # output_model_path = args.output_model_path
    # num_classes = args.classes_num
    # batch_size = args.batch_size
    # img_size = args.img_size
    # lr = args.lr
    # model = models.resnet18(num_classes=num_classes)
    # model = models.squeezenet1_1(num_classes=num_classes)
    trainer = model_train.ModuleTrain(opt=args)  # renamed to avoid shadowing the model_train module
    trainer.train()
    # trainer.test(show_img=True)
from tkinter import *  # needed for Tk, Button, TOP used below
from PIL import ImageTk, Image
from embeddings_extraction import embeddings
from recognize_face import recognition
from dataset_collection import dataset
from model_train import train

root = Tk()
root.title("CRIMINAL DETECTION")
root.geometry("1000x700")
root.configure(bg='black')

new_face_button = Button(
    root, text="NEW CRIMINAL FACE RECOGNITION",
    command=lambda: [dataset(), embeddings(), train(), recognition()],
    activeforeground="#ffffff", activebackground="#168900",
    padx=10, pady=20, bg="#39FF14", highlightcolor="#ADFF9E",
    font="Impact", width=30, height=1)
new_face_button.pack(side=TOP)

recognizer_button = Button(root, text="EXISTING CRIMINAL FACE RECOGNITION",
                           command=recognition, activeforeground="#ffffff",
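The lambda that builds a throwaway list above works, but a named callback makes the pipeline order explicit; an equivalent rewrite using only functions already imported in the snippet:

def enroll_and_recognize():
    # Full pipeline for a new face: capture images, extract embeddings,
    # retrain the model, then run recognition.
    dataset()
    embeddings()
    train()
    recognition()

# new_face_button = Button(root, ..., command=enroll_and_recognize, ...)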
def model_train():
    train()
    return "Model trained successfully!"
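The handler above returns a string like a web view but has no visible routing. A minimal sketch of how it is likely wired up, assuming Flask; the route path and port are assumptions, not taken from the source:

from flask import Flask

app = Flask(__name__)

@app.route('/train')              # route path is an assumption
def model_train():
    train()                        # the snippet's own train() function
    return "Model trained successfully!"

if __name__ == '__main__':
    app.run(port=5000)             # port is an assumption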
    model_file = os.path.join(
        output_model_path, 'cc_inception_v3_' + str(args.old_classes_num) + '.pkl')
    # new_model_file is only set in the resnet18 branch in the original;
    # defined here by symmetry (assumed name) so the constructor call below works.
    new_model_file = os.path.join(
        output_model_path, 'cc_inception_v3_' + str(args.new_classes_num) + '.pkl')
    model = models.inception_v3(num_classes=old_classes_num, aux_logits=False)
else:
    model_file = os.path.join(
        output_model_path, 'cc_resnet18_' + str(args.old_classes_num) + '_best.pkl')
    new_model_file = os.path.join(
        output_model_path, 'cc_resnet18_' + str(args.new_classes_num) + '.pkl')
    # model = models.resnet18(num_classes=old_classes_num)
    model = resnet_torch.resnet18(num_classes=old_classes_num)

transfer_learning = True
trainer = model_train.ModuleTrain(train_path=train_path, test_path=test_path,
                                  model_file=model_file, model=model,
                                  batch_size=batch_size, img_size=img_size,
                                  lr=lr, optimizer=args.optimizer,
                                  re_train=args.re_train,
                                  new_model_file=new_model_file,
                                  transfer_learning=transfer_learning,
                                  new_classes_num=args.new_classes_num)  # renamed to avoid shadowing the model_train module
trainer.train(args.epoch, args.decay_epoch)
train_data, valid_data, train_labels, valid_labels = train_test_split(
    train_valid_data, train_valid_labels, test_size=0.2)

trainSignData = Dataset(train_data, train_labels)
trainDataLoader = torch.utils.data.DataLoader(trainSignData, shuffle=True,
                                              batch_size=batch_size)
testSignData = Dataset(test_data, test_labels)
testDataLoader = torch.utils.data.DataLoader(testSignData, shuffle=True,
                                             batch_size=batch_size)
validSignData = Dataset(valid_data, valid_labels)
validDataLoader = torch.utils.data.DataLoader(validSignData, shuffle=True,
                                              batch_size=batch_size)

net = Net()
if torch.cuda.is_available():
    print('CUDA is available! Training on GPU ...\n')
    net.cuda()

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)

for epoch in range(epochs):
    train(epoch, net, trainDataLoader, optimizer, criterion, validDataLoader)
test(net, testDataLoader)
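The epoch loop above assumes a per-epoch train() function; a minimal sketch consistent with its signature, assuming torch is already imported. Validation and logging details in the original may differ:

def train(epoch, net, loader, optimizer, criterion, valid_loader):
    net.train()
    for data, labels in loader:
        if torch.cuda.is_available():
            data, labels = data.cuda(), labels.cuda()
        optimizer.zero_grad()
        loss = criterion(net(data), labels)   # cross-entropy on the batch
        loss.backward()
        optimizer.step()
    print('Epoch %d done, last batch loss: %.4f' % (epoch, loss.item()))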
import torch
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
# %matplotlib inline

# Prep the datasets
from prepare_datasets import prepare_datasets
train_loader, test_loader, batch_size = prepare_datasets()

# build the network architecture
from model_arch import ConvAutoencoder
model = ConvAutoencoder()

# train the network
from model_train import train
train(train_loader, model)

# check our results
from check_results import check_results
check_results(test_loader, model, batch_size)

## END ##
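A hedged sketch of what model_train.train likely does for a convolutional autoencoder: reconstruct the input and minimize MSE against it. Epoch count and learning rate are assumptions, not from the source:

def train(train_loader, model, epochs=20, lr=1e-3):
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    for epoch in range(epochs):
        for images, _ in train_loader:    # labels unused for reconstruction
            optimizer.zero_grad()
            loss = criterion(model(images), images)
            loss.backward()
            optimizer.step()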
if __name__ == '__main__':
    if len(sys.argv) != 1:
        print("Usage:%s camera_id\r\n" % (sys.argv[0]))
        sys.exit(0)

    # Load the model
    model = Model()
    # train(data_path)
    model.load_model(file_path=r'../face_recognition/model/train_model.h5')
    images = cv2.VideoCapture(0)
    while True:
        if SWITCH == 1:
            enter_new_user(data_path)
            train(data_path)
        else:
            flag, frame = images.read()
            if flag:
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            else:
                continue
            # Load the face-detection cascade classifier
            cascade = cv2.CascadeClassifier(
                r"D:\anaconda3\envs\tensorflow\Lib\site-packages\cv2\data\haarcascade_frontalface_alt2.xml"
            )
            # Use the classifier to find which regions contain a face
            faceRects = cascade.detectMultiScale(frame_gray, scaleFactor=1.2,
                                                 minNeighbors=3)
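            # The original snippet breaks off above. A hypothetical
            # continuation showing the usual pattern: draw a box around each
            # detected face and display the frame. Only faceRects and frame
            # come from the snippet; the quit-key handling is an assumption.
            for (x, y, w, h) in faceRects:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.imshow('camera', frame)
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break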
import argparse
import json
import os
from datetime import datetime

from model_evaluate import evaluate
from model_train import train

if __name__ == '__main__':
    with open('data/model_defaults.json') as f:
        defaults = json.load(f)
    if defaults['dataset_root'] == '':
        if 'kmametani' in os.environ['HOME']:
            defaults['dataset_root'] = '/home/kmametani/Master_Files'
        else:
            defaults['dataset_root'] = '/media/kokimame/Work_A_1TB/Project/Master_Files'

    parser = argparse.ArgumentParser(description='Training code of TA_MODEL')
    parser.add_argument(
        '-dn', '--dataset_name', type=str, default='',
        help='Specifying a dataset for training and evaluation.')
    args = parser.parse_args()

    save_name = '_'.join([args.dataset_name, *str(datetime.now()).split()])
    # lr_arg = '{}'.format(args.learning_rate).replace('.', '-')
    # margin_arg = '{}'.format(args.margin).replace('.', '-')
    if defaults['run_type'] == 'train':
        train(defaults, save_name, args.dataset_name)
    # Use this later
    # else:
    #     evaluate(defaults, save_name, args.dataset_name)
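The script above reads two keys from data/model_defaults.json; a minimal hypothetical example of its contents, shown as the equivalent Python dict (other keys the file may hold are unknown):

defaults_example = {
    "dataset_root": "",        # filled in at runtime when left empty
    "run_type": "train",       # 'train' triggers train(...) above
}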
train_path = args.train_path
test_path = args.test_path
output_model_path = args.output_model_path
num_classes = args.classes_num
batch_size = args.batch_size
img_size = args.img_size
lr = args.lr

print('train_path: %s' % train_path)
print('test_path: %s' % test_path)
print('output_model_path: %s' % output_model_path)
print('num_classes: %d' % num_classes)
print('img_size: %d' % img_size)
print('batch_size: %d' % batch_size)
print('lr: %s' % lr)

model = models.resnet18(num_classes=num_classes)
# model = models.squeezenet1_1(num_classes=num_classes)
trainer = model_train.ModuleTrain(train_path, test_path, output_model_path,
                                  model=model, batch_size=batch_size,
                                  img_size=img_size, lr=lr)  # renamed to avoid shadowing the model_train module
trainer.train(200, 80)
# trainer.test(show_img=True)
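ModuleTrain itself does not appear in any of these snippets; a hedged sketch of the interface its call sites imply (a constructor taking paths, a model, and hyperparameters, plus train(epochs, decay_epoch)). The body is an assumption, not the actual implementation:

class ModuleTrain:
    # Interface sketch only; internals are inferred from the call sites above.
    def __init__(self, train_path, test_path, output_model_path=None,
                 model=None, batch_size=64, img_size=224, lr=0.01, **kwargs):
        self.model = model
        self.lr = lr

    def train(self, epochs, decay_epoch):
        for epoch in range(epochs):
            if epoch > 0 and epoch % decay_epoch == 0:
                self.lr *= 0.1   # assumed step decay every decay_epoch epochs
            # ... one pass over the training set at the current lr ...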