Example #1
def main():
    # args = parse_args()
    # batch_size = args.batch_size
    # n_epochs = args.n_epochs
    # learning_rate = args.learning_rate

    # Hard-coded hyperparameters
    batch_size = 4
    n_epochs = 70
    learning_rate = 0.0005

    start(batch_size, n_epochs, learning_rate)
Example #2
def openCam():
    # Student name (read from the input field)
    studentName = entryName.get()
    studentName = studentName.upper()
    studentName = studentName.replace(' ', '_')
    print(studentName)
    root.destroy()

    cancel = False

    # Student classroom (hard-coded value)
    classroomID = '1A'  # sys.argv[2]

    # Pictures counter
    count = 0

    # Train folder path
    path = "classrooms/{}/train/".format(classroomID)

    # Show the webcam feed until 6 pictures have been taken
    while count < 6:

        # Read webcam frame
        ret, frame = cap.read()

        # Display frame
        cv2.imshow("Webcam - Aperte 'q' para cancelar", frame)

        key = cv2.waitKey(1)
        # Enter pressed
        if key & 0xFF == ord('\r'):

            # Build the file name from the student name and the picture counter
            fileName = "{}.{}.jpg".format(studentName, count)

            # Getting the frame and saving in the train folder path
            cv2.imwrite(path + fileName, frame)
            count += 1
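        # 'q' pressed: cancel and delete any pictures already saved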
        elif key & 0xFF == ord('q'):
            cancel = True
            while (count != 0):
                count -= 1
                fileName = "{}.{}.jpg".format(studentName, count)
                os.remove(path + fileName)
            break

    # Stop reading frames from the webcam
    cap.release()

    # Close webcam window
    cv2.destroyAllWindows()

    if not cancel:
        train.start(classroomID, studentName)
Example #3
def run(args):
    if args['mode'] == 'train':
        ## preprocessing steps
        preprocess.preprocess_train('./cars/cars_train',
                                    './cars/devkit/cars_train_annos.mat')
        preprocess.preprocess_test('./cars/cars_test',
                                   './cars/devkit/cars_test_annos.mat')

        split_test_val.test_with_labels(
            './cars/cars_test', './cars/devkit/cars_test_annos_withlabels.mat')
        split_test_val.val_test_split()

        train.start(train_path, val_path)

    elif args['mode'] == 'test':
        car_detector = det.Detector()
        car_recognizer = rec.Recognizer()
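        # List every image file in the test directory and process them one by one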
        images_dir = os.listdir(args['data'] + "/test/")
        for imagepath in images_dir:
            no_of_cars, car_boxes = car_detector.test_model(args['data'] +
                                                            "/test/" +
                                                            imagepath)
            if no_of_cars > 0:
                print(imagepath)
                print('car_boxes', car_boxes[0])
                image_ = image.load_img(args['data'] + "/test/" + imagepath)
                image_ = image.img_to_array(image_)
                height, width, ch = image_.shape
                # Crop each detected box (coordinates are fractions of the image size) and classify it
                for car in range(no_of_cars):
                    box = car_boxes[car]
                    rec_image = image_[int(box[0] * height):int(box[2] * height),
                                       int(box[1] * width):int(box[3] * width)]
                    result = car_recognizer.load_images_predict(rec_image)
                    print("found {} in the above image".format(result))
Example #4
def executeTrainModel(config_path, model_name):
    print(config_path)
    print('Preparing the parser config file...')
    conf = ParserConf(config_path)
    conf.parserConf()


    # Look up the model class by name and instantiate it with the parsed config
    model = eval(model_name)
    model = model(conf)

    #print('System start to load data...')
    data = DataUtil(conf)
    evaluate = Evaluate(conf)

    import train as starter
    starter.start(conf, data, model, evaluate)
Example #5
def executeTrainModel(config_path, model_name, log_dir):
    print(config_path)
    #print('System start to prepare parser config file...')
    conf = ParserConf(config_path)
    conf.parserConf()
    print(conf.topk)

    #print('System start to load TensorFlow graph...')
    model = eval(model_name)
    model = model(conf)  # initialize the model (diffnet.py)

    #print('System start to load data...')
    data = DataUtil(conf)
    evaluate = Evaluate(conf)

    import train as starter
    starter.start(conf, data, model, evaluate, log_dir)
Example #6
def evaluate_hw3():
    # all data files should be inside "coco" folder in the project directory
    preprocess_images.run(is_evaluate=True)
    preprocess_vocab.run()
    val_loader = data.get_loader(val=True)

    net = nn.DataParallel(model.Net(val_loader.dataset.num_tokens)).cuda()
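    # Load the checkpoint onto the CPU first; load_state_dict then copies the weights into the CUDA model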
    net.load_state_dict(
        torch.load('model.pkl', map_location=lambda storage, loc: storage))

    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad])
    tracker = utils.Tracker()
    answers, accuracies, idx = start(net,
                                     val_loader,
                                     optimizer,
                                     tracker,
                                     train=False,
                                     prefix='val')
    acc = calc_accuracy(accuracies)
    print('%.2f' % acc)
Example #7
import tensorflow as tf
import os.path

import server

# Check if pre-trained model already exists
if not os.path.exists('mnist.h5'):
    import train

    train.start()

    print('Training complete. Starting server')
    server.start()

else:
    print('Model exists. Starting server')
    server.start()
Example #8
from ParserConf import ParserConf

app_conf = ParserConf('dualpc.ini')
app_conf.parserConf()

import os

# Restrict CUDA to GPU 1 before any CUDA-aware modules are imported
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

from dualpc import dualpc

model = dualpc(app_conf)

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.setDevice(device)

from DataUtil import DataUtil

data = DataUtil(app_conf)

import train

train.start(app_conf, data, model)
Example #9
    parser = argparse.ArgumentParser(
        description='Welcome to the Experiment Platform Entry')
    parser.add_argument('--data_name', nargs='?', help='data name')
    parser.add_argument('--model_name',
                        nargs='?',
                        default='gcncsr',
                        help='model name')
    parser.add_argument('--gpu', nargs='+', help='available gpu id')

    args = parser.parse_args()

    data_name = args.data_name
    model_name = args.model_name
    device_id = ','.join(args.gpu)
    os.environ['CUDA_VISIBLE_DEVICES'] = device_id
    # os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
    config_path = os.path.join(
        os.getcwd(), f'conf/{data_name}/{data_name}_{model_name}.ini')
    conf = ParserConf(config_path)
    conf.parserConf()
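    # Set a descriptive process title so the run is easy to identify in ps/top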
    setproctitle.setproctitle('{}_{}_{}@linian'.format(conf.data_name,
                                                       conf.model_name,
                                                       conf.test_name))
    data = DataUtil(conf)
    starter.start(conf, data, model_name)
    # executeTrainModel(config_path, model_name)
Example #10
    config['dataloader']['batch_size'] = args.pop('sz_batch')
    config['dataloader']['num_workers'] = args.pop('num_workers')
    config['recluster']['mod_epoch'] = args.pop('mod_epoch')
    config['opt']['backbone']['lr'] = args.pop('backbone_lr')
    config['opt']['backbone']['weight_decay'] = args.pop('backbone_wd')
    config['opt']['embedding']['lr'] = args.pop('embedding_lr')
    config['opt']['embedding']['weight_decay'] = args.pop('embedding_wd')

    # Copy any remaining top-level arguments onto matching top-level config keys
    for k in args:
        if k in config:
            config[k] = args[k]

    if config['nb_clusters'] == 1:
        config['recluster']['enabled'] = False

    config['log'] = {
        'name': '{}-K-{}-M-{}-exp-{}'.format(
            config['dataset_selected'],
            config['nb_clusters'],
            config['recluster']['mod_epoch'],
            args['exp']
        ),
        'path': 'log/{}'.format(args['dir'])
    }

    # tkinter not installed on this system, use non-GUI backend
    matplotlib.use('agg')
    train.start(config)

Example #11
import sys
import train as Train
import run as Run

if __name__ == "__main__":
    if len(sys.argv) == 1:
        print("Please select the mode")
    else:
        arg = sys.argv[1]
        if arg == "train":
            print("Trainning mode")
            Train.start()
        elif arg == "run":
            print("Running mode")
            Run.start()
        else:
            print("Please choose the correct mode")
Example #12
    if len(sys.argv) < 2 or sys.argv[1] not in ["serve", "train"]:
        raise Exception(
            "Invalid argument: pass 'train' for training mode or 'serve' for serving mode"
        )

    if sys.argv[1] == "train":

        # SageMaker training environment: provides input channel paths, model/output dirs and CPU count
        env = environment.Environment()

        parser = argparse.ArgumentParser()
        # https://github.com/aws/sagemaker-training-toolkit/blob/master/ENVIRONMENT_VARIABLES.md
        parser.add_argument("--max-depth", type=int, default=10)
        parser.add_argument("--n-jobs", type=int, default=env.num_cpus)
        parser.add_argument("--boosting-type", type=str, default='gbdt')

        # Read the 'train' and 'validation' input channel paths from the environment
        parser.add_argument("--train",
                            type=str,
                            default=env.channel_input_dirs["train"])
        parser.add_argument("--validation",
                            type=str,
                            default=env.channel_input_dirs["validation"])

        parser.add_argument("--model-dir", type=str, default=env.model_dir)
        parser.add_argument("--output-dir", type=str, default=env.output_dir)

        args, unknown = parser.parse_known_args()
        train.start(args)
    else:
        model_server.start_model_server(handler_service="serving.handler")