Example no. 1
def test(opt):
    # logging
    logger = base_utils.get_logger('base')
    if opt['verbose']:
        logger.info('{} Configurations {}'.format('=' * 20, '=' * 20))
        base_utils.print_options(opt, logger)
    # infer and evaluate performance for each model
    for load_path in opt['model']['generator']['load_path_lst']:
        # setup model index
        model_idx = osp.splitext(osp.split(load_path)[-1])[0]
        
        # log
        logger.info('=' * 40)
        logger.info('Testing model: {}'.format(model_idx))
        logger.info('=' * 40)

        # create model
        opt['model']['generator']['load_path'] = load_path
        model = define_model(opt)
        model_idx = osp.basename(opt['model']['generator']['load_path']).split('.')[0]
        # for each test dataset
        for dataset_idx in sorted(opt['dataset'].keys()):
            # use dataset with prefix `test`
            if not dataset_idx.startswith('test'):
                continue
            validate(opt, model, logger, dataset_idx, model_idx, compute_metrics=False)

            logger.info('-' * 40)

    # logging
    logger.info('Finish testing')
    logger.info('=' * 40)
Example no. 2
def main(args):
    device = torch.device('cuda:{}'.format(args.device)) \
        if args.cuda else torch.device('cpu')

    model = EfficientDet.from_pretrained(args.model).to(device) \
        if args.pretrained else EfficientDet.from_name(args.model).to(device)

    if args.mode == 'trainval':
        logger("Model's trainable parameters: {}".format(
            count_parameters(model)))

        loader = get_loader(path=cfg.TRAIN_SET,
                            annotations=cfg.TRAIN_ANNOTATIONS,
                            batch_size=cfg.BATCH_SIZE)

        optimizer, scheduler, criterion, ema_decay = build_tools(model)
        writer = setup_writer(args.experiment, args)
        best_score = -1

        for epoch in range(cfg.NUM_EPOCHS):
            model, optimizer, scheduler, writer = \
                train(model, optimizer, loader, scheduler,
                      criterion, ema_decay, device, writer)

            if epoch > cfg.VAL_DELAY and \
                    (epoch + 1) % cfg.VAL_INTERVAL == 0:
                ema_decay.assign(model)
                model, writer, best_score = \
                    validate(model, device, writer,
                             cfg.MODEL.SAVE_PATH, best_score=best_score)
                ema_decay.resume(model)

    elif args.mode == 'eval':
        validate(model, device)
Example no. 3
def validate_c(board, r, c, player):
    count = 0
    # left
    for x in range(9):
        count += validate(board, [r, c, r, x], player)
        if board[r][x] == '1':
            board[r][x] = piece_op[player][-1]
            count += validate(board, [r, c, r, x], player)
            board[r][x] = '1'
    # right
    # for x in range(c, 9):
    #     count += validate(board, [r, c, r, x], player)
    #     newboard = board
    #     newboard[r][x] = piece_op[player][-1]
    #     count += validate(newboard, [r, c, r, x], player)
    # up
    for y in range(10):
        count += validate(board, [r, c, y, c], player)
        if board[y][c] == '1':
            board[y][c] = piece_op[player][-1]
            count += validate(board, [r, c, y, c], player)
            board[y][c] = '1'
    # down
    # for y in range(r, 10):
    #     count += validate(board, [r, c, y, c], player)
    #     newboard = board
    #     newboard[r][x] = piece_op[player][-1]
    #     count += validate(newboard, [r, c, r, x], player)
    return count
Example no. 4
def initAppDir(ctx, app_dir):
    """Initialize the application directory.

    Set the application directory in ctx, if not done yet,
    and load and validate the application yaml configuration.
    Exits the whole program on failure.

    Args:
        ctx (dict): The click context.
        app_dir (Path): The path to the application directory.
    """
    if 'APP_DIR' in ctx.obj:
        return
    ctx.obj['APP_DIR'] = app_dir
    file = Path(app_dir, 'application.yaml').resolve()
    try:
        with file.open('r') as app_yaml:
            ctx.obj['APP_YAML'] = yaml.safe_load(app_yaml)
    except (yaml.YAMLError, OSError) as e:
        fail("Failed to parse [%s]! Cause: %s" % (file, e))
    try:
        validation.validate(ctx.obj['APP_YAML'])
    except validation.ValidationError as e:
        fail(e)
    if 'name' not in ctx.obj['APP_YAML']['app']:
        ctx.obj['APP_YAML']['app']['name'] = str(
            ctx.obj['APP_YAML']['app']['title']).lower().replace(' ', '-')
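
A minimal wiring sketch for the function above (hedged: the click group, option name, and default are assumptions, not part of the original example):

import click
from pathlib import Path

@click.group()
@click.option('--app-dir', type=click.Path(exists=True, file_okay=False), default='.')
@click.pass_context
def cli(ctx, app_dir):
    # Make sure ctx.obj is a dict before initAppDir stores APP_DIR / APP_YAML in it.
    ctx.ensure_object(dict)
    initAppDir(ctx, Path(app_dir))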
Example no. 5
def register_page():
    #check if form has been submitted i.e., user has tried to register
    if (request.method == 'POST'):
        #get the data in name, email, and password fields
        name = request.form.get('name')
        email = request.form.get('email')
        
        # Validate email address
        if not validate(EMAIL_VALIDATION, email):
            flash("Invalid Email Address!", "danger")
            return render_template('register.html', json=json)

        password = request.form.get('password')
        
        # Validate password
        if not validate(PASSWORD_VALIDATION, password):
            flash("Invalid Password. Please enter a valid password!", "danger")
            return render_template('register.html', json=json)

        password2 = request.form.get('password2')
        #check if passwords match
        if(password!=password2):
            #if not, flash an error msg
            flash("Password unmatched!", "danger")
            return render_template('register.html', json=json)
        else:
            #generate the hashed password
            password = sha256_crypt.hash(password)
            response = Organization.query.filter_by(email=email).first()
            #check if the email already exists in the db
            if not response:
                #add the user to the db using the details entered and flash a msg
                entry = Organization(name=name, email=email, password=password, date=time, status=1)
                db.session.add(entry)
                db.session.commit()

                # Generate email verification token
                verification_token = generate_token(email)
                #generate the welcome email to be sent to the user
                subject = "Welcome aboard " + name + "!"

                content = render_template('email_template.html', token=verification_token, email=email)

                response = mail_handler(recepient_email=email,subject=subject, content=content)
                
                ## If any error occurs, the response will be equal to False 
                if isinstance(response, bool) and not response:    
                    flash("Error while sending mail!", "danger")
                else:
                    flash("Now verify your email address for activating your account.", "success")

                return redirect(url_for('login'))
            else:
                #user exists so flash an error
                flash("User exists!", "danger")
                return render_template('register.html', json=json)
    return render_template('register.html', json=json)
Example no. 6
def validate_n(board, r, c, player):
    count = 0
    for x in [-1, 1]:
        for y in [-2, 2]:
            count += validate(board, [r, c, r + x, c + y], player)
    for x in [-2, 2]:
        for y in [-1, 1]:
            count += validate(board, [r, c, r + x, c + y], player)
    return count
Example no. 7
def validate_p(board, r, c, player):
    count = 0
    if player == 'b':
        t = 1
    else:
        t = -1
    # forward
    count += validate(board, [r, c, r + t, c], player)
    # right
    count += validate(board, [r, c, r, c + 1], player)
    # left
    count += validate(board, [r, c, r, c - 1], player)
    return count
Example no. 8
def validate_k(board, r, c, player):
    count = 0
    for x in [-1, 1]:
        count += validate(board, [r, c, r + x, c], player)
    for y in [-1, 1]:
        count += validate(board, [r, c, r, c + y], player)
    if player == 'b':
        y_range = range(7, 10)
    else:
        y_range = range(3)
    for y in y_range:
        count += validate(board, [r, c, y, c], player)
    return count
Example no. 9
def main(args):
    device = torch.device('cuda:{}'.format(args.device)) \
        if args.cuda else torch.device('cpu')

    model = EfficientDet.from_pretrained(args.model).to(device) \
        if args.pretrained else EfficientDet.from_name(args.model).to(device)

    if args.mode == 'trainval':
        logger("Model's trainable parameters: {}".format(
            count_parameters(model)))

        loader = get_loader(path=cfg.TRAIN_SET,
                            annotations=cfg.TRAIN_ANNOTATIONS,
                            batch_size=cfg.BATCH_SIZE)

        optimizer, scheduler, criterion, ema_decay = build_tools(model)
        writer = setup_writer(args.experiment, args)
        best_score = -1

        for epoch in range(cfg.NUM_EPOCHS):
            model, optimizer, scheduler, writer = \
                train(model, optimizer, loader, scheduler,
                      criterion, ema_decay, device, writer)

            if epoch > cfg.VAL_DELAY and \
                    (epoch + 1) % cfg.VAL_INTERVAL == 0:
                ema_decay.assign(model)
                model, writer, best_score = \
                    validate(model, device, writer,
                             cfg.MODEL.SAVE_PATH, best_score=best_score)
                ema_decay.resume(model)

    elif args.mode == 'eval':
        model.eval()
        #model.to(memory_format=torch.channels_last)
        with torch.no_grad():
            model = torch.jit.script(model)
            #model = torch.utils.mobile_optimizer.optimize_for_mobile(model)
            #model(torch.randn((1,3,896,896)))
        import sys, time
        #N = 16
        #t =- time.time()
        #with torch.no_grad():
        #    for _ in range(N):
        #        model(torch.randn((1,3,896,896)))
        #t += time.time()
        #print(t/N)
        #sys.exit(0)
        import pdb
        pdb.set_trace()
        validate(model, device)
Example no. 10
def command(specs, args):

    cmd = dict()
    for key in specs: 
    
        mandatory = specs[key]['class'] == "required"
        if mandatory and not args.has_key(key): 
            raise ValueError("%s is a required field" % key)

        if args.has_key(key):
            value = args.get(key)
            validation.validate(key, specs[key]['type'], value)
            cmd[key] = value
        
    return cmd
Example no. 11
def validate_r(board, r, c, player):
    count = 0
    # left
    for x in range(c):
        count += validate(board, [r, c, r, x], player)
    # right
    for x in range(c, 9):
        count += validate(board, [r, c, r, x], player)
    # up
    for y in range(r):
        count += validate(board, [r, c, y, c], player)
    # down
    for y in range(r, 10):
        count += validate(board, [r, c, y, c], player)
    return count
Example no. 12
    def play(self):
        """Starts the game and checks if it finished."""
        while True:
            guess = self.get_guess()
            try:
                validate(guess)
            except AssertionError as e:
                print('Oops! ', e.args[0])
                continue
            results = self.check_guess(guess)
            print('Good: {0}\nRegular: {1}\n'.format(results['good'],
                                                     results['regular']))
            if results['good'] == NUMBER_SIZE:
                print('You win! Game over')
                break
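
A possible shape for the validate helper used above (hypothetical sketch; the game only shows that it raises AssertionError, and the value of NUMBER_SIZE is an assumption):

NUMBER_SIZE = 4

def validate(guess):
    # The game loop catches AssertionError and prints e.args[0].
    assert len(guess) == NUMBER_SIZE, 'guess must be exactly %d digits' % NUMBER_SIZE
    assert guess.isdigit(), 'guess must contain digits only'
    assert len(set(guess)) == NUMBER_SIZE, 'digits must not repeat'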
Example no. 13
def train(model, epochs, train_dl, val_dl):
    best_score = 0.0
    optimizer = BASE_OPTIMIZER(model.parameters(), lr=args.init_lr)
    for epoch in range(epochs):
        lr = lr_scheduler(epoch, args.lr_decay_factor, args.init_lr,
                          args.lr_decay_epoch)
        optimizer = BASE_OPTIMIZER(model.parameters(), lr=lr)
        total_loss = 0
        for batch_idx, (data, target) in enumerate(train_dl):
            data, target = data.cuda().float(), target.cuda().float()
            output = model(data)
            loss = F.binary_cross_entropy_with_logits(output, target)
            total_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                logger.info("Epoch %d (Batch %d / %d)\t Train loss: %.3f" % \
                    (epoch+1, batch_idx, len(train_dl), loss.item()))
        # train loss
        train_loss = total_loss / len(train_dl)
        logger.info("Epoch %d\t Train loss: %.3f") % (epoch + 1, train_loss)
        mlflow.log_metric('train_loss', train_loss, step=epoch)
        # validation scores
        val_f2_score, val_loss = validate(model, val_dl, 0.2)
        logger.info("Epoch %d \t Validation loss: %.3f, F2 score: %.3f" % \
            (epoch+1, val_loss, val_f2_score))
        mlflow.log_metric('val_loss', val_loss, step=epoch)
        mlflow.log_metric('val_f2_score', val_f2_score, step=epoch)
        if val_f2_score > best_score:
            best_score = val_f2_score
            best_model_path = os.path.join('models', 'model_net_%d.pth' % \
                (100*val_f2_score))
            logger.info("Saving model to %s" % best_model_path)
            save_model(model, best_model_path)
Example no. 14
def make_icon(left, right, icon_name):
    valid, args = validate((left, right), icon_name, request)
    if not valid:
        return jsonify(args)

    mime = "image/png" if args["format"] == "png" else "image/x-icon"
    return send_file(generate(args), mime)
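
One possible way to expose this handler as a Flask route (sketch only; the URL pattern and app object are assumptions, not shown in the snippet):

from flask import Flask

app = Flask(__name__)
app.add_url_rule('/icon/<left>/<right>/<icon_name>', view_func=make_icon)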
Example no. 15
    def _validate(self, model = None):
        # calls the event handler for the validation process this
        # should setup the operations for a correct validation
        self.pre_validate()

        # starts the model reference with the current model in
        # case none is defined
        model = model or self.model

        # retrieves the class associated with the current instance
        # to be able to retrieve the correct validate methods
        cls = self.__class__

        # checks if the current model is new (create operation)
        # and sets the proper validation methods retrieval method
        is_new = self.is_new()
        if is_new: method = cls.validate_new
        else: method = cls.validate

        # runs the validation process on the various arguments
        # provided to the account and in case an error is returned
        # raises a validation error to the upper layers
        errors, object = validation.validate(
            method,
            object = model,
            build = False
        )
        if errors: raise exceptions.ValidationError(errors, object)

        # calls the event handler for the validation process this
        # should finish the operations from a correct validation
        self.post_validate()
Example no. 16
    def answered_delay_question(self, f, subject, val):
        try:
            self.retract(f)

            # Previous delay
            if subject == 'previous_delay':
                val = re.sub('[^0-9]', '', val)

            # Validate
            # Check for suggestions
            sug = suggest(val, subject, self.context)
            if sug:
                self.just_suggested = True
                self.state_message(sug['message'])
                self.declare(Suggested(subject, sug['value'], sug['original']))
                self.set_prev_state('QUESTIONING')
                self.modify(state, status='SUGGESTING')
                return

            error = validate(val, subject)
            if error:
                self.state_message(error)
                return

            new_fact = return_fact(subject, val)
            self.declare(new_fact)
            self.mark_answered_delay(subject)
        except Exception as e:
            self.state_message('Sorry something went wrong.')
Example no. 17
def trainGradientBoosting(data, train, validation, N, max_features=None):
    w = open("F-1 for Gradient Boosting.txt", "a+")
    n = len(N)
    f1 = np.zeros(n)
    X_train, y_train = v.makeMatrix(data, train)
    X_test, y_test = v.makeMatrix(data, validation)

    for i in range(n):
        clf = GradientBoostingClassifier(n_estimators=N[i],
                                         max_features=max_features)
        f1[i] = v.validate(data, X_train, y_train, X_test, y_test, clf)
        print(
            "F-1 measure for Gradient Boosting with %s boosting stages and max_features = %s is %s"
            % (N[i], max_features, f1[i]))

    f1_max = np.max(f1)
    optimal_N = N[np.argmax(f1)]
    print(
        "Highest F-1 measure for Gradient Boosting with %s boosting stages and max_features = %s is %s"
        % (optimal_N, max_features, f1_max))
    w.write(
        "Highest F-1 measure for Gradient Boosting with %s boosting stages and max_features = %s is %s\n"
        % (optimal_N, max_features, f1_max))

    # f1_mean = np.mean(f1)
    # optimal_N = N[0]
    # print("Average F-1 measure for Gradient Boosting with %s boosting stages and max_features = %s is %s" %(optimal_N, max_features, f1_mean))
    # w.write("Average F-1 measure for Gradient Boosting with %s boosting stages and max_features = %s is %s\n" %(optimal_N, max_features, f1_mean))

    return f1_max, optimal_N
Example no. 18
def trainRandomForest(data, train, validation, N, max_features=None):
    w = open("F-1 for Random Forest.txt", "a+")
    n = len(N)
    f1 = np.zeros(n)
    X_train, y_train = v.makeMatrix(data, train)
    X_test, y_test = v.makeMatrix(data, validation)

    for i in range(n):
        clf = RandomForestClassifier(n_estimators=N[i],
                                     max_features=max_features,
                                     n_jobs=-1,
                                     class_weight='balanced')
        f1[i] = v.validate(data, X_train, y_train, X_test, y_test, clf)
        print(
            "F-1 measure for Random Forest with %s trees and max_features = %s is %s"
            % (N[i], max_features, f1[i]))

    # f1_max = np.max(f1)
    # optimal_N = N[np.argmax(f1)]
    # print("Highest F-1 measure for Random Forest with %s trees and max_features = %s is %s" %(optimal_N, max_features, f1_max))
    # w.write("Highest F-1 measure for Random Forest with %s trees and max_features = %s is %s\n" %(optimal_N, max_features, f1_max))

    f1_mean = np.mean(f1)
    optimal_N = N[0]
    print(
        "Average F-1 measure for Random Forest with %s trees and max_features = %s is %s"
        % (optimal_N, max_features, f1_mean))
    w.write(
        "Average F-1 measure for Random Forest with %s trees and max_features = %s is %s\n"
        % (optimal_N, max_features, f1_mean))

    return f1_mean, optimal_N
Example no. 19
def trainRegularizationStrengthForl2(data, train, validation, C):
    '''
	Logistic Regression with Ridge
	'''
    w = open("F-1 for Logistic Regression.txt", "a+")
    n = len(C)
    f1 = np.zeros(n)
    X_train, y_train = v.makeMatrix(data, train)
    X_test, y_test = v.makeMatrix(data, validation)

    for i in range(n):
        clf = LogisticRegression(C=C[i],
                                 class_weight='balanced',
                                 max_iter=10000,
                                 solver='sag',
                                 n_jobs=-1)
        f1[i] = v.validate(data, X_train, y_train, X_test, y_test, clf)
        print("F-1 measure for Logistic Regression with l2 and C = %s is %s" %
              (C[i], f1[i]))

    f1_max = np.max(f1)
    optimal_C = C[np.argmax(f1)]
    print(
        "Highest F-1 measure for Logistic Regression with l2 is %s with C = %s"
        % (f1_max, optimal_C))
    w.write(
        "Highest F-1 measure for Logistic Regression with l2 is %s with C = %s\n"
        % (f1_max, optimal_C))
    w.close()

    return f1_max, optimal_C
Example no. 20
def trainSVMWithLinearKernel2(data, train, validation, C):
    '''
	SVM with Linear Kernel with SVC()
	'''
    w = open("F-1 for Support Vector Mahines.txt", "a+")
    n = len(C)
    f1 = np.zeros(n)
    X_train, y_train = v.makeMatrix(data, train)
    X_test, y_test = v.makeMatrix(data, validation)

    for i in range(n):
        clf = SVC(C=C[i], kernel='linear', class_weight='balanced')
        f1[i] = v.validate(data, X_train, y_train, X_test, y_test, clf)
        print("F-1 measure for SVM with Linear kernel and C = %s is %s" %
              (C[i], f1[i]))

    f1_max = np.max(f1)
    optimal_C = C[np.argmax(f1)]
    print("Highest F-1 measure for SVM with Linear is %s with C = %s" %
          (f1_max, optimal_C))
    w.write("Highest F-1 measure for SVM with Linear is %s with C = %s\n" %
            (f1_max, optimal_C))
    w.close()

    return f1_max, optimal_C
Example no. 21
    def set_attributes(self, *new_deviceattribute):
        for a in new_deviceattribute:
            if validate(a):
                j_data = {"key": a.key, "value": a.value}
                self.j_list.append(j_data)
                self.attributes = self.j_list
            else:
                print("Attribute : ", a, " is not valid.")
Example no. 22
def create_user(cursor, db):
    """
    Creates a user for the overall manager

    Args:
        cursor (mysql.connector.cursor.MySQLCursor): The database cursor
        db (mysql.connector.connection.MySQLConnection): The database for storing data

    Returns:
        str: The selected username chosen
    """

    logger.info(Fore.GREEN + "Welcome to user setup." + Style.RESET_ALL)

    user = input("Please enter the username you would like to login as:")

    userEmail = input("Please enter your email: ")

    USERPASS = getpass(
        "Please enter the password you would like to login with: ")

    strong = validate(USERPASS)

    while strong is False:
        logger.warn(
            'Your password must be 8 characters long, contain one uppercase, one lowercase, and one of the following characters: [, ], @, $, #, !, &, *, (, ), ., ?, +, =, -'
        )

        USERPASS = getpass(
            "Please enter the password you would like to login with: ")

        strong = validate(USERPASS)

    mail.send_test(email, userEmail, emailPass)

    secret = totp.generate_shared_secret()

    PASS = cryptic.encrypt(USERPASS, key)

    insert_master(cursor, user, userEmail, PASS, secret)
    create_user_table(cursor, db, user)

    db.commit()

    return user
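
A sketch of the password-strength check that validate could perform here, based only on the rule spelled out in the warning message above (hypothetical; the real helper is not part of this example):

import re

def validate(password):
    # At least 8 characters, one uppercase, one lowercase, and one of []@$#!&*().?+=-
    return (len(password) >= 8
            and re.search(r'[A-Z]', password) is not None
            and re.search(r'[a-z]', password) is not None
            and re.search(r'[\[\]@$#!&*().?+=\-]', password) is not None)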
Example no. 23
    def check_multi_errors(self, params, errors):
        error = None

        for topic in freeform_topics:
            if topic in params and len(params[topic].strip()) > 0:
                error = validate(params[topic], topic, self.context)

            if error:
                errors.append(topic)
Example no. 24
    def calculate(self, args) -> int:
        """
        args support operators {+, -, *, /} and integers
        """
        if not len(args):
            raise InvalidInputError(self.help())

        self.stack.clear()

        while len(args):
            arg = args.pop()
            validation.validate(arg, self.ops.keys())
            self.__step(arg)

        if len(self.stack) > 1:
            raise InvalidInputError(
                'Not enough operations to perform calculation')

        return self.stack.pop()
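
For reference, a minimal sketch of the token check that validation.validate might perform here (hypothetical; only the operator set and the integer rule come from the docstring above):

class InvalidInputError(Exception):
    pass

def validate(arg, operators):
    # Accept known operator symbols or integer literals, reject everything else.
    if arg in operators:
        return
    try:
        int(arg)
    except (TypeError, ValueError):
        raise InvalidInputError('Unsupported token: %r' % (arg,))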
Example no. 25
def train(model, epochs, train_dl, val_dl, fold):
    best_score = 0.0
    lr0 = args.init_lr_0
    iterations = epochs * len(train_dl)
    idx = 0
    # create optimizer with differential learning rates
    optimizer = create_optimizer(MODEL, BASE_OPTIMIZER, args.init_lr_0,
                                 DIFF_LR_FACTORS)
    # set up lr schedule based on val loss
    lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                        'min',
                                                        verbose=True,
                                                        patience=args.patience)
    for epoch in range(epochs):
        total_loss = 0
        # training loop
        model.train()
        for batch_idx, (data, target) in enumerate(train_dl):
            data, target = data.cuda().float(), target.cuda().float()
            output = model(data)
            loss = F.binary_cross_entropy_with_logits(output, target)
            total_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            idx += 1
            # unfreeze deeper layers sequentially
            if idx == int(0.1 * iterations):
                model.unfreeze(1)
                logger.info("Iteration %d: Unfreezing group 1" % idx)
            if idx == int(0.2 * iterations):
                model.unfreeze(0)
                logger.info("Iteration %d: Unfreezing group 0" % idx)
            if batch_idx % 100 == 0:
                logger.info("Epoch %d (Batch %d / %d)\t Train loss: %.3f" % \
                    (epoch+1, batch_idx, len(train_dl), loss.item()))
        # train loss
        train_loss = total_loss / len(train_dl)
        logger.info("Epoch %d\t Train loss: %.3f" % (epoch + 1, train_loss))
        mlflow.log_metric('train_loss', train_loss, step=epoch)
        # validation scores
        val_f2_score, val_loss = validate(model, val_dl, 0.2)
        # lr monitoring val_loss
        lr_scheduler.step(val_loss)
        logger.info("Epoch %d \t Validation loss: %.3f, F2 score: %.3f" % \
            (epoch+1, val_loss, val_f2_score))
        mlflow.log_metric('val_loss', val_loss, step=epoch)
        mlflow.log_metric('val_f2_score', val_f2_score, step=epoch)
        # model saving
        if val_f2_score > best_score:
            best_score = val_f2_score
            best_model_path = os.path.join(MODEL_DIR, 'fold_%s' % fold, 'model_VGG19_%d.pth' % \
                (100*val_f2_score))
            logger.info("Saving model to %s" % best_model_path)
            save_model(model, best_model_path)
Example no. 26
def train_dir(nets, optim, optim2, dataloader, args):
    
  global image_size, it, image_sizes
  caffe.set_mode_gpu() 
  
  if args.debug:
    image_sizes = [[416, 416]]

  while True:
      
    if it % 500 == 0:
      image_size = image_sizes[random.randint(0, len(image_sizes) - 1)]
      print(image_size)
    
    #im = cv2.imread('/home/busta/data/90kDICT32px/background/n03085781_3427.jpg')
    #try:
    process_batch(nets, optim, optim2, image_size, args)
    
    if it % valid_interval == 0:
      validate(nets, dataloader, image_size = [416, 416], split_words=False)
Example no. 28
def train_dir(nets, optim, optim2, dataloader, args):

    global image_size, it, image_sizes
    caffe.set_mode_gpu()

    while it < args.iterate:

        print("it:", it)
        if it % 500 == 0:  # every 500 iterations, pick a new size to reshape the input images to
            image_size = image_sizes[random.randint(0, len(image_sizes) - 1)]
            print(image_size)

        process_batch(nets, optim, optim2, image_size, args)

        if it % valid_interval == 0:
            print("validation it:", it)
            validate(nets,
                     dataloader,
                     image_size=[416, 416],
                     split_words=False)
Example no. 29
def calc_statistics(data):
    good = True
    xml = validate(data, 'fb2')  # fb2 schema
    if xml is None:
        good = False
        bad_fb2 = True
        xml = validate(data, 'xml')  # dom parser
        if xml is None:
            if validate(data, 'sax') is None:  # sax parser
                Stat.sax_errors += 1
            else:
                Stat.xml_errors += 1
            return
        else:
            Stat.fb2_errors += 1
    if check_links(xml) != 0:  # links
        good = False
        Stat.extra_errors += 1
        Stat.links_errors += 1
    if good:
        Stat.good += 1
Example no. 30
def trainModel(fileVocabulary, wordVocabulary, contextProvider, model,
               superBatchSize, miniBatchSize, parametersPath, embeddingsPath,
               learningRate, l1Coefficient, l2Coefficient, epochs,
               metricsPath):
    if os.path.exists(metricsPath):
        os.remove(metricsPath)

    superBatchesCount = contextProvider.contextsCount / superBatchSize + 1
    startTime = time.time()
    previousTotal = 0

    for epoch in xrange(0, epochs):
        for superBatchIndex in xrange(0, superBatchesCount):
            contextSuperBatch = contextProvider[
                superBatchIndex * superBatchSize:(superBatchIndex + 1) * superBatchSize]

            fileIndices, wordIndices, targetWordIndices = (
                contextSuperBatch[:, 1], contextSuperBatch[:, 1:-1], contextSuperBatch[:, -1])

            model.train(wordIndices, targetWordIndices, miniBatchSize,
                        learningRate, l1Coefficient, l2Coefficient)

            metrics = validation.validate(wordVocabulary, model)
            customMetrics = {
                'simGemJewel': similarity('gem', 'jewel', wordVocabulary,
                                          model)
            }
            validation.dump(metricsPath, epoch, superBatchIndex, *metrics,
                            **customMetrics)
            validation.dump(metricsPath, epoch, superBatchIndex, *metrics)

            if previousTotal < sum(metrics):
                model.dump(parametersPath, embeddingsPath)

            currentTime = time.time()
            elapsed = currentTime - startTime
            secondsPerEpoch = elapsed / (epoch + 1)

            rg, sim353, simLex999, syntRel, sat = metrics
            log.progress(
                'Training model: {0:.3f}%. Elapsed: {1}. Epoch: {2}. ({3:.3f} sec/epoch), RG: {4}. Sim353: {5}. SimLex999: {6}. SyntRel: {7}. SAT: {8}. Gem/Jewel: {9:.3f}.',
                epoch + 1, epochs, log.delta(elapsed), epoch, secondsPerEpoch,
                rg, sim353, simLex999, syntRel, sat,
                customMetrics['simGemJewel'])

    log.lineBreak()

    return model
Example no. 31
def calc_statistics(data):
    good = True
    xml = validate(data, 'fb2')         # fb2 schema
    if xml is None:
        good = False
        bad_fb2 = True
        xml = validate(data, 'xml')     # dom parser
        if xml is None:
            if validate(data, 'sax') is None: # sax parser
                Stat.sax_errors += 1
            else:
                Stat.xml_errors += 1
            return
        else:
            Stat.fb2_errors += 1
    if check_links(xml) != 0:           # links
        good = False
        Stat.extra_errors += 1
        Stat.links_errors += 1
    if good:
        Stat.good += 1
Example no. 32
def train(model, data_dir, epochs, gpu, lr):
    trainloader, validloader, testloader = load_datasets(data_dir)

    if not trainloader:
        return

    device = torch.device(
        'cuda' if gpu and torch.cuda.is_available() else 'cpu')

    print(
        f"\n[*] Training on device: {device}... this will be a long operation based on the epochs you specified."
    )

    #Define criterion for calculating loss and optimizer
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=lr)

    model.to(device)

    with active_session():
        for e in range(epochs):
            running_loss = 0

            for images, labels in trainloader:
                images, labels = images.to(device), labels.to(device)

                optimizer.zero_grad()

                output = model.forward(images)
                loss = criterion(output, labels)
                loss.backward()
                optimizer.step()

                running_loss += loss.item()

            print(f"\nEpoch {e+1}/{epochs}")
            print(f"Train Loss: {running_loss/len(trainloader)}")
    print("\n[*] Finished Training")

    validate(model, validloader, testloader, criterion, optimizer, device)
Example no. 33
    def answered_ticket_question(self, state, f, subject, val):
        self.retract(f)

        # When dealing with stations
        try:
            if subject in ['departing_from', 'departing_to']:
                actual_station = get_station_by_alias(val)
                if actual_station:
                    val = actual_station
        except Exception as e:
            self.state_message('I couldn\'t find any stations by that name')

        # When dealing with dates
        try:
            if subject in ['departure_date', 'return_date']:
                actual_date = dateFormat2(val)
                val = actual_date
        except Exception as e:
            print(str(e))

        # # When dealing with times
        # try:
        #     if subject in ['departure_time', 'return_time']:
        #         actual_time = timeFormat(val)
        #         val = actual_time
        # except Exception as e:
        #     print(str(e))

        # Check for suggestions
        sug = suggest(val, subject, self.context)
        if sug:
            self.just_suggested = True
            self.state_message(sug['message'])
            self.declare(Suggested(subject, sug['value'], sug['original']))
            self.set_prev_state('QUESTIONING')
            self.modify(state, status='SUGGESTING')
            return

        # Check for errors
        error = validate(val, subject, self.context)
        if error:
            self.state_message(error)
            return

        new_fact = return_fact(subject, val)
        self.declare(new_fact)
        self.mark_answered_ticket(subject)

        # When it's a single ticket
        if new_fact[0] == False:
            self.declare(ReturnDate(None), ReturnTime(None))
            self.mark_answered_ticket('return_date', 'return_time')
Example no. 34
def register(request):
	if "username" not in request.POST:
		return go_to(request, "register.html")		
	else:
		if not validate(request.POST["username"]):
			return getBack(request, error_message = "Invalid username.")		
		
		if not validate(request.POST["password"]):
			return getBack(request, error_message = "Invalid password.")		
			
		if request.POST["password"] != request.POST["password_again"]:
			return getBack(request, error_message = "Password confirmation didn't match!")		
		
		exists = users.objects.filter(username__exact= request.POST["username"])
		if exists:
			return getBack(request, error_message = "User already exists")		
		else:		
			password = hashlib.sha256(request.POST["password"]).hexdigest()
			new_user = users(username = request.POST["username"],
							hashed_password = password,
							email = request.POST.get("email",""),
							last_login = datetime.datetime.now())
			new_user.save()
			return HttpResponseRedirect(reverse("userengine.views.signin"))
Example no. 35
def trainModel(fileVocabulary, wordVocabulary, contextProvider, model, superBatchSize, miniBatchSize, parametersPath, embeddingsPath, learningRate, l1Coefficient, l2Coefficient, epochs, metricsPath):
    if os.path.exists(metricsPath):
        os.remove(metricsPath)

    superBatchesCount = contextProvider.contextsCount / superBatchSize + 1
    startTime = time.time()
    previousTotal = 0

    for epoch in xrange(0, epochs):
        for superBatchIndex in xrange(0, superBatchesCount):
            contextSuperBatch = contextProvider[superBatchIndex * superBatchSize:(superBatchIndex + 1) * superBatchSize]

            fileIndices, wordIndices, targetWordIndices = contextSuperBatch[:,1], contextSuperBatch[:,1:-1], contextSuperBatch[:,-1]

            model.train(wordIndices, targetWordIndices, miniBatchSize, learningRate, l1Coefficient, l2Coefficient)

            metrics = validation.validate(wordVocabulary, model)
            customMetrics = {
                'simGemJewel': similarity('gem', 'jewel', wordVocabulary, model)
            }
            validation.dump(metricsPath, epoch, superBatchIndex, *metrics, **customMetrics)
            validation.dump(metricsPath, epoch, superBatchIndex, *metrics)

            if previousTotal < sum(metrics):
                model.dump(parametersPath, embeddingsPath)

            currentTime = time.time()
            elapsed = currentTime - startTime
            secondsPerEpoch = elapsed / (epoch + 1)

            rg, sim353, simLex999, syntRel, sat = metrics
            log.progress('Training model: {0:.3f}%. Elapsed: {1}. Epoch: {2}. ({3:.3f} sec/epoch), RG: {4}. Sim353: {5}. SimLex999: {6}. SyntRel: {7}. SAT: {8}. Gem/Jewel: {9:.3f}.',
                         epoch + 1, epochs, log.delta(elapsed), epoch, secondsPerEpoch,
                         rg, sim353, simLex999, syntRel, sat,
                         customMetrics['simGemJewel'])

    log.lineBreak()

    return model
Example no. 36
def submit_booking():
    # Get the form data
    json_data = request.get_json()
    # Check for data in dictionary
    if not json_data:
        return "Request missing JSON data"
    # Check the reCAPTCHA
    if "g-recaptcha-response" not in json_data.keys():
        return "Recaptcha response missing"
    if not check_recaptcha(json_data.pop("g-recaptcha-response"), request.remote_addr):
        return "Recaptcha check failed"
    # Validate the received data
    if not validation.validate(json_data):
        return "Data validation check failed"
    # Prepare the data
    escape_values(json_data)
    convert_newlines(json_data)
    # Attempt to send the mail
    if not send_booking_mail(json_data):
        return "Send mail error"
    # If this point is reached, everything completed successfully
    return "success"
Example no. 37
    def post(self):
        have_error = False
        self.username = self.request.get("username")
        self.password = self.request.get("password")
        self.verify = self.request.get("verify")
        self.email = self.request.get("email")

        params,have_error = validation.validate(self.username,self.password,self.verify,self.email)

        if have_error:
            self.render("signup-form.html",**params)

        else:
            u = User.by_name(self.username)
            if u:
                msg = 'That user already exists.'
                self.render('signup-form.html',err_uname=msg)
            else:
                u = User.register(self.username,self.password,self.email)
                u.put()
                self.login(u)
                self.redirect('/')
Example no. 38
def parse(data):
    # return values:
    # 0 - good file
    # 1 - fixed
    # -1 - not fixed

    # pre validation
##     if options.pre_fb2lint:
##         if validate(data, 'fb2', 'PRE') is not None:
##             return 0
##     else:
    if not options.force and validate(data, 'xml', 'PRE') is not None:
        return 0

    # parsing and recovery
    try:
        soup = FB2Parser(data, convertEntities='xml')
    except:
        traceback.print_exc()
        print_log('FATAL: exception', level=3)
        return -1
    ret = 1

    # post validation #1
    xml = validate(str(soup.FictionBook), 'xml', 'POST')
    if xml is None:
        print_log('FATAL: not fixed', level=3)
        return -1

    # post processing
    ##add_desc(soup)
    ##norm_desc(soup)

    # post validation #2
    #check_tags(soup)
##     if options.post_fb2lint:
##         if validate(data, 'fb2', 'POST') is None:
##             ret = -1

    if options.check_only:
        return ret

    data = etree.tostring(xml, encoding=options.output_encoding,
                          xml_declaration=True)

    # save result
    def add_suffix(fn):
        root, ext = os.path.splitext(fn)
        return root+'-fixed'+ext
    newfilename = None
    zipfilename = None
    z = False                           # zip result ?
    # filenames
    if not options.nozip:
        if options.z_filename or options.zip:
            z = True
    newfilename = add_suffix(options.filename)
    if z:
        if not newfilename.endswith('.zip'):
            newfilename = add_suffix(newfilename+'.zip')
        newfilename = add_suffix(newfilename)
        if options.z_filename:
            zipfilename = options.z_filename
        else:
            zipfilename = options.filename
        zipfilename = os.path.basename(zipfilename)
    else:
        if options.z_filename:
            d = os.path.dirname(options.filename)
            newfilename = add_suffix(os.path.join(d, options.z_filename))
    if options.outfile:
        newfilename = options.outfile
    if options.dest_dir:
        f = os.path.basename(newfilename)
        newfilename = os.path.join(options.dest_dir, f)

    if os.path.exists(newfilename):
        print_log('FATAL: could not save file, file exists:',
                  newfilename, level=3)
        return
    if newfilename == '-':
        z = False
    # save
    if options.verbose:
        print_log('save:', newfilename)
    if z:
        zf = zipfile.ZipFile(newfilename, 'w')
        zf.writestr(zipfilename, data)
    else:
        if newfilename == '-':
            sys.stdout.write(data)
        else:
            open(newfilename, 'w').write(data)
Example no. 39
    # Send lines until you receive a False
    while True:
        # Get response
        response = child.receive_response_json_dict(response_timeout, character_timeout).lstrip('\r\n')

        # Encode response
        # Strip first line for decoding, it's either the Version number or the last move
        response = response[response.find('\n') + 1:]

        # log the response
        logging.debug("Response:\n" + response)

        # Validate the response
        # Validation turn off when testing
        if (not options.test_castle) and (not validation.validate(response)):
            print "Got invalid response from game:"
            print response
            sys.exit(-1)

        response = loads(response)

        # Determine next move and tell the game program
        next_move = player.handle_response(response)

        # log the response
        logging.debug("Next Move:\n" + str(next_move))

        # If next_move is false then stop playing
        if next_move == False:
            break
Example no. 40
def index():
    return jsonify(validate(request.get_json()))
Example no. 41
File: model.py  Project: lep/ff
def handleFile(source, destination, prefix):
	data = None
	with open(source, "r") as f:
		data = load(f.read())
	with open(destination, "w") as f:
		f.write(render(validate(process(data)), prefix=prefix))