    def step(self, step_dir, states, *args):
        old_actor = copy.deepcopy(self.actor)
        
        params = utils.flat_params(self.actor)
        new_params = params + self.lr * step_dir
        utils.update_model(self.actor, new_params)

        kl = self.actor.get_kl(states.float(), old_actor=old_actor)
        kl = kl.mean()

        return {'kl': kl, 'step_size': self.lr,
                'entropy': self.actor.get_entropy(states.float())}  # return statistics
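
The snippet relies on two helpers from utils that are not shown here. A minimal sketch, assuming they flatten the actor's parameters into a single vector and write such a vector back:

import torch

def flat_params(model):
    # Concatenate every parameter tensor into one flat 1-D vector.
    return torch.cat([p.data.view(-1) for p in model.parameters()])

def update_model(model, new_params):
    # Copy consecutive slices of the flat vector back into each parameter.
    index = 0
    for p in model.parameters():
        length = p.numel()
        p.data.copy_(new_params[index:index + length].view(p.shape))
        index += length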
Example #2
def main():
    text_inp = "Input Image"
    text_fg = "Foreground"

    cv2.namedWindow(text_inp, cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow(text_fg, cv2.WINDOW_AUTOSIZE)

    cap = cv2.VideoCapture("../dt_passat.mpg")
    cap.grab()
    img = cap.retrieve()[1]

    width = img.shape[1]
    height = img.shape[0]
    sh_width = width // 2
    sh_height = height // 2

    cv2.moveWindow(text_inp, 0, 0)
    cv2.moveWindow(text_fg, sh_width + 10, 0)
    start = time.time()
    model = Model1D(sh_width, sh_height, K, SIGMA_INIT, ALPHA, SIGMA_THRESH, T)

    end = time.time()
    print('Model Init time {0}'.format(end - start))

    fg_img = np.zeros((sh_height, sh_width), np.uint8)

    while cap.grab():
        #Grabs the next frame from video file or capturing device
        img = cap.retrieve()[1]
        show_img = cv2.resize(img, (sh_width, sh_height))
        inp_gray_img = cv2.cvtColor(show_img, cv2.COLOR_BGR2GRAY)  # VideoCapture frames are BGR

        start = time.time()
        utils.update_model(model, inp_gray_img)
        end = time.time()
        model_update_time = end - start

        start = time.time()
        utils.extract_fg(model, inp_gray_img, fg_img)
        end = time.time()
        extract_fg_time = end - start

        print('Update Model time {0}'.format(model_update_time))
        print('Extract fg time {0}'.format(extract_fg_time))

        cv2.imshow(text_inp, inp_gray_img)
        cv2.imshow(text_fg, fg_img)
        cv2.waitKey(50)
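
For comparison, OpenCV ships a built-in Gaussian-mixture background subtractor that covers the same update-then-extract cycle in one call; a minimal sketch, independent of the custom Model1D used above:

# Sketch: the equivalent loop with OpenCV's built-in MOG2 subtractor.
subtractor = cv2.createBackgroundSubtractorMOG2()
while cap.grab():
    frame = cap.retrieve()[1]
    fg_mask = subtractor.apply(frame)  # updates the model and extracts fg
    cv2.imshow("Foreground", fg_mask)
    cv2.waitKey(50)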
Example #3
    def step(self, step_dir, states, actions, returns, loss, loss_grad):
        '''
        Backtracking line search along step_dir; returns step statistics.
        '''
        params = utils.flat_params(self.actor)
        shs = 0.5 * (step_dir * self.fisher_vector_product(states, step_dir)
                     ).sum(0, keepdim=True)
        step_size = 1 / torch.sqrt(shs / self.max_kl)[0]
        full_step = step_size * step_dir

        # ----------------------------
        # step 5: do backtracking line search for n times
        # Create a copy of the current actor
        old_actor = copy.deepcopy(self.actor)
        expected_improve = (loss_grad * full_step).sum(0, keepdim=True)
        old_policy = old_actor.get_log_probs(states.float(), actions)

        fraction = 1.0
        for i in range(10):
            new_params = params + fraction * full_step
            utils.update_model(self.actor, new_params)
            new_loss = self.get_loss(returns, states, actions, old_policy)
            loss_improve = new_loss - loss
            expected_improve *= 0.5
            kl = self.actor.get_kl(states.float(), old_actor=old_actor)
            kl = kl.mean()

            # print('kl: {:.4f}  loss improve: {:.4f}  expected improve: {:.4f}  '
            #       'number of line search: {}'
            #       .format(kl.data.numpy(), loss_improve, expected_improve[0], i))

            # see https://en.wikipedia.org/wiki/Backtracking_line_search
            if kl < self.max_kl and (loss_improve / expected_improve) > 0.5:
                return {'kl': kl, 'step_size': fraction * step_size.item(),
                        'entropy': self.actor.get_entropy(states.float())}  # return statistics

            fraction *= 0.5

        # No candidate step satisfied the constraints: restore the old actor.
        params = utils.flat_params(old_actor)
        utils.update_model(self.actor, params)
        print('policy update does not improve the surrogate')

        return {'kl': 0, 'step_size': 0,
                'entropy': self.actor.get_entropy(states.float())}  # return statistics
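
The fisher_vector_product used to size the step above is defined elsewhere. In TRPO implementations it is typically the Hessian-vector product of the mean KL, obtained by double backprop; a sketch under that assumption (the 0.1 damping constant is illustrative, and get_kl is assumed to treat the old distribution as a constant, as in the snippets above):

    def fisher_vector_product(self, states, p, damping=0.1):
        p = p.detach()
        kl = self.actor.get_kl(states.float(),
                               old_actor=copy.deepcopy(self.actor)).mean()
        # First backprop: gradient of the KL, kept in the autograd graph.
        grads = torch.autograd.grad(kl, self.actor.parameters(),
                                    create_graph=True)
        flat_grad = torch.cat([g.view(-1) for g in grads])
        # Second backprop: Hessian-vector product = Fisher-vector product.
        grad_p = (flat_grad * p).sum()
        hessian = torch.autograd.grad(grad_p, self.actor.parameters())
        flat_hessian = torch.cat([h.contiguous().view(-1) for h in hessian])
        return flat_hessian + damping * p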
Example #4
def candidate_detail(racedate, candidateid):
    racedate_db = PostgresqlExtDatabase('elex_%s' % racedate,
        user=os.environ.get('ELEX_ADMIN_USER', 'elex'),
        host=os.environ.get('ELEX_ADMIN_HOST', '127.0.0.1')
    )
    models.database_proxy.initialize(racedate_db)
    if request.method == 'POST':
        payload = utils.clean_payload(dict(request.form))

        try:
            oc = models.OverrideCandidate.get(models.OverrideCandidate.candidate_candidateid == candidateid)
        except models.OverrideCandidate.DoesNotExist:
            oc = models.OverrideCandidate.create(candidate_candidateid=candidateid)

        utils.update_model(oc, payload)
        utils.update_views(models.database_proxy)

        return json.dumps({"message": "success"})
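
The try/except above is the classic get-or-create idiom; peewee also ships it built in, so an equivalent sketch (same model assumed) is:

# created is True when a new row had to be inserted.
oc, created = models.OverrideCandidate.get_or_create(
    candidate_candidateid=candidateid)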
Example #5
def train_model(train_file, classifier, batch_size):

    all_classes = get_all_classes(train_file)
    col_label, col_description = get_column_position(form=1)
    with open(train_file, "r") as input_file:

        first_line = input_file.readline().strip()
        entry = literal_eval(first_line)
        vector_size = len(entry[col_description])
        input_file.seek(0)  # reset the file pointer after reading first line

        y_train = []
        X_train = np.full(shape=(batch_size, vector_size),
                          fill_value=0,
                          dtype=float)
        line_count = 0
        idx = 0
        start_time = time.time()
        for line in input_file:
            line_count += 1
            entry = literal_eval(line)
            y_train.append(entry[col_label])
            X_train[idx, :] = entry[col_description]

            if line_count % batch_size == 0:
                update_model(classifier, X_train, y_train, all_classes)
                print "training model for lines = ", line_count, 'time=', int(
                    time.time() - start_time), 's'
                print "precision score", classifier.score(X_train, y_train)
                del y_train, X_train
                y_train = []
                X_train = np.full(shape=(batch_size, vector_size),
                                  fill_value=0,
                                  dtype=float)
                idx = -1

            idx += 1

        if line_count % batch_size > 0:  # flush any leftover partial batch
            X_train = X_train[:idx, :]
            update_model(classifier, X_train, y_train, all_classes)
            print "training model for lines = ", line_count, 'time=', int(
                time.time() - start_time), 's'
            print "precision score", classifier.score(X_train, y_train)
Example #6
def candidate_detail(racedate, candidateid, raceyear):
    racedate_db = PostgresqlExtDatabase(
        'elex_%s' % racedate,
        user=os.environ.get('ELEX_ADMIN_USER', 'elex'),
        host=os.environ.get('ELEX_ADMIN_HOST', '127.0.0.1'))
    models.database_proxy.initialize(racedate_db)
    if request.method == 'POST':
        payload = utils.clean_payload(dict(request.form))

        try:
            oc = models.OverrideCandidate.get(
                models.OverrideCandidate.candidate_unique_id == candidateid)
        except models.OverrideCandidate.DoesNotExist:
            oc = models.OverrideCandidate.create(
                candidate_unique_id=candidateid)

        utils.update_model(oc, payload)
        utils.update_views(models.database_proxy)

        return json.dumps({"message": "success"})
Example #7
    def mainLoop():
        # while img:
        frame = process.stdout.read(frame_size)

        if frame:
            img = Image.frombytes('L', (width, height), frame, 'raw', 'L', 0, 1)
            inp_gray_img = img.resize((sh_width, sh_height), Image.ANTIALIAS)

            start = time.time()
            utils.update_model(model, inp_gray_img)
            end = time.time()
            model_update_time = end - start

            start = time.time()
            utils.extract_fg(model, inp_gray_img, fg_img)
            end = time.time()
            extract_fg_time = end - start

            print('Update Model time {0}'.format(model_update_time))
            print('Extract fg time {0}'.format(extract_fg_time))

            if isPyPy:
                original.iteration += 1
                if original.iteration % 10 == 0:
                    fg_img.save('pypyobrazek.bmp')
            else:
                original.phorig = ImageTk.PhotoImage(inp_gray_img)
                original.ph = ImageTk.PhotoImage(fg_img)
                original.canvas.create_image(sh_width / 2,
                                             sh_height / 2,
                                             image=original.phorig)
                original.canvas.create_image(sh_width + sh_width / 2,
                                             sh_height / 2,
                                             image=original.ph)
                original.canvas.update_idletasks()

            original.after(0, mainLoop)
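
original.after(0, mainLoop) re-registers the callback on Tkinter's event loop, so one frame is processed per tick instead of blocking in a while loop. A sketch of how such a loop is typically started (the surrounding setup is not part of this snippet):

import tkinter as tk

original = tk.Tk()
# ... build original.canvas, spawn the decoder process, etc. ...
original.after(0, mainLoop)  # schedule the first frame callback
original.mainloop()          # hand control to the Tk event loop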
Example #8
def train_model(train_file, classifier, batch_size):

    all_classes = get_all_classes(train_file)
    col_label, col_description = get_column_position(form=1)
    with open(train_file, "r") as input_file:

        first_line = input_file.readline().strip()
        entry = literal_eval(first_line)
        vector_size = len(entry[col_description])
        input_file.seek(0)  # reset the file pointer after reading first line 

        y_train = []
        X_train = np.full(shape=(batch_size, vector_size), fill_value=0, dtype=float)
        line_count = 0
        idx = 0
        start_time = time.time()
        for line in input_file:
            line_count += 1
            entry = literal_eval(line)
            y_train.append(entry[col_label])
            X_train[idx, :] = entry[col_description]

            if line_count % batch_size == 0:
                update_model(classifier, X_train, y_train, all_classes)
                print "training model for lines = ", line_count, 'time=', int(time.time() - start_time), 's'
                print "precision score", classifier.score(X_train, y_train)
                del y_train, X_train
                y_train = []
                X_train = np.full(shape=(batch_size, vector_size), fill_value=0, dtype=float)
                idx = -1

            idx += 1

        if line_count % batch_size > 0:  # flush any leftover partial batch
            X_train = X_train[:idx, :]
            update_model(classifier, X_train, y_train, all_classes)
            print "training model for lines = ", line_count, 'time=', int(time.time() - start_time), 's'
            print "precision score", classifier.score(X_train, y_train)
Example #9
        racedate_db = PostgresqlExtDatabase('elex_%s' % racedate,
            user=os.environ.get('ELEX_ADMIN_USER', 'elex'),
            host=os.environ.get('ELEX_ADMIN_HOST', '127.0.0.1')
        )
        models.database_proxy.initialize(racedate_db)
        payload = utils.clean_payload(dict(request.form))
        try:
            r = models.OverrideRace.get(
                models.OverrideRace.race_raceid == raceid.split('-')[1],
                models.OverrideRace.race_statepostal == raceid.split('-')[0])
        except models.OverrideRace.DoesNotExist:
            r = models.OverrideRace.create(
                race_raceid=raceid.split('-')[1],
                race_statepostal=raceid.split('-')[0])

        print(payload)

        utils.set_winner(payload['nyt_winner'], raceid)

        utils.update_model(r, payload)
        utils.update_views(models.database_proxy)

        return json.dumps({"message": "success"})

@app.route('/elections/2016/admin/<racedate>/candidateorder/', methods=['POST'])
def candidate_order(racedate):
    racedate_db = PostgresqlExtDatabase('elex_%s' % racedate,
        user=os.environ.get('ELEX_ADMIN_USER', 'elex'),
        host=os.environ.get('ELEX_ADMIN_HOST', '127.0.0.1')
    )
    models.database_proxy.initialize(racedate_db)
    if request.method == 'POST':
        payload = utils.clean_payload(dict(request.form))

        if payload.get('candidates', None):
Example #10
def training(sess, model, opt, train, valid, save):
    # Model data
    batch_size = opt['batch_size']
    train_epochs = opt['train_epochs']
    patience = opt['patience']
    lr = opt['lr']
    learning_decay = opt['learning_decay']
    keep_prob = opt['keep_prob']
    use_log = opt['log']
    path_log = opt['path_log']
    augmentation = opt['augmentation']

    if augmentation:
        print('Applying data augmentation at each epoch')
    else:
        print('No data augmentation')

    (train_x, train_y, train_num) = train
    (valid_x, valid_y, valid_num) = valid

    n_train_batches = train_num // batch_size
    n_valid_batches = valid_num // batch_size

    tr_appender = utils.factory_appender(sess=sess,
                                         use_log=use_log,
                                         log_dir=path_log,
                                         log_filename="train")
    va_appender = utils.factory_appender(sess=sess,
                                         use_log=use_log,
                                         log_dir=path_log,
                                         log_filename="valid")

    train_errors, valid_errors = numpy.zeros(train_epochs), numpy.zeros(train_epochs)
    train_costs, valid_costs = numpy.zeros(train_epochs), numpy.zeros(train_epochs)
    learning_rates = numpy.zeros(train_epochs)

    best_valid_err = 1.
    best_valid_epoch = 0
    bad_count = 0

    print "Training..."
    sess.run(tf.assign(model.lr, lr))
    for epoch in range(train_epochs):
        start = time.time()

        # shuffle the train set
        idx_perm = numpy.random.permutation(train_num)
        train_x = train_x[idx_perm]
        train_y = train_y[idx_perm]

        if augmentation:
            beg = time.time()
            tensor_x = utils.distord(train_x)
            train_x = numpy.array(sess.run([tensor_x]))[0]
            end = time.time()
            print('data_augmentation/epoch = {:.3f} s'.format(end - beg))

        # compute train loss, err and update weights
        utils.update_model(sess=sess,
                           model=model,
                           inputs=train_x,
                           target=train_y,
                           batch_size=batch_size,
                           n_batch=n_train_batches,
                           keep_prob=keep_prob)

        # Compute training loss and err
        loss, err = utils.fwd_eval(sess=sess,
                                   model=model,
                                   inputs=train_x,
                                   target=train_y,
                                   batch_size=batch_size,
                                   n_batch=n_train_batches)
        tr_appender(train_errors, err, epoch, "error")
        tr_appender(train_costs, loss, epoch, "loss")

        # compute validation loss and err
        loss, err = utils.fwd_eval(sess=sess,
                                   model=model,
                                   inputs=valid_x,
                                   target=valid_y,
                                   batch_size=batch_size,
                                   n_batch=n_valid_batches)
        va_appender(valid_errors, err, epoch, "error")
        va_appender(valid_costs, loss, epoch, "loss")

        current_lr = sess.run(model.lr)
        tr_appender(learning_rates, current_lr, epoch, "learning_rate")

        # keep best model and reduce learning rate if necessary
        if valid_errors[epoch] <= best_valid_err:
            best_valid_err = valid_errors[epoch]
            best_valid_epoch = epoch
            # update the current best model
            save()
        else:
            print "bad_count"
            bad_count += 1
            if bad_count > patience:
                print('Reducing the learning rate..')
                sess.run(tf.assign(model.lr, model.lr * learning_decay))
                bad_count = 0

        end = time.time()
        print "epoch. {}, train_loss = {:.4f}, valid_loss = {:.4f}," \
              "train_error = {:.4f}, valid_error = {:.4f}, time/epoch = {:.3f} s" \
            .format(epoch, train_costs[epoch], valid_costs[epoch], train_errors[epoch],
                    valid_errors[epoch], end - start)

    print('Best errors train {:.4f}, valid {:.4f}'.format(
        train_errors[best_valid_epoch], valid_errors[best_valid_epoch]))
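
utils.factory_appender is not shown; from its call sites it returns a closure that stores one scalar per epoch and optionally logs it. A sketch under that assumption, using TF1-style summaries to match the sess.run/tf.assign usage above:

import os
import tensorflow as tf

def factory_appender(sess, use_log, log_dir, log_filename):
    # sess is accepted to match the call site; this sketch does not need it.
    writer = (tf.summary.FileWriter(os.path.join(log_dir, log_filename))
              if use_log else None)

    def append(array, value, epoch, name):
        array[epoch] = value  # keep the per-epoch history in numpy
        if writer is not None:
            summary = tf.Summary(value=[
                tf.Summary.Value(tag=name, simple_value=float(value))])
            writer.add_summary(summary, epoch)
    return append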
Example #11
def race_detail(racedate, raceid, raceyear):
    if request.method == 'GET':
        try:
            racedate_db = PostgresqlExtDatabase(
                'elex_%s' % racedate,
                user=os.environ.get('ELEX_ADMIN_USER', 'elex'),
                host=os.environ.get('ELEX_ADMIN_HOST', '127.0.0.1'))
            models.database_proxy.initialize(racedate_db)
            context = utils.build_context(racedate, raceyear)

            context['race'] = [
                r for r in models.ElexResult.raw(
                    """select officename, seatname, race_unique_id, raceid, statepostal, accept_ap_calls from elex_results where race_unique_id = '%s' group by officename, seatname, race_unique_id, raceid, statepostal, accept_ap_calls"""
                    % raceid)
            ][0]

            context['candidates'] = models.ElexResult.raw(
                """select nyt_runoff, party, nyt_winner, candidate_unique_id, first, last from elex_results where race_unique_id = '%s' group by nyt_runoff, party, nyt_winner, candidate_unique_id, first, last order by last, first DESC;"""
                % raceid)

            context['ap_winner'] = None
            ap_winner = [
                m for m in models.ElexResult.raw(
                    """select candidate_unique_id, first, last, winner, nyt_winner, nyt_called from elex_results where race_unique_id = '%s' and winner = 'true' group by candidate_unique_id, first, last, winner, nyt_winner, nyt_called order by last, first DESC;"""
                    % raceid)
            ]

            if len(ap_winner) > 0:
                context['ap_winner'] = ap_winner[0]

            context['states'] = []

            state_list = sorted(
                {race.statepostal for race in models.ElexRace.select()})

            for state in state_list:
                race = models.ElexRace.select().where(
                    models.ElexRace.statepostal == state)[0]
                state_dict = {}
                state_dict['statepostal'] = state
                state_dict['report'] = None
                state_dict['report_description'] = None
                context['states'].append(state_dict)

            return render_template('race_detail.html', **context)

        except peewee.OperationalError as e:
            context['error'] = e
            return render_template('error.html', **context)

    if request.method == 'POST':
        racedate_db = PostgresqlExtDatabase(
            'elex_%s' % racedate,
            user=os.environ.get('ELEX_ADMIN_USER', 'elex'),
            host=os.environ.get('ELEX_ADMIN_HOST', '127.0.0.1'))
        models.database_proxy.initialize(racedate_db)
        payload = utils.clean_payload(dict(request.form))
        try:
            r = models.OverrideRace.get(
                models.OverrideRace.race_unique_id == raceid)
        except models.OverrideRace.DoesNotExist:
            r = models.OverrideRace.create(race_unique_id=raceid,
                                           raceid=raceid.split('-')[1],
                                           statepostal=raceid.split('-')[0])

        # nyt_winner is a single ID, there can only be one winner.
        utils.set_winner(payload['nyt_winner'], raceid)

        print(payload)

        # nyt_runoff is a list of ids, there can be 2 or more advancing.
        runoff_cands = []
        if payload.get('nyt_runoff', None):
            runoff_cands = [
                x.strip() for x in payload['nyt_runoff'].split(',')
            ]
        utils.set_runoff(runoff_cands, raceid)

        utils.update_model(r, payload)
        utils.update_views(models.database_proxy)

        return json.dumps({"message": "success"})
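
One caveat on the raw queries above: interpolating raceid with % builds the SQL by string formatting, which is injection-prone. peewee's raw() accepts bound parameters, so a safer equivalent for the first query would be the sketch below:

# Same query with a bound parameter instead of string interpolation.
context['race'] = [
    r for r in models.ElexResult.raw(
        """select officename, seatname, race_unique_id, raceid,
                  statepostal, accept_ap_calls
           from elex_results
           where race_unique_id = %s
           group by officename, seatname, race_unique_id, raceid,
                    statepostal, accept_ap_calls""",
        raceid)
][0]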