Code Example #1
    def __init__(self, date_format='%Y-%m-%d %H:%M:%S', *args, **kwargs):
        super(Instrument, self).__init__()
        global SingletonInstrument
        if SingletonInstrument and not kwargs.get('serial_emulator'):
            raise ValueError("SingletonInstrument already set")

        self.name = 'instrument'
        self.uuid = os.environ["MQTT_UUID"]

        self.mqtt_org = kwargs.get('mqtt_org')
        self.mqtt_host = kwargs.get('mqtt_host')
        self.mqtt_port = kwargs.get('mqtt_port')
        self.mqtt_keep_alive = kwargs.get('mqtt_keep_alive')
        self.mqtt_qos = kwargs.get('mqtt_qos')

        self.serial_port_description = kwargs.get('serial_port_description')
        self.serial_baudrate = kwargs.get('serial_baudrate')
        self.serial_parity = kwargs.get('serial_parity')
        self.serial_stopbits = kwargs.get('serial_stopbits')
        self.serial_bytesize = kwargs.get('serial_bytesize')
        self.serial_timeout = kwargs.get('serial_timeout')

        self.storage_location = kwargs.get('storage_location')
        init_models(self.storage_location)

        # self.mqtt_publish_topic = ''
        self.mqtt_publish_topic = self._get_topic(topic_type=MQTT_TYPE_READING)
        self.mqtt_analysis_topic = self._get_topic(topic_type=MQTT_TYPE_ANALYSIS)

        self._mqtt_connected = False
        self._mqtt_clean_session = False
        self._mqtt_retain = False
        self._mqtt_messages_lost = False

        # Depending on message size, this is approx. 400 MB of memory
        self._mqtt_max_queue = 100000

        self._mqtt_client = self._setup_mqtt_client()

        self._serial = None
        self._imodules = {}

        self.date_format = date_format
        self._log_format = '%(asctime)s [%(levelname)s] %(message)s'
        self._log_info = {}

        self._logger = self._setup_logger(self.name)
        self.scheduler = self._set_up_scheduler()

        self._mqtt_client.enable_logger(logger=self._logger)

        if not kwargs.get('serial_emulator'):
            self._serial = self._set_up_serial()

        self._modes = {}
        SingletonInstrument = self

        self.timezone = strftime("%z", gmtime())
        self._countdown_current = 0
        self._countdown_last = 0
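
Note: the global singleton check in Example #1 only works if the module defines the sentinel at import time. A minimal sketch of that module-level declaration (assumed; not shown in the snippet):

# Module-level sentinel consulted and assigned by Instrument.__init__ above.
SingletonInstrument = None
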
Code Example #2
File: app.py Project: linanster/estar
def create_app():
    app = Flask(__name__)
    app.config.from_pyfile('settings.py')
    init_models(app)
    init_views(app)
    init_loginmanager(app)
    init_bootstrap(app)
    return app
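
Several of these examples call init_models(app) during application setup. A minimal, hedged sketch of what such a helper often looks like with Flask-SQLAlchemy (an assumption; the actual estar implementation is not shown):

from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

def init_models(app):
    # Bind the shared SQLAlchemy instance to this app and create missing tables.
    db.init_app(app)
    with app.app_context():
        db.create_all()
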
Code Example #3
File: archv.py Project: npmcdn-to-unpkg-bot/midnight
def init():
    # modify the name of the database here
    m.db.init('slack-archv-test.sqlite')

    with m.db.atomic():
        m.init_models()
        # Add version info
        m.Information.create_or_get(key='__version', value='1.0.0')
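
Example #3 assumes a module m that exposes a deferred peewee database. A plausible sketch of that module (hypothetical; create_or_get is the peewee 2.x API used above):

import peewee

db = peewee.SqliteDatabase(None)  # deferred: the path arrives later via db.init(...)

class Information(peewee.Model):
    key = peewee.CharField(primary_key=True)
    value = peewee.CharField()

    class Meta:
        database = db

def init_models():
    # Create tables if they do not exist yet.
    db.create_tables([Information], safe=True)
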
Code Example #4
def init_app(app, test=False):
    if test:
        app.config['TESTING'] = True

    from views import simple_page
    app.register_blueprint(simple_page)

    from models import init_models
    init_models(app)
Code Example #5
def flaskr_client(context, *args, **kwargs):
    APP.config['DATABASE'] = {'engine': 'peewee.SqliteDatabase', 'name': ':memory:'}
    # context.db, APP.config['DATABASE'] = tempfile.mkstemp()
    APP.testing = True
    context.client = APP.test_client()

    with APP.app_context():
        DB = Database(APP)
        init_models(DB)
        # pass
    yield context.client
Code Example #6
File: app.py Project: panda2134/Flask-Blog
def create_app():
    con = FlaskApp(__name__, specification_dir='vue-blog-api')
    # con.add_api('api.yaml', resolver=MethodViewResolver('views'), validate_responses=True)
    con.add_api('api.yaml', resolver=MethodViewResolver('views'))

    app = con.app
    app.config.from_pyfile('config.py')
    CORS(app, resources='/api/*', supports_credentials=True)
    with app.app_context():
        models.init_models(app)

    return app
Code Example #7
def process():
    data = load_json(PATH_STEP2_CLEAN)
    kFold = MyKFold(10, shuffle=True)
    print("MLE generating sentences using Language model.")
    for train_tweets, _ in kFold(data):
        models = init_models(3, MLE)
        train_sents = compress(train_tweets)

        # padded multiple models, returning a list of (ngram, vocab)
        ngrams = padded_multiple_models(3, train_sents)

        # train models using ngrams
        fit_multiple_models(models, ngrams)

        for i in range(3):
            print(f"\nGenerating {Ngrams(i)} sentences:")
            for j in range(10):  # generate 10 sentences for each ngram.
                new_sentence = []
                word = models[i].generate(text_seed=['<s>'])
                new_sentence.append(word)
                while word != '</s>':
                    word = models[i].generate(text_seed=[word])
                    new_sentence.append(word)
                print(f"#{j}: [{' '.join(new_sentence)}]")
        break
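
Examples #7 and #9 share a few helpers built on nltk.lm. A hedged sketch of how init_models, padded_multiple_models, and fit_multiple_models could be implemented (assumptions; the originals are not shown):

from nltk.lm import MLE
from nltk.lm.preprocessing import padded_everygram_pipeline

def init_models(n, model_cls):
    # One model per n-gram order: [unigram, bigram, trigram] for n == 3.
    return [model_cls(order) for order in range(1, n + 1)]

def padded_multiple_models(n, sents):
    # One (ngrams, vocab) pair per order 1..n.
    return [padded_everygram_pipeline(order, sents) for order in range(1, n + 1)]

def fit_multiple_models(models, ngrams):
    for model, (train, vocab) in zip(models, ngrams):
        model.fit(train, vocab)
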
Code Example #8
    def __init__(self, args, env):
        self.args = args
        assert self.args.sync
        self.env = env
        self.max_steps = self.args.max_steps
        self.guides = generate_guide_grid(args.bin_divide)
        self.bmanager = BufferManager(args)  # spc buffer manager
        self.amanager = ActionSampleManager(args,
                                            self.guides)  # action sampler
        self.model, self.optim, self.epoch, self.exploration, self.num_steps = init_models(
            self.args)
        self.env.set_epoch(self.epoch)

        # set logger, by default using wandb
        self.logger = WandBLogger(
            os.path.join(args.save_path, self.args.logger_path),
            self.args.wandb)

        # import some frequently used params
        self.bsize = self.args.batch_size
        self.pstep = self.args.pred_step
        self.img_h = self.args.frame_height
        self.img_w = self.args.frame_width
        self.classes = self.args.classes  # class number of pixel semantic labels

        # define loss functions used
        self.event_loss_func = nn.CrossEntropyLoss
        self.guide_loss_func = nn.CrossEntropyLoss
        self.speed_loss_func = nn.MSELoss
        self.seg_loss_func = nn.NLLLoss
        self.depth_loss_func = nn.L1Loss()
        self.detect_loss_func = FocalLoss()
        self.coll_with_loss_func = nn.CrossEntropyLoss

        # figure out predictive task list
        self.eventloss_weights = dict()  # field -> loss weight
        self.speedloss_weight = 0.01
        self.segloss_weight = 1.0
        # if self.args.use_detection: self.eventloss_weights['detection'] = 1.0
        # if self.args.use_colls_with: self.eventloss_weights['colls_with'] = 1.0
        if self.args.use_collision: self.eventloss_weights['coll'] = 1.0
        # if self.args.use_collision_other: self.eventloss_weights['coll_other'] = 0.5
        if self.args.use_offroad: self.eventloss_weights['offroad'] = 1.0
        if self.args.use_offlane: self.eventloss_weights['offlane'] = 0.2

        self.timer = None
        self.last_episode_step = 0
Code Example #9
def process():
    # 1. First load the data into memory
    data = load_json(PATH_STEP2_CLEAN)

    # 2. Making 10-Fold Cross Validation
    kFold = MyKFold(10, False)
    print("Starting 10-Fold CV training/test")
    means = np.zeros((10, 3))
    for idx, (train_tweets, test_tweets) in enumerate(kFold(data)):
        print(f"Fold {idx}:")
        models = init_models(3, KneserNeyInterpolated)
        train_sents = compress(train_tweets)
        test_sents = compress(test_tweets)

        # padded multiple models, returning a list of (ngram, vocab)
        ngrams = padded_multiple_models(3, train_sents)

        # train models using ngrams
        fit_multiple_models(models, ngrams)

        test_ngrams = padded_multiple_models(3, test_sents)
        for n in range(0, 3):
            temp = [(models[n]).perplexity(i)
                    for i in tqdm((test_ngrams[n])[0],
                                  desc=f"perplexity of {Ngrams(n)}",
                                  total=len(test_sents))]
            means[idx, n] = np.mean(temp)
        print(
            f"run {idx}, unigram: {means[idx, 0]}, bigram: {means[idx, 1]}, trigram: {means[idx, 2]}"
        )

    final_means = np.mean(means, axis=0)
    print("Final mean of 10-Fold CV:")
    print(
        f"unigram: {final_means[0]}, bigram: {final_means[1]}, trigram: {final_means[2]}"
    )
Code Example #10
File: tests.py Project: hitbox/LibraryWebsite
    def setUp(self):
        models.Base.metadata.create_all(bind=library.db.engine)
        self.app = library.app.test_client()
        models.init_models(library.db.session)
        self.session = library.db.session
Code Example #11
    def setUp(self):
        models.Base.metadata.create_all(bind=library.db.engine)
        self.app = library.app.test_client()
        models.init_models(library.db.session)
        models.init_test_models(library.db.session)
        self.session = library.db.session
Code Example #12
File: manage.py Project: Behnam-RK/Cinema-Schedule
                white_medium_star='\U00002B50',
                heavy_minus_sign='\U00002796',
                cinema='\U0001F3A6',
                top_hat='\U0001F3A9',
                ticket='\U0001F3AB',
                performing_arts='\U0001F3AD',
                speech_balloon='\U0001F4AC',
                heavy_dollar_sign='\U0001F4B2',
                money_bag='\U0001F4B0',
                floppy_disk='\U0001F4BE',
                calendar='\U0001F4C5',
                tear_off_calendar='\U0001F4C6',
                pushpin='\U0001F4CC',
                telephone_receiver='\U0001F4DE',
                memo='\U0001F4DD',
                thought_balloon='\U0001F4AD',
                bicyclist='\U0001F6B4',
                taxi='\U0001F695',
                oncoming_taxi='\U0001F696'))

if __name__ == '__main__':
    session = init_models(config.db_engine_url)

    token = sys.argv[1]
    bot = Bot(token, session, data)
    bot.run()
    print('Listening ...')

    while True:
        time.sleep(10)
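
In Example #12, init_models takes a database URL and returns a session object. A minimal SQLAlchemy-flavoured sketch consistent with that call (hypothetical; Base stands in for the project's declarative base):

from sqlalchemy import create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()  # the project's real models would subclass this

def init_models(db_engine_url):
    engine = create_engine(db_engine_url)
    Base.metadata.create_all(engine)  # create any missing tables
    return sessionmaker(bind=engine)()
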
Code Example #13
File: train.py Project: dedbox/TOAD-GAN
def train(real, opt):
    """ Wrapper function for training. Calculates necessary scales then calls train_single_scale on each. """
    generators = []
    noise_maps = []
    noise_amplitudes = []

    if opt.game == 'mario':
        token_group = MARIO_TOKEN_GROUPS
    else:  # if opt.game == 'mariokart':
        token_group = MARIOKART_TOKEN_GROUPS

    scales = [[x, x] for x in opt.scales]
    opt.num_scales = len(scales)

    if opt.game == 'mario':
        scaled_list = special_mario_downsampling(opt.num_scales, scales, real,
                                                 opt.token_list)
    else:  # if opt.game == 'mariokart':
        scaled_list = special_mariokart_downsampling(opt.num_scales, scales,
                                                     real, opt.token_list)

    reals = [*scaled_list, real]

    # If (experimental) token grouping feature is used:
    if opt.token_insert >= 0:
        reals = [(token_to_group(r, opt.token_list, token_group)
                  if i < opt.token_insert else r) for i, r in enumerate(reals)]
        reals.insert(
            opt.token_insert,
            token_to_group(reals[opt.token_insert], opt.token_list,
                           token_group))
    input_from_prev_scale = torch.zeros_like(reals[0])

    stop_scale = len(reals)
    opt.stop_scale = stop_scale

    # Log the original input level as an image
    img = opt.ImgGen.render(one_hot_to_ascii_level(real, opt.token_list))
    wandb.log({"real": wandb.Image(img)}, commit=False)
    os.makedirs("%s/state_dicts" % (opt.out_), exist_ok=True)

    # Training Loop
    divergences = []
    for current_scale in range(0, stop_scale):
        opt.outf = "%s/%d" % (opt.out_, current_scale)
        try:
            os.makedirs(opt.outf)
        except OSError:
            pass

        # If we are seeding, we need to adjust the number of channels
        if current_scale < (opt.token_insert + 1):  # (stop_scale - 1):
            opt.nc_current = len(token_group)

        # Initialize models
        D, G = init_models(opt)
        # If we are seeding, the weights after the seed need to be adjusted
        if current_scale == (opt.token_insert + 1):  # (stop_scale - 1):
            D, G = restore_weights(D, G, current_scale, opt)

        # Actually train the current scale
        z_opt, input_from_prev_scale, G, divs = train_single_scale(
            D, G, reals, generators, noise_maps, input_from_prev_scale,
            noise_amplitudes, opt)

        # Reset grads and save current scale
        G = reset_grads(G, False)
        G.eval()
        D = reset_grads(D, False)
        D.eval()

        generators.append(G)
        noise_maps.append(z_opt)
        noise_amplitudes.append(opt.noise_amp)
        divergences.append(divs)

        torch.save(noise_maps, "%s/noise_maps.pth" % (opt.out_))
        torch.save(generators, "%s/generators.pth" % (opt.out_))
        torch.save(reals, "%s/reals.pth" % (opt.out_))
        torch.save(noise_amplitudes, "%s/noise_amplitudes.pth" % (opt.out_))
        torch.save(opt.num_layer, "%s/num_layer.pth" % (opt.out_))
        torch.save(opt.token_list, "%s/token_list.pth" % (opt.out_))
        wandb.save("%s/*.pth" % opt.out_)

        torch.save(G.state_dict(),
                   "%s/state_dicts/G_%d.pth" % (opt.out_, current_scale))
        wandb.save("%s/state_dicts/*.pth" % opt.out_)

        del D, G

    torch.save(torch.tensor(divergences), "%s/divergences.pth" % opt.out_)

    return generators, noise_maps, reals, noise_amplitudes
Code Example #14
def train_policy(args, env, max_steps=40000000):
    guides = generate_guide_grid(args.bin_divide)
    train_net, net, optimizer, epoch, exploration, num_steps = init_models(args)

    buffer_manager = BufferManager(args)
    action_manager = ActionSampleManager(args, guides)
    action_var = Variable(torch.from_numpy(np.array([-1.0, 0.0])).repeat(1, args.frame_history_len - 1, 1), requires_grad=False).float()

    # prepare video recording
    if args.recording:
        video_folder = os.path.join(args.video_folder, "%d" % num_steps)
        os.makedirs(video_folder, exist_ok=True)
        if args.sync:
            video = cv2.VideoWriter(os.path.join(video_folder, 'video.avi'),
                                    cv2.VideoWriter_fourcc(*'MJPG'),
                                    24.0, (args.frame_width, args.frame_height), True)
        else:
            video = None
            signal = mp.Value('i', 1)
            p = mp.Process(target=record_screen,
                           args=(signal,
                                 os.path.join(video_folder, 'video.avi'),
                                 1280, 800, 24))
            p.start()

    # initialize environment
    obs, info = env.reset()
    if args.recording:
        log_frame(obs, buffer_manager.prev_act, video_folder, video)

    num_episode = 1
    print('Start training...')

    for step in range(num_steps, max_steps):
        obs_var = buffer_manager.store_frame(obs, info)
        action, guide_action = action_manager.sample_action(net=net,
                                                            obs=obs,
                                                            obs_var=obs_var,
                                                            action_var=action_var,
                                                            exploration=exploration,
                                                            step=step,
                                                            explore=num_episode % 2)
        obs, reward, done, info = env.step(action)
        print("action [{0:.2f}, {1:.2f}]".format(action[0], action[1]) + " " +
              "collision {}".format(str(bool(info['collision']))) + " " +
              "off-road {}".format(str(bool(info['offroad']))) + " " +
              "speed {0:.2f}".format(info['speed']) + " " +
              "reward {0:.2f}".format(reward) + " " +
              "explore {0:.2f}".format(exploration.value(step))
              )

        action_var = buffer_manager.store_effect(guide_action=guide_action,
                                                 action=action,
                                                 reward=reward,
                                                 done=done,
                                                 collision=info['collision'],
                                                 offroad=info['offroad'])
        if args.recording:
            log_frame(obs, action, video_folder, video)

        if done:
            print('Episode {} finished'.format(num_episode))
            if not args.sync and args.recording:
                signal.value = 0
                p.join()
                del p

        # train SPN
        if buffer_manager.spc_buffer.can_sample(args.batch_size) and ((not args.sync and done) or (args.sync and step % args.learning_freq == 0)):
            # train model
            for ep in range(args.num_train_steps):
                optimizer.zero_grad()
                loss = train_model(args=args,
                                   net=train_net,
                                   spc_buffer=buffer_manager.spc_buffer)
                if args.use_guidance:
                    loss += train_guide_action(args=args,
                                               net=train_net,
                                               spc_buffer=buffer_manager.spc_buffer,
                                               guides=guides)
                print('loss = %0.4f\n' % loss.data.cpu().numpy())
                loss.backward()
                optimizer.step()
                epoch += 1
            net.load_state_dict(train_net.state_dict())

            # save model
            if epoch % args.save_freq == 0:
                print(color_text('Saving models ...', 'green'))
                torch.save(train_net.module.state_dict(),
                           os.path.join(args.save_path, 'model', 'pred_model_%09d.pt' % step))
                torch.save(optimizer.state_dict(),
                           os.path.join(args.save_path, 'optimizer', 'optimizer.pt'))
                with open(os.path.join(args.save_path, 'epoch.pkl'), 'wb') as f:
                    pkl.dump(epoch, f)
                buffer_manager.save_spc_buffer()
                print(color_text('Model saved successfully!', 'green'))

        if done:
            # reset video recording
            if args.recording:
                if args.sync:
                    video.release()
                    if sys.platform == 'linux':  # save memory
                        os.system('ffmpeg -y -i {0} {1}'.format(
                            os.path.join(video_folder, 'video.avi'),
                            os.path.join(video_folder, 'video.mp4')
                        ))
                        if os.path.exists(os.path.join(video_folder, 'video.mp4')):
                            os.remove(os.path.join(video_folder, 'video.avi'))

                    video_folder = os.path.join(args.video_folder, "%d" % step)
                    os.makedirs(video_folder, exist_ok=True)
                    video = cv2.VideoWriter(os.path.join(video_folder, 'video.avi'),
                                            cv2.VideoWriter_fourcc(*'MJPG'),
                                            24.0, (args.frame_width, args.frame_height), True)
                else:
                    video_folder = os.path.join(args.video_folder, "%d" % step)
                    os.makedirs(video_folder, exist_ok=True)

                    signal.value = 1
                    p = mp.Process(target=record_screen,
                                   args=(signal, os.path.join(video_folder, 'obs.avi'), 1280, 800, 24))
                    p.start()

            num_episode += 1
            obs, info = env.reset()
            buffer_manager.reset(step)
            action_manager.reset()
            if args.recording:
                log_frame(obs, buffer_manager.prev_act, video_folder, video)
Code Example #15
import os

from meme_bot import MemeBot
from models import init_models

BOT_TOKEN = os.getenv("BOT_TOKEN")
bot = MemeBot(BOT_TOKEN)


@bot.message_handler(content_types=['text', 'video', 'photo', 'document'])
def message_handler(message):
    bot.meme_handler(message)


@bot.callback_query_handler(func=lambda call: True)
def callback_handler(call):
    bot.vote_handler(call)


if __name__ == '__main__':
    print('Start', flush=True)
    init_models()
    bot.run_pooling()
    bot.run_scheduler()


Code Example #16

def create_app():
    app = Flask(__name__)  # Create the Flask application instance
    app.url_map.strict_slashes = local_settings.TRAILING_SLASH  # Configure trailing-slash handling for URLs
    app.config.from_object(local_settings)  # Load the remaining settings into the app
    return app


APP = create_app()  # Initialize the application

DB = Database(APP)  # Set up database access; tables are created here if they don't exist yet.
init_models(DB)

API = RestAPI(APP)  # Initialize the peewee RestAPI
init_api(API)

ADMIN = init_admin(APP, DB)  # Initialize the admin panel

import ui.root  # Import the view for the main page

# flask_restful-based Api and routes for the API
from flask_restful import Api

api = Api(APP)

from services.product import GetProducts, AddProduct, DeleteProduct, UpdateProduct
api.add_resource(GetProducts, '/product/get')
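
Database and RestAPI in Example #16 follow the flask_peewee pattern, so init_models(DB) presumably declares peewee models against DB.Model and creates their tables. A hedged sketch with a hypothetical Product model matching the routes above:

from peewee import CharField, FloatField

def init_models(db):
    class Product(db.Model):  # flask_peewee's Database exposes a declarative base
        name = CharField()
        price = FloatField()

    db.database.create_tables([Product], safe=True)
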
Code Example #17
def evaluate_policy(args, env):
    guides = generate_guide_grid(args.bin_divide)
    net, optimizer, epoch, exploration, num_steps = init_models(args)
    output_path = args.output_path
    for episode in range(100):
        buffer_manager = BufferManager(args)
        action_manager = ActionSampleManager(args, guides)
        action_var = torch.from_numpy(np.array([-1.0, 0.0])).repeat(
            1, args.frame_history_len - 1, 1).float()

        # initialize environment
        obs_ori, info = env.reset()
        obs = obs_ori.reshape(
            (args.frame_height, 4, args.frame_width, 4, 3)).max(3).max(1)
        info['seg'].resize(args.frame_height, args.frame_width)

        encoder = DataEncoder()

        print('Start episode...')

        for step in range(args.max_eval_step):
            obs_var = buffer_manager.store_frame(obs, info)
            action, guide_action, p = action_manager.sample_action(
                net=net,
                obs=obs,
                obs_var=obs_var,
                action_var=action_var,
                exploration=exploration,
                step=step,
                explore=False,
                testing=True)

            # in the test mode, sample_action outputs the guide_action and action
            # for future pred_step steps, while we only take those for the next frame
            # to execute and store into buffer
            output = net_infer(args, obs_var, action, net, action_var)

            bboxes, labels, scores = [], [], []
            if args.use_detection:
                loc_preds, cls_preds = output['loc_pred'][0].cpu(
                ), output['cls_pred'][0].cpu()

                for find in range(loc_preds.size(0)):
                    print(cls_preds[find].sigmoid().max())
                    frame_loc_pred = loc_preds[find]
                    frame_cls_pred = cls_preds[find]
                    pred_bboxes, pred_labels, pred_scores = encoder.decode(
                        frame_loc_pred,
                        frame_cls_pred,
                        input_size=(args.frame_width, args.frame_height))
                    bboxes.append(pred_bboxes)
                    labels.append(pred_labels)
                    scores.append(pred_scores)

            draw_prediction(step, args, output, bboxes, scores, 'outcome',
                            '{}/{}'.format(output_path, episode))

            # in the testing mode, $action and $guide_action are for multiple samples, we only pick up the first one
            guide_action = guide_action[0]
            action = action[0]

            obs_ori, reward, done, info = env.step(action)
            obs = obs_ori.reshape(
                (args.frame_height, 4, args.frame_width, 4, 3)).max(3).max(1)
            info['seg'].resize(args.frame_height, args.frame_width)
            draw_current_frame(
                args, action, obs_ori, p, None, bboxes, scores,
                os.path.join(output_path, str(episode), str(step)), step)

            print(
                "step: {0} | action [{1:.2f}, {2:.2f}] coll {3} offroad {4} offlane {5} speed {6:.2f} reward {7:.2f}"
                .format(step, action[0], action[1], info['collision'],
                        info['offroad'], info['offlane'], info['speed'],
                        reward))

            action_var = buffer_manager.store_effect(guide_action=guide_action,
                                                     action=action,
                                                     reward=reward,
                                                     done=done,
                                                     info=info)
            if done:
                break
Code Example #18
    'MONGODB_PORT': 27017,
    'MONGODB_DB_NAME': 'eta_app',
    'MONGODB_SEGMENT_COLLECTION': 'segments',
    'MONGODB_VEHICLE_COLLECTION': 'vehicles',
    'MONGOENGINE_DB_ALIAS': 'default',
    'JSON_ROUTE_DATA': 'ikot_route_test.geojson',
    'ZOMBIE_THRESHOLD_TIMEDELTA': 5
}

# configure timezone
os.environ['TZ'] = "Asia/Manila"

me_connection = me.register_connection(db=config['MONGODB_DB_NAME'],
                                       alias=config['MONGOENGINE_DB_ALIAS'])

models = models.init_models(config)

# current_folder = Path(os.getcwd())
# outer_folder = current_folder.parent
# update_segment_folder = Path(str(outer_folder), "metrics/data/network_computation/update_segment")

app_directory = os.getcwd()
update_segment_folder = os.path.join(
    app_directory, 'metrics/data/network_computation/update_segment')

# print(update_segment_folder)


# Updates a Segment (given seg_id) in the database from traffic data in NIMPA
# Flow:
# 1. fetch the desired Segment
Code Example #19
from flask import Flask, request, jsonify, abort
from models import init_app as init_models, Note, db
from datetime import datetime, timedelta

app = Flask(__name__)
init_models(app)


def note_to_json(note: Note):
    return {
        'id': note.id,
        'kind': note.kind,
        'content': note.content,
        'lat': note.lat,
        'lon': note.long,
        'altitude': note.altitude,
        'expires': note.expires,
        'color': note.colour
    }


@app.route('/notes', methods=['GET'])
def note_list():
    long = float(request.args['long'])
    lat = float(request.args['lat'])
    resolution = float(request.args.get('resolution', '0.001'))
    # TODO: this will fail opposite the prime meridian and/or at the poles
    notes = Note.query.filter(Note.long >= long - resolution,
                              Note.long <= long + resolution,
                              Note.lat >= lat - resolution,
                              Note.lat <= lat + resolution).all()
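
Example #19 imports init_app (aliased to init_models), Note, and db from models. A hypothetical models.py matching that import and the fields read in note_to_json, assuming Flask-SQLAlchemy:

from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

class Note(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    kind = db.Column(db.String(32))
    content = db.Column(db.Text)
    lat = db.Column(db.Float)
    long = db.Column(db.Float)
    altitude = db.Column(db.Float)
    expires = db.Column(db.DateTime)
    colour = db.Column(db.String(16))

def init_app(app):
    db.init_app(app)
    with app.app_context():
        db.create_all()
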
Code Example #20
def evaluate_policy(args, env):
    guides = generate_guide_grid(args.bin_divide)
    train_net, net, optimizer, epoch, exploration, num_steps = init_models(
        args)

    buffer_manager = BufferManager(args)
    action_manager = ActionSampleManager(args, guides)
    action_var = Variable(torch.from_numpy(np.array([-1.0, 0.0])).repeat(
        1, args.frame_history_len - 1, 1),
                          requires_grad=False).float()

    # prepare video recording
    if args.recording:
        video_folder = os.path.join(args.video_folder, "%d" % num_steps)
        os.makedirs(video_folder, exist_ok=True)
        if args.sync:
            video = cv2.VideoWriter(os.path.join(video_folder, 'video.avi'),
                                    cv2.VideoWriter_fourcc(*'MJPG'), 24.0,
                                    (args.frame_width, args.frame_height),
                                    True)
        else:
            video = None
            signal = mp.Value('i', 1)
            p = mp.Process(target=record_screen,
                           args=(signal, os.path.join(video_folder,
                                                      'video.avi'), 1280, 800,
                                 24))
            p.start()

    # initialize environment
    obs, info = env.reset()
    if args.recording:
        log_frame(obs, buffer_manager.prev_act, video_folder, video)

    print('Start episode...')

    for step in range(1000):
        obs_var = buffer_manager.store_frame(obs, info)
        action, guide_action = action_manager.sample_action(
            net=net,
            obs=obs,
            obs_var=obs_var,
            action_var=action_var,
            exploration=exploration,
            step=step,
            explore=False,
            testing=True)
        draw(action, step, obs_var, net, args, action_var, 'outcome')
        cv2.imwrite(os.path.join('demo', str(step), 'obs.png'),
                    cv2.cvtColor(obs, cv2.COLOR_BGR2RGB))
        obs, reward, done, info = env.step(action[0])

        action_var = buffer_manager.store_effect(guide_action=guide_action,
                                                 action=action,
                                                 reward=reward,
                                                 done=done,
                                                 collision=info['collision'],
                                                 offroad=info['offroad'])
        if args.recording:
            log_frame(obs, action, video_folder, video)

        if done:
            print('Episode finished ...')
            if args.recording:
                if args.sync:
                    video.release()
                    if sys.platform == 'linux':  # save memory
                        os.system('ffmpeg -y -i {0} {1}'.format(
                            os.path.join(video_folder, 'video.avi'),
                            os.path.join(video_folder, 'video.mp4')))
                        if os.path.exists(
                                os.path.join(video_folder, 'video.mp4')):
                            os.remove(os.path.join(video_folder, 'video.avi'))
                else:
                    signal.value = 0
                    p.join()
                    del p
            break
Code Example #21
File: daemon_parsers.py Project: mrcolts/hdrezkabot
import asyncio

from config import Config
from parsers import SerialsParser, UpdateSerialParser
from models import Serial, LastUpdateHash, User, init_models

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    init_models(Config.RDB, loop)

    cours = asyncio.gather(
        # SerialsParser(Config).parse(),
        UpdateSerialParser(Config).parse()
    )

    try:
        loop.run_until_complete(cours)
    finally:
        loop.close()