def build_env(args):
    ncpu = multiprocessing.cpu_count()
    if sys.platform == 'darwin': ncpu //= 2
    nenv = args.num_env or ncpu
    config = tf.ConfigProto(allow_soft_placement=True,
                            intra_op_parallelism_threads=0,
                            inter_op_parallelism_threads=0)  # 1 or more?
    config.gpu_options.allow_growth = True
    get_session(config=config)

    env = make_vec_env(nenv, args.seed, copeoperation=True)

    return env
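Several snippets on this page call a get_session() helper against TensorFlow without showing it. A minimal sketch of such a helper, assuming TF 1.x and baselines-style module-level caching (the caching detail and helper body are assumptions, not from the source):

import tensorflow as tf

_SESSION = None  # module-level cache, assumed


def get_session(config=None):
    """Return the process-wide TF session, creating it from `config` on first use."""
    global _SESSION
    if _SESSION is None:
        if config is None:
            config = tf.ConfigProto(allow_soft_placement=True)
            config.gpu_options.allow_growth = True
        _SESSION = tf.Session(config=config)
    return _SESSION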
Example #2
 def get(self):
     util.start_session(self)
     session = util.get_session(self)
     payment = 'Full Payment'        
     if 'payment' in session:
         payment = session['payment']
     self.response.out.write(pages.buy_now(payment))
Example #3
    def setUp(self):
        # make the fmim
        kw_filters = [self.my_kw_filter]
        doc_filters = [self.kw_count_filter, self.has_database_filter]

        self.r = LinRelRecommender(2, 2,
                                   1.0, 0.1, 1.0, 0.1,
                                   # the default configuration
                                   kw_filters=None,
                                   doc_filters=[self.kw_count_filter, self.has_database_filter],
                                   **fmim.__dict__)
        
        self.session = get_session()
        
        self.session.update_kw_feedback(Keyword.get("redis"), .7)
        self.session.update_kw_feedback(Keyword.get("database"), .6)
        
        self.session.update_doc_feedback(Document.get(1), .7)
        self.session.update_doc_feedback(Document.get(2), .7)
        self.session.update_doc_feedback(Document.get(8), .7)

        filtered_kws = self.r._filter_objs(kw_filters, kws = Keyword.all_kws)
        filtered_docs = self.r._filter_objs(doc_filters, docs = Document.all_docs)
        
        kw2doc_submat, kw_ind_map, kw_ind_map_r = self.r._submatrix_and_indexing(filtered_kws, filtered_docs, fmim.kw2doc_m, fmim.kw_ind, fmim.doc_ind)
        doc2kw_submat, doc_ind_map, doc_ind_map_r = self.r._submatrix_and_indexing(filtered_docs, filtered_kws, fmim.doc2kw_m, fmim.doc_ind, fmim.kw_ind)
        
        self.fmim = FeatureMatrixAndIndexMapping(kw_ind_map, doc_ind_map, kw2doc_submat, doc2kw_submat, kw_ind_map_r, doc_ind_map_r)
Example #4
    def setUp(self):
        init_recommender = QueryBasedRecommender(3, 2, 
                                                 3, 2, 
                                                 **fmim.__dict__)
        main_recommender = LinRelRecommender(3, 3, 
                                             1., .5, 
                                             1., .5, 
                                             None, None,
                                             None, None,
                                             **fmim.__dict__)

        self.app = CmdApp(OnePassPropagator, OverrideUpdater, 
                          init_recommender, main_recommender)        
        
        self.session = get_session()
        
        #add recommended list
        self.session.add_doc_recom_list(Document.get_many([1,2,3]))
        self.session.add_kw_recom_list(Keyword.get_many(["a", "redis", "database"]))
        
        self.fb = {
            "docs": [[1, .5]],
            "kws": [["redis", .5]],
            "dockws": [["redis", 1, .5]]
        }
        
        random.seed(123456)
Example #5
 def setUp(self):
     self.session = get_session()
     FilterRepository.init(
         session=self.session,
         kw_fb_threshold=.29,
         doc_fb_threshold=.37,
     )
def insert_coin_route(name):
    with get_session() as session:
        inserted = insert_coin(session, name)

    if not inserted:
        return response(status=400, data={"msg": "Failed to insert coin."})

    return response(status=200, data=inserted)
def buy_product_route(name):
    with get_session() as session:
        bought, data = buy_product(session, name)

    if bought is True:
        return response(status=200, data=data)
    else:
        return response(status=400, data=data)
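The route handlers above use get_session() as a context manager, but the helper itself is not shown on this page. A minimal sketch, assuming SQLAlchemy (the engine URL and names are illustrative assumptions):

from contextlib import contextmanager

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine("sqlite:///app.db")  # illustrative URL, not from the source
Session = sessionmaker(bind=engine)


@contextmanager
def get_session():
    # Yield a session, roll back on error, and always close it.
    session = Session()
    try:
        yield session
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()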
Example #8
 def load(self):
     """
     Searches the database and populates the Streamer instance with the
     quotes that it needs to retrieve from YQL.
     """
     with get_session() as session:
         stocks_in_database = session.query(Stock).all()
     for stock in stocks_in_database:
         self.add(str(stock.symbol))
Example #9
 def __init__(self):
     # Initialize state
     self.session = get_session()
     self.sku_id = global_config.getRaw('config', 'sku_id')
     self.seckill_init_info = dict()
     self.seckill_url = dict()
     self.seckill_order_data = dict()
     self.timers = Timer()
     self.default_user_agent = global_config.getRaw('config', 'DEFAULT_USER_AGENT')
Example #10
 def find_database(self, symbol):
     """
     Searches the database for the symbol.
     
     @symbol a string of a stock's ticker symbol.
     @return the object if it exists, None otherwise.
     """
     with get_session() as session:
         stock = session.query(Stock).filter(Stock.symbol == symbol).first()
     return stock
Example #11
    def setUp(self):
        self.r = LinRelRecommender(2, 2, 1., .1, 1., .1, None, None,
                                   **fmim.__dict__)

        self.session = get_session()

        # give the feedback
        self.session.update_kw_feedback(Keyword.get("redis"), .7)
        self.session.update_kw_feedback(Keyword.get("database"), .6)

        self.session.update_doc_feedback(Document.get(1), .7)
        self.session.update_doc_feedback(Document.get(2), .7)
        self.session.update_doc_feedback(Document.get(8), .7)
Example #12
 def update(self):
     while self.running:
         if len(self.stocks) > 0:
             # Retrieve a response object from Yahoo's YQL service.
             try:
                 stocks_json_request = requests.get(STREAM_URL.format(repr(self.stocks_csv)))
             except requests.ConnectionError:
                 print('Cannot connect to the service. Please check your connection.')
                 print('Trying again in {} seconds.'.format(UPDATE_INTERVAL))
                 sleep(UPDATE_INTERVAL)
                 continue
             except requests.HTTPError as e:
                 print(e)
                 sleep(UPDATE_INTERVAL)
                 continue
             except requests.RequestException:
                 print('Could not retrieve the json from the specified URL.')
                 sleep(UPDATE_INTERVAL)
                 continue

             # Retrieve the json data from the response.
             try:
                 stocks_json_data = stocks_json_request.json()
             except ValueError as e:
                 print(e)
                 print('Something bad occurred when trying to update the stocks '
                       'from Yahoo\'s YQL service.')
                 sleep(UPDATE_INTERVAL)
                 continue
             # Retrieve a list of each stock's data, which are stored as dictionaries.
             stocks_data_list = stocks_json_data['query']['results']['quote']
             if isinstance(stocks_data_list, dict):  # Occurs when only one stock entry is returned.
                 stocks_data_list = [stocks_data_list]
             
             # Update each stock's data.
             with get_session() as session:
                 for stock_data in stocks_data_list:
                     stock = session.query(Stock).filter(Stock.symbol == stock_data['Symbol']).first()
                     stock.last_trade_price = stock_data['LastTradePriceOnly']
                     session.add(stock)
                     print('\nSymbol: ' + stock.symbol)
                     print('Price: ' + stock.last_trade_price)
                     
                 # Commit the changes.
                 session.commit()
                 
                 print('- - - - - - - - - - - - - - -')
                 
                 # Warn the user if the number of stocks in the database does not
                 # match the number of stocks the application is attempting to update.
                 num_stocks = session.query(Stock).count()
                 if num_stocks != len(self.stocks):
                     print('Warning: the number of stocks in the database does not match '
                         'the number of stocks this program is updating.')
                     print('Some data may not be refreshing and will be inaccurate.')
             
         sleep(UPDATE_INTERVAL)
Example #13
 def __init__(self):
     # Initialize state
     self.session = get_session()
     self.sku_id = global_config.getRaw('config', 'sku_id')
     self.seckill_init_info = dict()
     self.seckill_url = dict()
     self.seckill_order_data = dict()
     self.timers = Timer()
     self.default_user_agent = global_config.getRaw(
         'config', 'DEFAULT_USER_AGENT')
     self.is_login = False
     self.time_diff = 0.1
     self.nick_name = ''
     print("京东时间:%s\n本地时间:%s" %
           (datetime.fromtimestamp(self.getJdTime()), datetime.now()))
Example #14
    def setUp(self):
        self.r = LinRelRecommender(2, 2, 
                                   1., .1, 1., .1,
                                   None, None,
                                   **fmim.__dict__)

        self.session = get_session()

        # give the feedback
        self.session.update_kw_feedback(Keyword.get("redis"), .7)
        self.session.update_kw_feedback(Keyword.get("database"), .6)
        
        self.session.update_doc_feedback(Document.get(1), .7)
        self.session.update_doc_feedback(Document.get(2), .7)
        self.session.update_doc_feedback(Document.get(8), .7)
Example #15
 def get(self):
     util.start_session(self)
     session = util.get_session(self)
     if 'key' in session:
         form = datastore.Get(session['key'])
         if 'email' in session:
             self.response.out.write(session['email'])
             mailed = free_email.blue_email(settings.from_email, session['email'] + ", " + settings.to_email, settings.thank_you_subject, settings.thank_you_body)
             if mailed:
                 form['email_sent'] = 'maybe'
             else:
                 form['email_sent'] = 'no'        
         else:
             form['email_sent'] = 'no'
             self.response.out.write("Thank you")
         datastore.Put(form)        
     self.redirect(settings.thank_you_url)
Example #16
def setup():
    """
    Destroy the whole damn database before running tests! WOO!

    We would do this afterwards, but sometimes you might need to do a
    post-mortem on the state of the database.

    """
    with get_session() as s:
        colls = s.db.collection_names()
        for coll in colls:
            if coll == 'system.indexes':
                continue
            s.db[coll].drop_indexes()
            s.db[coll].drop()

        s.db.command({'dropDatabase':1})
def possibly_req_auth(username, password):
    global usr, pas, logged_in, localCookie
    if logged_in:
        return
    if not usr:
        usr = username or input('Username: ')
    if not pas:
        pas = password or input('Password: ')
    login_data = urlencode(
        {'username': usr, 'password': pas}
    ).encode('ascii')
    print('open', FIMFICTION + '/ajax/login.php', login_data)
    ret = get_session().post(FIMFICTION + '/ajax/login.php', data={'username': usr, 'password': pas})
    print(type(ret.json()))
    if 'signing_key' not in ret.json():
        fail('Login failed, check your username and password')
    logged_in = True
    return usr, pas
Example #18
 def add(self, symbol):
     """
     Adds a symbol to the Streamer. add updates the Streamer's stocks
     array, stocks_csv string, and puts the symbol into the database.
     
     @symbol a string of a stock's ticker symbol.
     """
     if self.is_valid_symbol(symbol):
         symbol = symbol.upper()
         in_local = self.find_local(symbol) is not None
         in_database = self.find_database(symbol) is not None
         if not in_local:
             self.stocks.append(symbol)
             self.stocks_csv = '","'.join(self.stocks)
         if not in_database:
             new_stock = Stock(symbol)
             with get_session() as session:
                 session.add(new_stock)
                 session.commit()
     else:
         print('Symbol must be a string.')
Example #19
    def setUp(self):
        #make the fmim
        kw_filters = [self.my_kw_filter]
        doc_filters = [self.kw_count_filter, self.has_database_filter]

        self.r = LinRelRecommender(
            2,
            2,
            1.0,
            0.1,
            1.0,
            0.1,
            #the default configuration
            kw_filters=None,
            doc_filters=[self.kw_count_filter, self.has_database_filter],
            **fmim.__dict__)

        self.session = get_session()

        self.session.update_kw_feedback(Keyword.get("redis"), .7)
        self.session.update_kw_feedback(Keyword.get("database"), .6)

        self.session.update_doc_feedback(Document.get(1), .7)
        self.session.update_doc_feedback(Document.get(2), .7)
        self.session.update_doc_feedback(Document.get(8), .7)

        filtered_kws = self.r._filter_objs(kw_filters, kws=Keyword.all_kws)
        filtered_docs = self.r._filter_objs(doc_filters,
                                            docs=Document.all_docs)

        kw2doc_submat, kw_ind_map, kw_ind_map_r = self.r._submatrix_and_indexing(
            filtered_kws, filtered_docs, fmim.kw2doc_m, fmim.kw_ind,
            fmim.doc_ind)
        doc2kw_submat, doc_ind_map, doc_ind_map_r = self.r._submatrix_and_indexing(
            filtered_docs, filtered_kws, fmim.doc2kw_m, fmim.doc_ind,
            fmim.kw_ind)

        self.fmim = FeatureMatrixAndIndexMapping(kw_ind_map, doc_ind_map,
                                                 kw2doc_submat, doc2kw_submat,
                                                 kw_ind_map_r, doc_ind_map_r)
Example #20
    def setUp(self):
        init_recommender = QueryBasedRecommender(3, 2, 3, 2, **fmim.__dict__)
        main_recommender = LinRelRecommender(3, 3, 1., .5, 1., .5, None, None,
                                             None, None, **fmim.__dict__)

        self.app = CmdApp(OnePassPropagator, OverrideUpdater, init_recommender,
                          main_recommender)

        self.session = get_session()

        #add recommended list
        self.session.add_doc_recom_list(Document.get_many([1, 2, 3]))
        self.session.add_kw_recom_list(
            Keyword.get_many(["a", "redis", "database"]))

        self.fb = {
            "docs": [[1, .5]],
            "kws": [["redis", .5]],
            "dockws": [["redis", 1, .5]]
        }

        random.seed(123456)
Example #21
 def remove(self, symbol):
     """
     Removes a symbol from the Streamer. remove updates the Streamer's stocks
     array, stocks_csv string, and deletes the symbol from the database.
     
     @symbol a string of a stock's ticker symbol.
     """
     if self.is_valid_symbol(symbol):
         symbol = symbol.upper()
         in_local = self.find_local(symbol) is not None
         in_database = self.find_database(symbol) is not None
         if in_local and in_database:
             self.stocks.remove(symbol)
             self.stocks_csv = '","'.join(self.stocks)
             with get_session() as session:
                 stock = session.query(Stock).filter(Stock.symbol == symbol).first()
                 session.delete(stock)
                 session.commit()
         elif not in_local:
             print('Symbol {} is not in the local stocks array.'.format(symbol))
         else:
             print('Symbol {} is not in the database.'.format(symbol))
     else:
         print('Symbol must be a string.') 
Example #22
import argparse

from keras.optimizers import SGD, adam, RMSprop
from keras.datasets import mnist, cifar10 
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from keras.callbacks import TensorBoard
from keras.preprocessing.image import ImageDataGenerator
import keras.backend as K
import matplotlib.pyplot as plt

from model import mnist_model, cifar_model
from util import preprocess_image_array, preprocess_label_array
from util import find_best_lr, plot_training_curves, get_session 

from clr_callback import CyclicLR

# allow GPU growth
K.tensorflow_backend.set_session(get_session())

# command line
parser = argparse.ArgumentParser()

# dataset and model parameters
parser.add_argument('--dataset', default='mnist', help='one of mnist, cifar10')
parser.add_argument('--kernel_initializer', default='he_normal',
        help='kernel initializer, only modifies cifar10 model')
parser.add_argument('--activation', default='relu',
        help='activation function, only modifies cifar10 model')
parser.add_argument('--weight_decay', default=0.0005, type=float,
        help='weight decay (only applied to the cifar model)')

# training and learning rate parameters
parser.add_argument('--optimizer', default='sgd',
Example #23
# -*- coding: utf-8 -*-
# Program test section

import requests
import setting
import util
import downloads
import choose_course

# Test fetching the cookie
url = "http://bkxk.szu.edu.cn/xsxkapp/sys/xsxkapp/*default/index.do"
session = util.get_session()
data = {
    "timeTemp":util.get_timestamp()
}

# Query course preferences
# response = session.post(util.get_url("xsxkapp/sys/xsxkapp/publicinfo/volunteer.do"),headers=setting.headers,data = data)
# print(response.headers)
# print(response.text)
#print(downloads.recommended_course(0))
# downloads.downloads()
Example #24
 def setUp(self):
     self.session = get_session()
     FilterRepository.init(session=self.session,
                           kw_fb_threshold=.29,
                           doc_fb_threshold=.37,
     )
def delete_all_coins_route():
    with get_session() as session:
        delete_all_coins(session)

    return response(status=200, data={})
def get_coins_route():
    with get_session() as session:
        data = {"change": get_coin_amount(session)}

    return response(status=200, data=data)
def learn(env,
          total_timesteps,
          seed=None,
          nsteps=1024,
          ent_coef=0.01,
          lr=0.01,
          vf_coef=0.5,
          p_coef=1.0,
          max_grad_norm=None,
          gamma=0.99,
          lam=0.95,
          nminibatches=15,
          noptepochs=4,
          cliprange=0.2,
          save_interval=100,
          copeoperation=False,
          human_ent_coef=0.01,
          human_vf_coef=0.5,
          human_p_coef=1.0):

    set_global_seeds(seed)
    sess = get_session()
    global_summary = tf.summary.FileWriter(
        'summaries/' + 'feeding' +
        datetime.datetime.now().strftime('%d-%m-%y%H%M'), sess.graph)

    if isinstance(lr, float): lr = constfn(lr)
    else: assert callable(lr)
    if isinstance(cliprange, float): cliprange = constfn(cliprange)
    else: assert callable(cliprange)

    # Get the nb of env
    nenvs = env.num_envs
    # Calculate the batch_size
    nbatch = nenvs * nsteps
    nbatch_train = nbatch // nminibatches
    if copeoperation == True:
        human_model = Model(env=env,
                            nbatch_act=nenvs,
                            nbatch_train=nbatch_train,
                            ent_coef=human_ent_coef,
                            vf_coef=human_vf_coef,
                            p_coef=human_p_coef,
                            max_grad_norm=max_grad_norm,
                            human=True,
                            robot=False)
        robot_model = Model(env=env,
                            nbatch_act=nenvs,
                            nbatch_train=nbatch_train,
                            ent_coef=ent_coef,
                            vf_coef=vf_coef,
                            p_coef=p_coef,
                            max_grad_norm=max_grad_norm,
                            human=False,
                            robot=True)

    if copeoperation == False:
        model = Model(env=env,
                      nbatch_act=nenvs,
                      nbatch_train=nbatch_train,
                      ent_coef=ent_coef,
                      vf_coef=vf_coef,
                      p_coef=p_coef,
                      max_grad_norm=max_grad_norm)
    initialize()

    # Instantiate the runner object
    if copeoperation == True:
        runner = Runner(env=env,
                        model=None,
                        nsteps=nsteps,
                        gamma=gamma,
                        lam=lam,
                        human_model=human_model,
                        robot_model=robot_model)
    if copeoperation == False:
        runner = Runner(env=env,
                        model=model,
                        nsteps=nsteps,
                        gamma=gamma,
                        lam=lam)

    epinfobuf = deque(maxlen=10)  # the 10 most recent episodes
    pbar = tqdm(total=total_timesteps, dynamic_ncols=True)

    tfirststart = time.perf_counter()

    nupdates = total_timesteps // nbatch
    for update in range(1, nupdates + 1):
        assert nbatch % nminibatches == 0
        # Anneal the learning rate and clip range linearly over training
        frac = 1.0 - (update - 1.0) / nupdates
        lrnow = lr(frac)
        cliprangenow = cliprange(frac)

        # Get minibatch
        if copeoperation == False:
            obs, returns, masks, actions, values, neglogpacs, epinfos = runner.run(
            )
        if copeoperation == True:
            obs, human_returns, robot_returns, masks, human_actions, robot_actions, human_values, robot_values, human_neglogpacs, robot_neglogpacs, epinfos = runner.coop_run(
            )
        epinfobuf.extend(epinfos)
        mblossvals = []
        human_mblossvals = []
        robot_mblossvals = []
        inds = np.arange(nbatch)
        for _ in range(noptepochs):
            # Randomize the indexes
            np.random.shuffle(inds)
            for start in range(0, nbatch, nbatch_train):
                end = start + nbatch_train
                mbinds = inds[start:end]
                if copeoperation == True:
                    human_slices = (arr[mbinds]
                                    for arr in (obs[:, 24:], human_returns,
                                                human_actions, human_values,
                                                human_neglogpacs))
                    robot_slices = (arr[mbinds]
                                    for arr in (obs[:, :24], robot_returns,
                                                robot_actions, robot_values,
                                                robot_neglogpacs))
                    human_mblossvals.append(
                        human_model.train(lrnow, cliprangenow, *human_slices))
                    robot_mblossvals.append(
                        robot_model.train(lrnow, cliprangenow, *robot_slices))
                if copeoperation == False:
                    slices = (arr[mbinds] for arr in (obs, returns, actions,
                                                      values, neglogpacs))
                    mblossvals.append(model.train(lrnow, cliprangenow, *slices))
        # Feedforward --> get losses --> update
        if copeoperation == True:
            human_lossvals = np.mean(human_mblossvals, axis=0)
            robot_lossvals = np.mean(robot_mblossvals, axis=0)
        if copeoperation == False:
            lossvals = np.mean(mblossvals, axis=0)
        summary = tf.Summary()
        if copeoperation == True:
            human_ev = explained_variance(human_values, human_returns)
            robot_ev = explained_variance(robot_values, robot_returns)
        if copeoperation == False:
            ev = explained_variance(values, returns)
        performance_r = np.mean([epinfo['r'] for epinfo in epinfobuf])
        performance_len = np.mean([epinfo['l'] for epinfo in epinfobuf])
        success_time = np.mean(
            [epinfo['success_time'] for epinfo in epinfobuf])
        fall_time = np.mean([epinfo['fall_time'] for epinfo in epinfobuf])
        summary.value.add(tag='Perf/Reward', simple_value=performance_r)
        summary.value.add(tag='Perf/episode_len', simple_value=performance_len)
        summary.value.add(tag='Perf/success_time', simple_value=success_time)
        summary.value.add(tag='Perf/fall_time', simple_value=fall_time)
        if copeoperation == True:
            summary.value.add(tag='Perf/human_explained_variance',
                              simple_value=float(human_ev))
            summary.value.add(tag='Perf/robot_explained_variance',
                              simple_value=float(robot_ev))
        if copeoperation == False:
            summary.value.add(tag='Perf/explained_variance',
                              simple_value=float(ev))
        if copeoperation == True:
            for (human_lossval, human_lossname) in zip(human_lossvals,
                                                       human_model.loss_names):
                if human_lossname == 'grad_norm':
                    summary.value.add(tag='grad/' + human_lossname,
                                      simple_value=human_lossval)
                else:
                    summary.value.add(tag='human_loss/' + human_lossname,
                                      simple_value=human_lossval)
            for (robot_lossval, robot_lossname) in zip(robot_lossvals,
                                                       robot_model.loss_names):
                if robot_lossname == 'grad_norm':
                    summary.value.add(tag='grad/' + robot_lossname,
                                      simple_value=robot_lossval)
                else:
                    summary.value.add(tag='robot_loss/' + robot_lossname,
                                      simple_value=robot_lossval)
        if copeoperation == False:
            for (lossval, lossname) in zip(lossvals, model.loss_names):
                if lossname == 'grad_norm':
                    summary.value.add(tag='grad/' + lossname,
                                      simple_value=lossval)
                else:
                    summary.value.add(tag='loss/' + lossname,
                                      simple_value=lossval)

        global_summary.add_summary(summary, int(update * nbatch))
        global_summary.flush()
        print('finished one update')
        if update % 10 == 0:
            msg = 'step: {}, episode reward: {}, episode len: {}, success_time: {}, fall_time: {}'
            pbar.update(10 * nbatch)  # timesteps consumed since the previous report
            pbar.set_description(
                msg.format(update * nbatch, performance_r, performance_len,
                           success_time, fall_time))

        if update % save_interval == 0:
            tnow = time.perf_counter()
            print('elapsed time', tnow - tfirststart)
            if copeoperation == True:
                savepath = osp.join("my_model_cop/", '%.5i' % update)
            if copeoperation == False:
                savepath = osp.join("my_model/", '%.5i' % update)
            os.makedirs(savepath, exist_ok=True)
            savepath = osp.join(savepath, 'ppomodel')
            print('Saving to', savepath)
            save_state(savepath)
    pbar.close()

    if copeoperation == True:
        return human_model, robot_model
    return model
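learn() above funnels constant lr/cliprange values through constfn and expects the vectorized environment produced by build_env from Example #1. A hedged wiring sketch (constfn as in baselines; every hyperparameter value below is illustrative, not from the source):

import argparse


def constfn(val):
    # Wrap a constant as a schedule: learn() calls lr(frac) each update.
    def f(_):
        return val
    return f


parser = argparse.ArgumentParser()
parser.add_argument('--num_env', type=int, default=None)
parser.add_argument('--seed', type=int, default=0)
args = parser.parse_args()

env = build_env(args)  # co-op vectorized environment (copeoperation=True)
human_model, robot_model = learn(env,
                                 total_timesteps=int(1e6),
                                 copeoperation=True,
                                 save_interval=100)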
Example #28
 def setUp(self):
     self.session = get_session()
     
     #add recommendation list
     self.session.add_doc_recom_list(Document.get_many([1,2]))
Example #29
    def setUp(self):
        #session
        self.session = get_session()

        #add recommendation list
        self.session.add_doc_recom_list(Document.get_many([1,2]))
Example #30
 def setUp(self):
     self.session = get_session()
     self.maxDiff = None
Example #31
 def setUp(self):
     self.session = get_session()
    def __init__(self,
                 env,
                 nbatch_act,
                 nbatch_train,
                 ent_coef,
                 vf_coef,
                 p_coef,
                 max_grad_norm,
                 human=False,
                 robot=False):
        self.sess = sess = get_session()

        if human:
            with tf.variable_scope('human_ppo_model', reuse=tf.AUTO_REUSE):

                act_model = policy(env,
                                   nbatch_act,
                                   sess,
                                   human=True,
                                   robot=False)

                train_model = policy(env,
                                     nbatch_train,
                                     sess,
                                     human=True,
                                     robot=False)
        if robot:
            with tf.variable_scope('robot_ppo_model', reuse=tf.AUTO_REUSE):

                act_model = policy(env,
                                   nbatch_act,
                                   sess,
                                   human=False,
                                   robot=True)

                train_model = policy(env,
                                     nbatch_train,
                                     sess,
                                     human=False,
                                     robot=True)
        if human == False and robot == False:
            with tf.variable_scope('ppo_model', reuse=tf.AUTO_REUSE):
                act_model = policy(env, nbatch_act, sess)

                train_model = policy(env, nbatch_train, sess)
        if human:
            self.A = A = tf.placeholder(tf.float32, shape=(nbatch_train, 4))
        if robot:
            self.A = A = tf.placeholder(tf.float32, shape=(nbatch_train, 7))
        if human == False and robot == False:
            self.A = A = tf.placeholder(
                tf.float32, shape=(nbatch_train, ) +
                env.action_space.shape)  # actions: [batch, action_dim]
        self.ADV = ADV = tf.placeholder(tf.float32, shape=[None])  #[batch]
        self.R = R = tf.placeholder(tf.float32, shape=[None])  #[batch]

        self.OLDNEGLOGPAC = OLDNEGLOGPAC = tf.placeholder(
            tf.float32, [None])  # [batch]

        self.OLDVPRED = OLDVPRED = tf.placeholder(tf.float32, [None])  # [batch]
        self.LR = LR = tf.placeholder(tf.float32, [])
        self.CLIPRANGE = CLIPRANGE = tf.placeholder(tf.float32, [])

        neglogpac = train_model.pd.neglogp(
            A)  # negative log-probability of A under the current policy

        entropy = tf.reduce_mean(train_model.pd.entropy())  #[]

        vpred = train_model.vf  #[batch,]
        vpredclipped = OLDVPRED + tf.clip_by_value(
            train_model.vf - OLDVPRED, -CLIPRANGE, CLIPRANGE)  #[batch,]
        # Unclipped value
        vf_losses1 = tf.square(vpred - R)  #[batch,]
        # Clipped value
        vf_losses2 = tf.square(vpredclipped - R)  #[batch,]

        vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))  #[]

        # Calculate ratio (pi current policy / pi old policy)
        ratio = tf.exp(OLDNEGLOGPAC - neglogpac)  # [batch]
        pg_losses = -ADV * ratio  # [batch]

        pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE,
                                             1.0 + CLIPRANGE)  # [batch]

        # Final PG loss
        pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))  #[]
        approxkl = .5 * tf.reduce_mean(
            tf.square(neglogpac - OLDNEGLOGPAC))  #[]
        clipfrac = tf.reduce_mean(
            tf.to_float(tf.greater(tf.abs(ratio - 1.0),
                                   CLIPRANGE)))  # fraction of clipped ratios

        # Total loss
        loss = pg_loss * p_coef - entropy * ent_coef + vf_loss * vf_coef  #[]

        if human:
            params = tf.trainable_variables('human_ppo_model')
        if robot:
            params = tf.trainable_variables('robot_ppo_model')
        if human == False and robot == False:
            params = tf.trainable_variables('ppo_model')
        self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
        grads_and_var = self.trainer.compute_gradients(loss, params)
        grads, var = zip(*grads_and_var)

        if max_grad_norm is not None:
            # Clip the gradients by their global norm
            grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
        grads_and_var = list(zip(grads, var))

        self.grads = grads
        self.var = var
        self._train_op = self.trainer.apply_gradients(grads_and_var)
        if human:
            self.loss_names = [
                'human_policy_loss', 'human_value_loss',
                'human_policy_entropy', 'human_approxkl', 'human_clipfrac',
                'human_total_loss'
            ]
        if robot:
            self.loss_names = [
                'robot_policy_loss', 'robot_value_loss',
                'robot_policy_entropy', 'robot_approxkl', 'robot_clipfrac',
                'robot_total_loss'
            ]
        if human == False and robot == False:
            self.loss_names = [
                'policy_loss', 'value_loss', 'policy_entropy', 'approxkl',
                'clipfrac', 'total_loss'
            ]
        self.stats_list = [pg_loss, vf_loss, entropy, approxkl, clipfrac, loss]

        self.train_model = train_model
        self.act_model = act_model
        self.step = act_model.step
        self.value = act_model.value
from flask import Flask

from methods.coin import (get_coin_amount, insert_coin, delete_all_coins)
from methods.product import (reload_stock, buy_product)

from util import response, get_session

app = Flask(__name__)
app.debug = True


base.metadata.create_all(db)


# add default stock
with app.test_request_context():
    with get_session() as session:
        reload_stock(session)


@app.route("/coin", methods=["GET"])
def get_coins_route():
    with get_session() as session:
        data = {"change": get_coin_amount(session)}

    return response(status=200, data=data)


@app.route("/coin", methods=["DELETE"])
def delete_all_coins_route():
    with get_session() as session:
        delete_all_coins(session)
Example #34
 def setUp(self):
     #session
     self.session = get_session()
Example #35
File: train.py Project: ZhuFengdaaa/adda
def train(args):
    # Reads paths of images together with their labels
    with tf.device("/cpu:0"):
        suncg_color_image_list, suncg_depth_image_list, mp3d_color_image_list, mp3d_depth_image_list = read_labeled_image_list(
            "/home/linchao/minos/gym/suncg", "/home/linchao/minos/gym/matter")
        print(len(suncg_color_image_list), len(suncg_depth_image_list),
              len(mp3d_color_image_list), len(mp3d_depth_image_list))
        min_len = min(len(suncg_color_image_list), len(suncg_depth_image_list),
                      len(mp3d_color_image_list), len(mp3d_depth_image_list))
        suncg_color_image_list = suncg_color_image_list[:min_len]
        suncg_depth_image_list = suncg_depth_image_list[:min_len]
        mp3d_color_image_list = mp3d_color_image_list[:min_len]
        mp3d_depth_image_list = mp3d_depth_image_list[:min_len]

        suncg_color_images = ops.convert_to_tensor(suncg_color_image_list,
                                                   dtype=tf.string)
        suncg_depth_images = ops.convert_to_tensor(suncg_depth_image_list,
                                                   dtype=tf.string)
        mp3d_color_images = ops.convert_to_tensor(mp3d_color_image_list,
                                                  dtype=tf.string)
        mp3d_depth_images = ops.convert_to_tensor(mp3d_depth_image_list,
                                                  dtype=tf.string)
        # Makes an input queue
        input_queue = tf.train.slice_input_producer(
            [suncg_color_images, suncg_depth_images, mp3d_color_images,
             mp3d_depth_images],
            num_epochs=args.num_epochs,
            shuffle=True)

        suncg_color_image, suncg_depth_image, mp3d_color_image, mp3d_depth_image = read_images_from_disk(
            input_queue)

        # Optional Preprocessing or Data Augmentation
        # tf.image implements most of the standard image augmentation
        suncg_color_image = preprocess_color(suncg_color_image,
                                             args.input_size)
        mp3d_color_image = preprocess_color(mp3d_color_image, args.input_size)
        suncg_depth_image = preprocess_depth(suncg_depth_image,
                                             args.input_size)
        mp3d_depth_image = preprocess_depth(mp3d_depth_image, args.input_size)

        # Optional Image and Label Batching
        suncg_color_batch, suncg_depth_batch, mp3d_color_batch, mp3d_depth_batch = tf.train.batch(
            [
                suncg_color_image, suncg_depth_image, mp3d_color_image,
                mp3d_depth_image
            ],
            batch_size=args.batch_size)

    with tf.device("/gpu:0"):
        # suncg_input = tf.concat([suncg_color_batch, suncg_depth_batch], axis=3)
        # mp3d_input = tf.concat([mp3d_color_batch, mp3d_depth_batch], axis=3)
        suncg_input = suncg_color_batch
        mp3d_input = mp3d_color_batch
        source_input = tf.concat([suncg_input, suncg_input], axis=0)
        target_input = tf.concat([mp3d_input, suncg_input], axis=0)
        source_model = SourceModel(args, source_input)
        target_model = TargetModel(args, target_input)
        suncg_output, suncg_output_source = tf.split(source_model.output,
                                                     num_or_size_splits=2,
                                                     axis=0)
        mp3d_output, suncg_output_target = tf.split(target_model.output,
                                                    num_or_size_splits=2,
                                                    axis=0)
        adversary_ft = tf.concat([suncg_output, mp3d_output], 0)
        discriminator = Discriminator(args, adversary_ft)
        adversary_logits = discriminator.output
        label_ms = tf.fill([args.batch_size, 1], 1.0)
        label_mt = tf.fill([args.batch_size, 1], 0.0)
        adversary_label = tf.concat([label_ms, label_mt], 0)
        mapping_loss = tf.nn.sigmoid_cross_entropy_with_logits(
            logits=adversary_logits, labels=1 - adversary_label)
        adversary_loss = tf.nn.sigmoid_cross_entropy_with_logits(
            logits=adversary_logits, labels=adversary_label)
        suncg_output_source = tf.Print(suncg_output_source,
                                       [suncg_output_source],
                                       message="source")
        suncg_output_target = tf.Print(suncg_output_target,
                                       [suncg_output_target],
                                       message="target")
        identity_loss = tf.nn.l2_loss(suncg_output_source -
                                      suncg_output_target) * args.idt_loss

        # trainable_variables = tf.trainable_variables() # target_model, discriminator
        source_vars = list(util.collect_vars('source').values())
        target_vars = list(util.collect_vars('target').values())
        disc_vars = list(util.collect_vars('disc').values())
        target_l2_norm = tf.add_n([tf.nn.l2_loss(v)
                                   for v in target_vars]) * args.l2_norm
        disc_l2_norm = tf.add_n([tf.nn.l2_loss(v)
                                 for v in disc_vars]) * args.l2_norm
        l2_norm = target_l2_norm + disc_l2_norm
        target_grads = tf.gradients(mapping_loss + target_l2_norm +
                                    identity_loss,
                                    target_vars,
                                    name="target_grads")
        disc_grads = tf.gradients(adversary_loss + disc_l2_norm,
                                  disc_vars,
                                  name="disc_grads")
        lr_var = tf.Variable(args.lr, name='learning_rate', trainable=False)
        optimizer = tf.train.AdamOptimizer(lr_var)  # different from adda
        apply_op = optimizer.apply_gradients(zip(target_grads + disc_grads,
                                                 target_vars + disc_vars),
                                             name='apply_op')
        # apply_target_op = optimizer.apply_gradients(zip(target_grads, target_vars), name='target_apply_op')
        # apply_disc_op = optimizer.apply_gradients(zip(disc_grads, disc_vars), name='disc_apply_op')
        _extra_train_ops = []
        train_op = tf.group([apply_op] + _extra_train_ops)
        m_loss = tf.reduce_mean(mapping_loss)
        a_loss = tf.reduce_mean(adversary_loss)
        weight_norm = tf.reduce_mean(target_l2_norm) + tf.reduce_mean(
            disc_l2_norm)
        tf.summary.scalar('lr', optimizer._lr)
        tf.summary.scalar('mapping loss', m_loss)
        tf.summary.scalar('adversary loss', a_loss)
        tf.summary.scalar('weight norm', weight_norm)
        tf.summary.scalar('l2 loss', identity_loss)
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter("./tensorboard/data")

    sess = util.get_session()
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    util.load_checkpoints(
        "/home/linchao/unreal/suncg_s_a3c/checkpoint-13000767",
        "net_-1",
        "source",
        sess=sess)
    util.load_checkpoints(
        "/home/linchao/unreal/suncg_s_a3c/checkpoint-13000767",
        "net_-1",
        "target",
        sess=sess)
    # util.load_variables_redir("/home/fengda/baselines/saved_models/exp_017/checkpoint_10000.pt", 'a2c_model/pi', 'a2c_model/pi', sess=sess)
    # util.load_variables_redir("/home/fengda/baselines/saved_models/exp_017/checkpoint_10000.pt", 'a2c_model1/pi', 'a2c_model/pi', sess=sess)
    tf.train.start_queue_runners(sess)

    cnt = 0
    for epoch in range(args.num_epochs):
        for i_batch in range(min_len // args.batch_size):
            _, summary, _m_loss, _a_loss, _idt_loss, _l2_norm = sess.run(
                [train_op, merged, m_loss, a_loss, identity_loss, l2_norm])
            writer.add_summary(summary, cnt)
            print("{}/{} loss: {} {} {} {}".format(epoch, i_batch, _m_loss,
                                                   _a_loss, _idt_loss,
                                                   _l2_norm))
            if cnt % args.save_iter == 0:
                # save_file = os.path.join(args.save_path, "checkpoint_{}.pt".format(cnt))
                print("save model iter {}".format(cnt))
                # util.save_variables(save_file, sess=sess)
                util.save_checkpoints(args.save_path, cnt, sess=sess)
            cnt += 1
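train() reads its remaining settings from the args namespace. A minimal driver sketch covering the fields the function actually touches (all default values here are illustrative assumptions):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--input_size', type=int, default=84)      # illustrative default
parser.add_argument('--batch_size', type=int, default=32)      # illustrative default
parser.add_argument('--num_epochs', type=int, default=10)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--idt_loss', type=float, default=1.0)     # identity-loss weight
parser.add_argument('--l2_norm', type=float, default=1e-4)     # L2 regularization weight
parser.add_argument('--save_iter', type=int, default=1000)
parser.add_argument('--save_path', default='./checkpoints')

if __name__ == '__main__':
    train(parser.parse_args())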