Exemple #1
0
def getData(day=None):
    """Load one day's rows from the configured MariaDB table into a DataFrame.

    day: optional date-like value; when None, the date env('TIMEDELTA') days
        before today is used.
    Returns a pandas DataFrame with every row whose eintragungsdatum matches,
    excluding the aggregate 'Deutschland' region.
    Exits the process (sys.exit(1)) when the DB connection cannot be opened.
    """
    if day is None:
        today = datetime.date.today()
        time_filter = today - datetime.timedelta(days=int(env('TIMEDELTA')))
    else:
        time_filter = day

    # db config, connect to mariaDB-database
    hostname = 'localhost'
    username = env('DB_USERNAME')
    password = env('DB_PASSWORD')
    database = env('DB_DATABASE')
    port = int(env('DB_PORT'))

    # connect to db
    try:
        connection = mariadb.connect(host=hostname, user=username, password=password, database=database, port=port)
    except mariadb.Error as e:
        print(f"Error connecting to MariaDB Platform: {e}")
        sys.exit(1)

    try:
        # Bind the date as a query parameter instead of interpolating it into
        # the SQL string (avoids injection/quoting issues). The table name
        # cannot be a bound parameter; it still comes from trusted env() config.
        df = pd.read_sql_query(
            f"""SELECT * FROM {env('DB_TABLE')} WHERE eintragungsdatum = ? AND NOT region = 'Deutschland';""",
            connection,
            params=(time_filter,))
    finally:
        # close the connection even if the query raises
        connection.close()

    # return output
    return df
Exemple #2
0
 def __init__(self, envs=os.environ):
     """Pick the Consul endpoint and open a client against it.

     When the CONSUL_AGENT flag is set (truthy), talk to the local
     agent; otherwise use the host named by CONSUL (default 'consul').
     """
     local_agent = env('CONSUL_AGENT', False, envs, fn=to_flag)
     self.host = 'localhost' if local_agent else env('CONSUL', 'consul', envs)
     self.client = pyconsul.Consul(host=self.host)
Exemple #3
0
def download():
    """Call the eBay GetCategories trading API and export the XML response.

    Builds the API headers from the environment, posts the request body,
    writes the response out under 'GetCategories', and returns the API
    wrapper object.
    """
    header = request.Head()
    # required trading-API headers, in the order the original code set them
    for field, value in (
            ('X-EBAY-API-CALL-NAME', 'GetCategories'),
            ('X-EBAY-API-APP-NAME', env('APP_NAME')),
            ('X-EBAY-API-CERT-NAME', env('CERT_NAME')),
            ('X-EBAY-API-DEV-NAME', env('EBAY_API_DEV_NAME')),
            ('X-EBAY-API-SITEID', '0'),
            ('X-EBAY-API-COMPATIBILITY-LEVEL', '989'),
            ('Content-Type', 'text/xml')):
        header.addHeader(field, value)
    body = request.Body('ebayGetCategories', 'EBAY_AUTH_TOKEN').getXML()
    ebayAPI = api.API('EBAY_API_URL')
    ebayAPI.requestXML(header.getAll(), body)
    ebayAPI.exportXML('GetCategories')
    return ebayAPI
Exemple #4
0
	def __init__(self):
		"""Set up shared helpers and empty per-request state."""
		# external helpers
		self.env = env()
		self.urlShort = urlShort()
		# collected matches and reply text, filled in later
		self.matches = {}
		self.body = ''
		self.r = ''
		# cap on how many matches we act on
		self.matchLimit = 5
Exemple #5
0
 def __init__(self, config):
     """Build the policy/value agent for the given config.

     Seeds numpy from the wall clock, pins the first GPU, and constructs
     the non-distributed RNN model.
     """
     self.config = config
     self.env = env(self.config)
     # wall-clock seed => a fresh random state on every run
     np.random.seed(int(time.time()))
     # expose only the first GPU to the framework
     os.environ["CUDA_VISIBLE_DEVICES"] = str(0)
     from function_approximation import rnn_model
     self.pv_agent = rnn_model.create_model_without_distributed(self.config)
Exemple #6
0
def main():
    """Run prioritized-sweeping Dyna and plot the time-average cost curve."""
    max_step = 1000
    q_value = {}
    model = Priority_model()

    # planning hyper-parameters
    params_dyna = DynaParamater()
    params_dyna.planning_steps = 5
    params_dyna.alpha = 0.5
    params_dyna.gamma = 0.95

    environment = env()

    backups, avg_list_reward = priority_sweeping(q_value, model, environment,
                                                 params_dyna, max_step)

    #  print(backups)
    #  print(avg_list_reward)
    curve = pd.DataFrame({'x': range(max_step), 'y_1': avg_list_reward})
    plt.xlabel("Time Slot")
    plt.ylabel("Time Average Cost")
    plt.plot('x', 'y_1', data=curve, marker='o',
             markevery=int(max_step / 10), color='red', linewidth=1,
             label="planing")
    plt.legend()
    plt.grid()
    plt.show()
Exemple #7
0
    def __init__(self, num_stations, action_space, episode, threshold,
                 collaboration):
        """Set up metrics, the result directory, the environment and agents.

        num_stations: number of stations to simulate.
        action_space: per-agent action space handed to the agent manager.
        episode: episode count; an int is wrapped into a one-element list
            (self.eps stays unset for non-int input, as before).
        threshold: success threshold forwarded to the environment.
        collaboration: whether agents share information.
        """
        # Performance Metrics
        self.success_ratios = {}
        self.team_cumulative_rewards = {}

        self.timestamp = self.get_timestamp(True)
        self.result_dir = "./performance_log/" + self.timestamp + "/graphs/"

        # exist_ok avoids the check-then-create race of exists()+makedirs()
        os.makedirs(self.result_dir, exist_ok=True)

        # Env, Agent, and Key Attributes
        self.env = env(num_stations, 50, threshold)
        # every station starts with 50 units of stock
        self.init_stocks = [50] * num_stations
        # NOTE: current_stocks intentionally aliases init_stocks (same list
        # object), matching the original behavior — mutations are shared.
        self.current_stocks = self.init_stocks
        self.num_stations = num_stations
        self.action_space = action_space
        self.q_tables = []
        self.merged_table = 0
        self.mode = "learn"
        self.collaboration = collaboration
        self.threshold = threshold

        self.agent_manager = agent_manager(self.num_stations,
                                           self.action_space, self.init_stocks,
                                           self.collaboration)

        # isinstance is the idiomatic type check (type(...) == int misses
        # int subclasses)
        if isinstance(episode, int):
            self.eps = [episode]
Exemple #8
0
def main():
    """Run tabular Dyna-Q for 10k steps and plot the time-average cost."""
    environment = env()
    dyna_params = DynaParamas()
    max_step = 10000

    q_value = {}
    model = TrivialModel()

    result, avg_result = dyna_q(q_value, model, environment, dyna_params,
                                max_step)
    print(avg_result)

    frame = pd.DataFrame({'x': range(max_step), 'y_1': avg_result})
    plt.xlabel("Time Slot")
    plt.ylabel("Time Average Cost")
    plt.plot('x', 'y_1', data=frame, marker='o',
             markevery=int(max_step / 10), color='red', linewidth=1,
             label="planing")
    plt.legend()
    plt.grid()
    plt.show()
Exemple #9
0
    def __init__(self, usrp_source, epsilon_threshold):
        """GNU Radio sink that runs Q-learning on top of USRP energy sensing.

        usrp_source: the USRP block samples and sensor values are read from.
        epsilon_threshold: energy-detection threshold used to decide whether
            the primary user is present.
        """
        gr.sync_block.__init__(self,
                               name="RL_engine",
                               in_sig=[numpy.float32],
                               out_sig=None)
        self.usrp_source = usrp_source
        # timing parameters (seconds)
        self.slot_interval = 0.5
        self.sensing_duration = 0.02
        self.time_factor = 1
        self.sum_slots = -1
        self.epsilon_threshold = epsilon_threshold
        self.average_power_dbm = 0.0
        self.samp_rate = 1000000
        # number of samples captured per sensing window
        self.N_samp = int((self.sensing_duration * self.samp_rate))
        self.magnitude_sq = []
        self.primary_user_status = {
            'no_result': 0,
            'not_detected': 1,
            'detected': 2,
        }  # 0: energy det. has no result, 1: PU not detected, 2: PU detected.

        #for RL
        self.RL_step_count = 0
        self.avg_reward = 0
        self.avg_window = 100
        self.round_to_change_greedy = 300
        self.env = env()
        self.RL = QLearningTable(actions=list(range(self.env.n_actions)))
        self.s = self.env.reset()
        # self.result=np.chararray((n_round,4),itemsize=10)
        #end for RL
        self.energy_det_status = self.primary_user_status['no_result']
        # NOTE(review): currenttime is never used afterwards; the sensor read
        # may exist for its side effect (or be leftover) — confirm before
        # removing.
        currenttime = self.usrp_source.get_mboard_sensor("gps_time")
        self.start_time = time.time()
        # kick off the periodic slot loop
        self.slot_monitor()
def game():
    """Drive one rendered match between the red and blue soldiers.

    Reads/writes the module globals gamestate, result and action1; draws the
    board and HUD with pygame each half-second step until Env.gameover()
    reports an outcome, then records it in `result` and sets gamestate to 2.
    """
    global gamestate
    global result
    global action1
    Env = env.env(1,10)
    while gamestate == 1:
        while True:


            screen.blit(grass,(0,0))
            screen.blit(band,(600,0))
            #text = font.render("Red Win",10,(0,0,0))
            #screen.blit(text,(600,100))
            if Env.done == False:
                for i in range(Env.size):
                    for j in range(Env.size):
                        # cell value encodes both sides: ones digit marks red,
                        # tens digit marks blue
                        count = Env.state[i][j]
                        if( count % 10 == 1 ):
                            if Env.R_Soldier[0].bullet_count > 0:
                                screen.blit(redgun,(block*j,block*i))
                            else:
                                screen.blit(rednogun,(block*j,block*i))
                        # NOTE(review): true division — this only matches when
                        # count == 10 exactly, so a cell holding both soldiers
                        # (e.g. 11) is never drawn as blue; `count // 10 == 1`
                        # may be what was intended — confirm.
                        if( count/10 == 1 ):
                            if Env.B_Soldier[0].bullet_count > 0:
                                screen.blit(bluegun,(block*j,block*i))
                            else:
                                screen.blit(bluenogun,(block*j,block*i))

            if (Env.gameover() == True):
                gamestate = 2
                # propagate the outcome: result follows Env.win (1, 0, else -1)
                if Env.win == 1:
                    result = 1
                elif Env.win == 0:
                    result = 0
                else:
                    result = -1
                break

            # HUD: bullet counts, distance, and each side's hit probability
            textRB = font.render("Red Bullet:" + str(Env.R_Soldier[0].bullet_count), 10, (255, 0, 0))
            screen.blit(textRB, (620, 100))
            textBB = font.render("Blue Bullet:" + str(Env.B_Soldier[0].bullet_count), 10, (0, 0, 255))
            screen.blit(textBB, (620, 150))
            distance = Env.calcdist(Env.R_Soldier[0], Env.B_Soldier[0])
            textdis = font.render("distance:" + str(distance), 10, (0, 0, 0))
            screen.blit(textdis, (620, 200))

            textRR = font.render("Red Acc:"+str(round(Shoot.f1(distance),3)),10,(255,0,0))
            screen.blit(textRR,(620,250))
            textBR = font.render("Blue Acc:"+str(round(Shoot.f2(distance),3)),10,(0,0,255))
            screen.blit(textBR,(620,300))
            if len(Env.R_Soldier) > 0 and len(Env.B_Soldier)>0:
                # half-second tick, then both soldiers pick and apply actions
                time.sleep(0.5)
                dis = Env.calcdist(Env.R_Soldier[0], Env.B_Soldier[0])
                action1 = Env.R_Soldier[0].act(dis,20,20)
                action2 = Env.B_Soldier[0].act(dis,21,21)
                Env.stepshoot(action1,action2)

            pygame.display.update()
Exemple #11
0
    def __init__(self, envs=os.environ):
        """Read all MySQL-related settings out of the environment.

        envs: mapping to read from (defaults to the process environment).
        """
        def _cfg(key, default=None, fn=None):
            # small wrapper so every setting is read the same way
            if fn is None:
                return env(key, default, envs)
            return env(key, default, envs, fn)

        # database identity / credentials
        self.mysql_db = _cfg('MYSQL_DATABASE')
        self.mysql_user = _cfg('MYSQL_USER')
        self.mysql_password = _cfg('MYSQL_PASSWORD')
        self.mysql_root_password = _cfg('MYSQL_ROOT_PASSWORD', '')
        self.mysql_random_root_password = _cfg('MYSQL_RANDOM_ROOT_PASSWORD',
                                               True, to_flag)
        self.mysql_onetime_password = _cfg('MYSQL_ONETIME_PASSWORD', False,
                                           to_flag)
        # replication credentials
        self.repl_user = _cfg('MYSQL_REPL_USER')
        self.repl_password = _cfg('MYSQL_REPL_PASSWORD')
        # storage layout / tuning
        self.datadir = _cfg('MYSQL_DATADIR', '/var/lib/mysql')
        self.pool_size = _cfg('INNODB_BUFFER_POOL_SIZE', 0, int)

        # state
        self.ip = get_ip()
        self._conn = None
        self._query_buffer = OrderedDict()
def testppo(train, turn, battle, random_seed, nash_lower=20, nash_upper=20):
    """Train a blue PPO agent, then evaluate it against the scripted red agent.

    train: number of PPO training episodes (written into the shared args).
    turn: number of evaluation rounds; battle: games per round.
    random_seed: numpy seed for the evaluation phase.
    nash_lower / nash_upper: unused here — NOTE(review): kept only for the
        caller's signature, confirm before removing.
    Prints per-turn win counts and a paired t-test comparing red vs blue wins.
    """
    # NOTE(review): this mutates the module-level `args` object in place; any
    # later reader of `args` sees num_episode = train.
    a = args
    a.num_episode = train
    networkB = ppo(a)
    torch.manual_seed(10)
    np.random.seed(random_seed)
    Env = env.env(10)
    num_inputs = 4
    red = []
    blue = []
    for i_turn in range(turn):

        red_win = 0
        blue_win = 0
        # fresh observation normalizer per turn; only blue's state is
        # normalized below (red follows its scripted act() policy)
        running_state = ZFilter((num_inputs, ), clip=5.0)

        for j in range(battle):
            stateR, stateB, RR, RB, done, _ = Env.reset()
            while Env.done == False:
                stateR_last = stateR
                stateB_last = stateB
                stateB = running_state(stateB)
                # blue picks its action from the trained network
                action_mean, action_logstd, value = networkB(
                    Tensor(stateB).unsqueeze(0))
                action, logprob = networkB.select_action(
                    action_mean, action_logstd)
                action = action.data.numpy()[0]
                actionB = ou_ppo(np.argmax(action, axis=0), stateR_last,
                                 stateB_last, networkB.step)
                dis = Env.calcdist(Env.R_Soldier[0], Env.B_Soldier[0])
                actionR = Env.R_Soldier[0].act(dis)
                next_stateR, next_state, reward_R, reward, done, _ = Env.stepshoot(
                    actionR, actionB)

                stateR = next_stateR
                stateB = next_state

                if done:
                    break

            # Env.win == 1 counts for red, -1 for blue, anything else is
            # scored as a half win for each side
            if Env.win == 1:
                red_win += 1
            elif Env.win == -1:
                blue_win += 1
            else:
                red_win += 0.5
                blue_win += 0.5

        red.append(red_win)
        blue.append(blue_win)
        print("Turn: {} ,red win: {},blue win: {}".format(
            i_turn, red_win, blue_win))
        print("-----------------")

    print("Blue Win: {},T test: {}".format(np.mean(blue),
                                           stats.ttest_rel(red, blue)))
Exemple #13
0
 def setRequesterCredentials(self, token):
     """Write the eBay auth token into the request XML and save the file.

     token: env() key naming the auth token; the call is a no-op unless
     token is a str and the XML tree has been loaded (self.root is set).
     """
     # `is not None` replaces `!= None`; the old try/except only re-raised
     # (no handling), so it added nothing and was dropped.
     if isinstance(token, str) and self.root is not None:
         for elem in self.root.iter():
             # compare tag names with the XML namespace prefix stripped
             if re.sub('{.*?}', '', elem.tag) == 'eBayAuthToken':
                 elem.text = env(token)
         self.tree.write(self.path, encoding="utf-8", method="xml")
         #print('Set Requester Credentials')
Exemple #14
0
def default_env(*args, **kwargs):
    """Proxy to dotenv.env() that lazily loads ~/.box.env exactly once."""
    if not getattr(default_env, '_loaded', False):
        env_file = os.path.join(os.path.expanduser('~'), '.box.env')
        if os.path.isfile(env_file):
            dotenv.read_dotenv(env_file)
        # mark as loaded even when the file is absent so we only stat once
        default_env._loaded = True
    return dotenv.env(*args, **kwargs)
Exemple #15
0
 def __init__(self):
     """Collect Gmail SMTP settings and message defaults from env()."""
     self.env = env()
     # Gmail submission endpoint (STARTTLS)
     self.SMTP_SERVER = "smtp.gmail.com"
     self.SMTP_PORT = 587
     # credentials come from the environment wrapper
     self.SMTP_USERNAME = self.env.sending_email
     self.SMTP_PASSWORD = self.env.email_pass
     # sender / recipients
     self.EMAIL_FROM = self.env.sending_email
     self.EMAIL_TO = [self.env.revieving_email]
     # message defaults, filled in later
     self.EMAIL_SUBJECT = ""
     self.DATE_FORMAT = "%d/%m/%Y"
     self.EMAIL_SPACE = ", "
     self.DATA = ""
 def __init__(self):
     """Open a pymysql connection configured entirely from env()."""
     settings = {
         'host': env('mysql', 'host', '127.0.0.1'),
         'user': env('mysql', 'user', 'root'),
         'password': env('mysql', 'password', '123456'),
         'db': env('mysql', 'db', 'test'),
         'port': int(env('mysql', 'port', 3306)),
         # rows come back as dicts instead of tuples
         'cursorclass': pymysql.cursors.DictCursor,
         'write_timeout': int(env('mysql', 'write_timeout', 60)),
         'read_timeout': int(env('mysql', 'read_timeout', 30)),
     }
     self.connection = pymysql.connect(**settings)
Exemple #17
0
    def on_status(self, status):
        """Called when a new status arrives.

        Filters the tweet against the followed-ID list and the include* env
        flags, forwarding matching tweets to Telegram. Always returns True
        so the stream stays open.
        """

        data = status._json

        # ignore tweets from accounts we are not following
        if data['user']['id_str'] not in self.twitter_ids:
            return True

        LOG.info(
            strftime("[%Y-%m-%d %H:%M:%S]", gmtime()) + " " +
            data['user']['screen_name'] + ' twittered.')

        for twitter_id in self.twitter_ids:
            if data['user']['id_str'] != twitter_id:
                worthPosting = False
                # NOTE(review): this flag is tested for truthiness while
                # includeUserReply/includeRetweet below use `is False` —
                # confirm env() returns real booleans for all three keys.
                if env(
                        'includeReplyToUser'
                ):  # other Twitter user tweeting to your followed Twitter user
                    if data['in_reply_to_user_id_str'] == twitter_id:
                        worthPosting = True
            else:
                worthPosting = True
                # your followed Twitter users tweeting to random Twitter users
                # (relevant if you only want status updates/opt out of conversations)
                if env('includeUserReply'
                       ) is False and data['in_reply_to_user_id'] is not None:
                    worthPosting = False

            if env('includeRetweet') is False:
                if 'retweeted_status' in data:
                    worthPosting = False  # retweet

            if not worthPosting:
                continue

            # forward text plus any resolved media to the Telegram groups
            text, media_type, media_url = resolve_tweet(data)
            telegram_publish(text, media_type, media_url)

        return True
Exemple #18
0
def td0(episodes):
    """Tabular TD(0) evaluation of a fixed random policy on the 5x7 grid.

    episodes: number of episodes, each starting from state (3, 0).
    Returns the 5x7 state-value array.
    """
    values = np.zeros((5, 7))
    discount = 0.9
    alpha = 0.2
    for _ in np.arange(episodes):
        grid = env.env((3, 0))
        terminate = 0
        while terminate == 0:
            # actions 1/3/5 drawn with probabilities .25/.5/.25
            action = np.random.choice([1, 3, 5], p=[0.25, 0.5, 0.25])
            state = grid.state
            (reward, terminate) = grid.step(action)
            # one-step TD update toward r + gamma * V(s')
            values[state] += alpha * (
                reward + discount * values[grid.state] - values[state])
    return values
 def __init__(self):
     """Hyper-parameters for a dueling double-DQN agent on env()."""
     self.env = env()
     # dimensions derived from the environment
     self.n_actions = len(self.env.actions)
     self.n_feature = len(self.env.state)
     # learning / exploration schedule
     self.learning_rate = 0.01
     self.gamma = 0.8
     self.e_greedy = 0
     self.e_greedy_increment = 0.0001
     # replay buffer and target-network sync
     self.memory_size = 100000
     self.batch_size = 10000
     self.replace_target_iter = 500
     # feature flags
     self.output_graph = True
     self.double_q = True
     self.dueling = True
Exemple #20
0
def telegram_publish(text, media_type, media_url):
    """Push one tweet (text plus optional media) to every configured group."""
    for chat_id in env('TELEGRAM_BOT_GROUPS'):
        try:
            bot.send_message(
                chat_id,
                text,
                'Markdown',
                True  # disable_web_page_preview
            )
            if media_type in ('video', 'gif'):
                bot.send_video(chat_id, media_url)
            elif media_type == 'photo':
                bot.send_photo(chat_id, media_url)
        except Exception as exc:
            # best-effort: one failing group must not block the others
            print(exc)
Exemple #21
0
def authenhandler(req):
	"""mod_python basic-auth handler backed by the InternalUser table.

	Returns apache.OK when the supplied username/password matches a stored
	user, apache.HTTP_UNAUTHORIZED otherwise.
	"""
	# NOTE(review): these module-level globals persist across requests in the
	# same interpreter — a `name` left over from an earlier request could make
	# this one succeed even when no user matches; confirm intended.
	global username, name
	e = env.env(req, 'provider1.example.com')
	
	password = req.get_basic_auth_pw()
	username = req.user
	
	if (username and password):
		for user in structures.InternalUser.InternalUser.dbLoad(e, e.con, query={'Username': username, 'Password': password}, orderBy=('Username',)):
			# Reverify password because of possible database case-insensitivity.
			if user.values['Username'].lower() == username.lower() and user.values['Password'] == password:
				name = user.values['Name']
				break

	if (name):
		return apache.OK
	else:
		return apache.HTTP_UNAUTHORIZED
Exemple #22
0
    def load(self, envs=os.environ):
        """
        Fetches the ContainerPilot config file and asks ContainerPilot
        to render it out so that all environment variables have been
        interpolated.

        envs: mapping used both to locate the config (CONTAINERPILOT) and
        as the environment for the containerpilot subprocess.
        Raises CalledProcessError/OSError when the render step fails.
        """
        self.path = env('CONTAINERPILOT', None, envs)
        log.debug("loading cp {}".format(self.path))
        try:
            cfg = subprocess.check_output(
                ['containerpilot', '-config', self.path, '-template'],
                env=envs.copy())
        except (subprocess.CalledProcessError, OSError) as ex:
            log.error('containerpilot -template returned error: %s', ex)
            # bare raise re-raises with the original traceback intact
            # (`raise (ex)` restarted the traceback from this line)
            raise

        # ContainerPilot configs are JSON5 (comments, trailing commas allowed)
        config = json5.loads(cfg)
        self.config = config
Exemple #23
0
def QLearningUsingEpsilonGreedy(episodes):
    """Tabular Q-learning with epsilon-greedy exploration on the 5x7 grid.

    episodes: number of episodes, each starting from state (3, 0).
    Returns the 5x7x8 action-value array.
    """
    Q = np.zeros((5, 7, 8))
    discount = 0.9
    alpha = 0.2
    epsilon = 0.1
    for _ in np.arange(episodes):
        grid = env.env((3, 0))
        terminate = 0
        while terminate == 0:
            s = grid.state
            # epsilon-greedy: exploit the greedy action, explore w.p. epsilon
            if np.random.rand() > epsilon:
                a = np.argmax(Q[s[0], s[1], :])
            else:
                a = np.random.choice([1, 3, 5], p=[0.25, 0.5, 0.25])
            (r, terminate) = grid.step(a)
            # Q-learning backup toward r + gamma * max_a' Q(s', a')
            best_next = np.max(Q[grid.state[0], grid.state[1], :])
            Q[s[0], s[1], a] += alpha * (r + discount * best_next - Q[s[0], s[1], a])
    return Q
Exemple #24
0
        
        tabla = fix_columns(tabla)
        tabla_h = fix_columns(tabla_h)
        tabla_wp = fix_columns(tabla_wp)
        
        tabla = tabla.astype({'response_time_avg' : 'float64'})
        tabla = tabla.astype({'response_time_first' : 'float64'})
        tabla = tabla.astype({'response_time_max' : 'float64'})
        
        self.history = tabla_h    
        self.webpath = tabla_wp
        self.tabla = tabla
        return tabla
 
if __name__ == "__main__"    : 
    env()    
    #Lista de parejas de fechas que representan un mes, empezando desde la fecha de entrada hasta la actualidad 
    lista =  fechas("2018-04-01").fechas
    
    engine = create_engine("postgresql+psycopg2://{user}:{contr}@{host}:{port}/{base}".format( user = os.environ['REDSHIFT_USER'], 
                                                                                            contr= os.environ["REDSHIFT_PASSWORD"],
                                                                                            port= os.environ["REDSHIFT_PORT"],
                                                                                            base= os.environ["REDSHIFT_DB"], 
                                                                                            host= os.environ["REDSHIFT_HOST"] ), 
                               connect_args={'options': '-csearch_path={schema}'.format( schema = os.environ["REDSHIFT_SCHEMA"] )}, echo = False)

    
    j = 0
    t = []
    h = []
    w = []
import urllib2
import argparse
import os
import sys
import logging
import time

DIRNAME = os.path.dirname(os.path.realpath(__file__))
# make ../common importable before the `log` / `env` imports below
sys.path.append(os.path.join(DIRNAME, '..', 'common'))
# helper shell script that bounces the FCGI workers on a remote host
RESTART_SCRIPT = os.path.join(DIRNAME, "restart_remote_fcgi.sh")

import log
import env

# site-wide settings are parsed out of the shell config by env.env()
siteconfig = os.path.join(DIRNAME, "..", "siteconfig.sh")
var = env.env(siteconfig)
SERVER_NAME = var["SERVER_NAME"]

MAX_RETRIES = 30

class RestartWorkerError(Exception):
    """Error raised by the FCGI-restart helpers in this module."""
    pass

def restart_remote_fcgi(server, username, sshport, request_url, logger, max_retries=MAX_RETRIES):
    # ssh username@server -p sshport, then executes restart_fcgi.sh on server
    # after executing restart_fcgi.sh on server, keeps requesting request_url
    # until the request succeeds (up to max_retries)
    logger.debug("Restarting FCGI workers on %s", server)
    logger.debug("max_retries = %d", max_retries)
    cmd = [RESTART_SCRIPT,
        username,
def ppo(args):
    """Train two PPO agents (red vs blue) against each other in env.env(10).

    args: hyper-parameter namespace (lr, clip, gamma, lamda, batch sizes,
        epoch counts, normalization flags, schedules, ...).
    Returns the trained blue network.
    """
    Env = env.env(10)
    # env = gym.make(args.env_name)
    # num_inputs = env.observation_space.shape[0]
    # num_actions = env.action_space.shape[0]
    num_inputs = 4
    num_actions = 10
    # env.seed(args.seed)
    torch.manual_seed(args.seed)

    networkR = ActorCritic(num_inputs, num_actions, layer_norm=args.layer_norm)
    networkB = ActorCritic(num_inputs, num_actions, layer_norm=args.layer_norm)
    optimizerR = opt.Adam(networkR.parameters(), lr=args.lr)
    optimizerB = opt.Adam(networkB.parameters(), lr=args.lr)

    running_state = ZFilter((num_inputs, ), clip=5.0)

    # record average 1-round cumulative reward in every episode
    reward_record_R = []
    reward_record_B = []
    global_steps = 0

    lr_now = args.lr
    clip_now = args.clip

    for i_episode in range(args.num_episode):
        # collect a batch of rollouts under the current policies
        memoryR = Memory()
        memoryB = Memory()
        num_steps = 0
        reward_list_R = []
        reward_list_B = []
        len_list = []
        while num_steps < args.batch_size:
            stateR, stateB, _, _, _, _ = Env.reset()
            if args.state_norm:
                stateR = running_state(stateR)
                stateB = running_state(stateB)
            reward_sum_R = 0
            reward_sum_B = 0
            for t in range(args.max_step_per_round):
                action_mean_R, action_logstd_R, value_R = networkR(
                    Tensor(stateR).unsqueeze(0))
                # FIX: blue's forward pass previously went through networkR
                # (copy-paste), so blue sampled from red's policy head and
                # stored red's value estimate; use networkB as everywhere else.
                action_mean_B, action_logstd_B, value_B = networkB(
                    Tensor(stateB).unsqueeze(0))
                actionR, logprobaR = networkR.select_action(
                    action_mean_R, action_logstd_R)
                actionB, logprobaB = networkB.select_action(
                    action_mean_B, action_logstd_B)
                actionR = actionR.data.numpy()[0]
                actionB = actionB.data.numpy()[0]
                actionR_real = np.argmax(actionR, axis=0)
                actionB_real = np.argmax(actionB, axis=0)
                # out of bullets: restrict to the 5 movement-only actions
                if Env.R_Soldier[0].bullet_count == 0:
                    actionR_real = actionR_real % 5
                if Env.B_Soldier[0].bullet_count == 0:
                    actionB_real = actionB_real % 5
                logprobaR = logprobaR.data.numpy()[0]
                logprobaB = logprobaB.data.numpy()[0]
                dis = Env.calcdist(Env.R_Soldier[0], Env.B_Soldier[0])
                #actionR = Env.R_Soldier[0].act(dis)
                next_state_R, next_state_B, reward_R, reward_B, done, _ = Env.stepshoot(
                    actionR_real, actionB_real)
                reward_sum_R += reward_R
                reward_sum_B += reward_B
                if args.state_norm:
                    next_state_R = running_state(next_state_R)
                    next_state_B = running_state(next_state_B)
                mask = 0 if done else 1

                memoryR.push(stateR, value_R, actionR, logprobaR, mask,
                             next_state_R, reward_R)
                memoryB.push(stateB, value_B, actionB, logprobaB, mask,
                             next_state_B, reward_B)

                if done:
                    networkR.step += 1
                    networkB.step += 1
                    break

                stateR = next_state_R
                stateB = next_state_B

            num_steps += (t + 1)
            global_steps += (t + 1)
            reward_list_R.append(reward_sum_R)
            reward_list_B.append(reward_sum_B)
            len_list.append(t + 1)
        reward_record_R.append({
            'episode': i_episode,
            'steps': global_steps,
            'meanepreward': np.mean(reward_list_R),
            'meaneplen': np.mean(len_list)
        })

        reward_record_B.append({
            'episode': i_episode,
            'steps': global_steps,
            'meanepreward': np.mean(reward_list_B),
            'meaneplen': np.mean(len_list)
        })

        batchR = memoryR.sample()
        batchB = memoryB.sample()
        batch_size = len(memoryR)

        # Sample Parameters
        rewardsR = Tensor(batchR.reward)
        valuesR = Tensor(batchR.value)
        masksR = Tensor(batchR.mask)
        actionsR = Tensor(batchR.action)
        statesR = Tensor(batchR.state)
        oldlogprobaR = Tensor(batchR.logproba)

        rewardsB = Tensor(batchB.reward)
        valuesB = Tensor(batchB.value)
        masksB = Tensor(batchB.mask)
        actionsB = Tensor(batchB.action)
        statesB = Tensor(batchB.state)
        oldlogprobaB = Tensor(batchB.logproba)

        returnsR = Tensor(batch_size)
        deltasR = Tensor(batch_size)
        advantagesR = Tensor(batch_size)

        returnsB = Tensor(batch_size)
        deltasB = Tensor(batch_size)
        advantagesB = Tensor(batch_size)

        prev_return_R = 0
        prev_value_R = 0
        prev_advantage_R = 0

        prev_return_B = 0
        prev_value_B = 0
        prev_advantage_B = 0
        # backward pass over the batch: discounted returns, TD deltas, and
        # GAE advantages (masks zero the recursion at episode boundaries)
        for i in reversed(range(batch_size)):
            returnsR[i] = rewardsR[i] + args.gamma * prev_return_R * masksR[i]
            deltasR[i] = rewardsR[
                i] + args.gamma * prev_value_R * masksR[i] - valuesR[i]
            advantagesR[i] = deltasR[
                i] + args.gamma * args.lamda * prev_advantage_R * masksR[i]

            returnsB[i] = rewardsB[i] + args.gamma * prev_return_B * masksB[i]
            deltasB[i] = rewardsB[
                i] + args.gamma * prev_value_B * masksB[i] - valuesB[i]
            advantagesB[i] = deltasB[
                i] + args.gamma * args.lamda * prev_advantage_B * masksB[i]

            prev_return_R = returnsR[i]
            prev_value_R = valuesR[i]
            prev_advantage_R = advantagesR[i]

            prev_return_B = returnsB[i]
            prev_value_B = valuesB[i]
            prev_advantage_B = advantagesB[i]

        if args.advantage_norm:
            advantagesR = (advantagesR -
                           advantagesR.mean()) / (advantagesR.std() + EPS)
            advantagesB = (advantagesB -
                           advantagesB.mean()) / (advantagesB.std() + EPS)

        for i_epoch in range(
                int(args.num_epoch * batch_size / args.minibatch_size)):
            # current batch sample
            minibatch_ind = np.random.choice(batch_size,
                                             args.minibatch_size,
                                             replace=False)
            minibatch_statesR = statesR[minibatch_ind]
            minibatch_actionsR = actionsR[minibatch_ind]
            minibatch_oldlogprobaR = oldlogprobaR[minibatch_ind]
            #print(minibatch_statesR,minibatch_actionsR)
            minibatch_newlogprobaR = networkR.get_logproba(
                minibatch_statesR, minibatch_actionsR)
            minibatch_advantagesR = advantagesR[minibatch_ind]
            minibatch_returnsR = returnsR[minibatch_ind]
            minibatch_newvaluesR = networkR._forward_critic(
                minibatch_statesR).flatten()

            minibatch_statesB = statesB[minibatch_ind]
            minibatch_actionsB = actionsB[minibatch_ind]
            minibatch_oldlogprobaB = oldlogprobaB[minibatch_ind]
            minibatch_newlogprobaB = networkB.get_logproba(
                minibatch_statesB, minibatch_actionsB)
            minibatch_advantagesB = advantagesB[minibatch_ind]
            minibatch_returnsB = returnsB[minibatch_ind]
            minibatch_newvaluesB = networkB._forward_critic(
                minibatch_statesB).flatten()

            # PPO clipped surrogate objective for each agent
            ratioR = torch.exp(minibatch_newlogprobaR - minibatch_oldlogprobaR)
            ratioB = torch.exp(minibatch_newlogprobaB - minibatch_oldlogprobaB)

            surr1R = ratioR * minibatch_advantagesR
            surr2R = ratioR.clamp(1 - clip_now,
                                  1 + clip_now) * minibatch_advantagesR
            loss_surrR = -torch.mean(torch.min(surr1R, surr2R))

            surr1B = ratioB * minibatch_advantagesB
            surr2B = ratioB.clamp(1 - clip_now,
                                  1 + clip_now) * minibatch_advantagesB
            loss_surrB = -torch.mean(torch.min(surr1B, surr2B))

            # value clip in the paper,not work
            if args.lossvalue_norm:
                minibatch_return_6stdR = 6 * minibatch_returnsR.std()
                loss_valueR = torch.mean(
                    (minibatch_newvaluesR -
                     minibatch_returnsR).pow(2)) / minibatch_return_6stdR
                minibatch_return_6stdB = 6 * minibatch_returnsB.std()
                loss_valueB = torch.mean(
                    (minibatch_newvaluesB -
                     minibatch_returnsB).pow(2)) / minibatch_return_6stdB
            else:
                loss_valueR = torch.mean(
                    (minibatch_newvaluesR - minibatch_returnsR).pow(2))
                loss_valueB = torch.mean(
                    (minibatch_newvaluesB - minibatch_returnsB).pow(2))

            loss_entropyR = torch.mean(
                torch.exp(minibatch_newlogprobaR) * minibatch_newlogprobaR)
            # FIX: blue's entropy term previously weighted the new log-probs
            # by exp(OLD log-probs); match the red agent and use the current
            # policy's log-probs for both factors.
            loss_entropyB = torch.mean(
                torch.exp(minibatch_newlogprobaB) * minibatch_newlogprobaB)

            total_lossR = loss_surrR + args.loss_coeff_value * loss_valueR + args.loss_coeff_entropy * loss_entropyR
            total_lossB = loss_surrB + args.loss_coeff_value * loss_valueB + args.loss_coeff_entropy * loss_entropyB
            optimizerR.zero_grad()
            optimizerB.zero_grad()
            total_lossR.backward()
            total_lossB.backward()
            optimizerR.step()
            optimizerB.step()

        # anneal the clip range and the Adam learning rate linearly
        if args.schedule_clip == 'linear':
            ep_ratio = 1 - (i_episode / args.num_episode)
            clip_now = args.clip * ep_ratio

        # adam learning rate
        if args.schedule_adam == 'linear':
            ep_ratio = 1 - (i_episode / args.num_episode)
            lr_now = args.lr * ep_ratio
            for g in optimizerR.param_groups:
                g['lr'] = lr_now
            for h in optimizerB.param_groups:
                h['lr'] = lr_now

        if i_episode % args.log_num_episode == 0:
            print('Episode: {} Red Reward: {:.4f} Red Loss = {:.4f} Blue Reward: {:.4f} Blue Loss = {:.4f}' \
                  .format(i_episode, ou_reward_ppo(reward_record_R[-1]['meanepreward'],networkR.step),total_lossR.data,ou_reward_ppo(reward_record_B[-1]['meanepreward'],networkB.step),total_lossB.data))
            print('-----------------')

    return networkB
import time
from string import Template
import logging

DIRNAME = os.path.dirname(os.path.realpath(__file__))

# make the parent package and ../../common importable for the imports below
sys.path.append(os.path.join(DIRNAME, '..'))
sys.path.append(os.path.join(DIRNAME, '..', '..', 'common'))

import log
import env
import bouncer_process_manager
from bouncer_process_manager import BouncerProcessManager

# redmine deployment settings are parsed out of its env.sh by env.env()
remine_deps = os.path.join(DIRNAME, "..", "..", "apps", "redmine_app", "env.sh")
var = env.env(remine_deps)

INSTALL_REDMINE_PATH = var["INSTALL_REDMINE_PATH"]

# TODO: Is it possible to modify the configuration of mongrel such that it only
# handles one request at time? This would hopefully speed up the time it takes
# restart mongrel and lead to lower memory foot print.
# REDMINE_CMD_TEMPLATE_STR = 'mongrel_rails start -p $port -e production -c %s -P /tmp/mongrel$port.pid' % INSTALL_REDMINE_PATH
# REDMINE_CMD_TEMPLATE_STR = '/usr/bin/ruby /home/fcgi_user/redmine-1.4.0/script/server mongrel -e production -p $1'
# string.Template command — $port is substituted per worker at launch time
REDMINE_CMD_TEMPLATE_STR = 'ruby %s/script/server mongrel -e production -p $port' \
    % INSTALL_REDMINE_PATH

class BouncerForRedmine(BouncerProcessManager):

    def start_worker(self, addr, port):
        '''Must attempt to launch the specified worker. Should return the popen object for the new worker
Exemple #28
0
# Accumulators for the simulation run below.
# transfer sizes under rule 3
transfer_list_rule3=[]
# response time
response_list_rule3=[]
# request details
request_list=[]
# tier infos
tier_list=[]

# Wall-clock start of the whole experiment.
t0=time.time()

warnings.filterwarnings('ignore')
#progress=ProgressBar()
# Ensure the output directory for the heatmap results exists.
if os.path.exists('./Heatmap_3rdrule_1000-123-temp46-fixgenreq-tempdesc10/')==False:
    os.mkdir('./Heatmap_3rdrule_1000-123-temp46-fixgenreq-tempdesc10/')

# One environment per storage tier.
# NOTE(review): tier1/tier2/tier3 are defined earlier in the file (not
# visible here) -- presumably tier descriptions/configurations.
env1=env(tier1)
env2=env(tier2)
env3=env(tier3)

for turn in range(1000):
    print('%dth turn'%turn)
    t1=time.time()
    ## generate requests
    #Request=Req_generator(env1.tier,env2.tier,env3.tier)
    #Request=Request.loc[Request['request']==1]
    #req_limit=random.randint(170,230)
    #Request=Request.sample(frac=1)[0:req_limit]
    
    ## use same requests
    Request=pd.read_csv('./Requests_temp46_1000/req_%d.csv'%turn)
    
 def __init__(self, Q):
     """Store the Q-table and create the environment, then zero out the
     Q-values of the terminal state so it is never selected as a target.

     Q: 2-D table indexed by [row, state-index] -- assumed from the slice
     below; confirm against the caller.
     """
     self.Q = Q
     self.env = env()
     # NOTE(review): assumes env() exposes .states (a list) and .final_state.
     self.Q[:, self.env.states.index(self.env.final_state)] = 0
Exemple #30
0
#!/usr/bin/env python3

import sys, time, twitter, spotipy
import datetime, traceback
import mysql.connector
import spotipy.util as util

sys.path.append('../')
from env import env

# Central configuration object: credentials and playlist URIs.
e = env()
alltime_playlist_uri = e.alltime_pl
weekly_playlist_uri = e.weekly_pl

# Twitter API client built from the configured OAuth credentials.
twit = twitter.Api(consumer_key=e.twit_consumer_key,
                   consumer_secret=e.twit_consumer_secret,
                   access_token_key=e.twit_access_token_key,
                   access_token_secret=e.twit_access_token_secret)

# MySQL connection for persisting track history.
mydb = mysql.connector.connect(host=e.mysql_host,
                               user=e.mysql_user,
                               passwd=e.mysql_pw,
                               database=e.mysql_db)

# Spotify OAuth: request playlist-modification scopes interactively.
scope = 'playlist-modify-public playlist-modify-private'
token = util.prompt_for_user_token(username=e.spot_username,
                                   scope=scope,
                                   client_id=e.spot_client_id,
                                   client_secret=e.spot_client_secret,
                                   redirect_uri=e.spot_redirect_uri)
Exemple #31
0
def handler(req):
	"""mod_python request handler.

	Authenticates the request, optionally redirects to the HTTPS URL,
	writes the common page header, then dispatches to the requested
	datamodules.<module> action handler.

	Returns an apache status code (apache.OK on success, or the
	authentication handler's status on failure).
	"""
	e = env.env(req, 'provider1.example.com')
	auth = authenhandler(req)
	if auth != apache.OK: return(auth)
	
	# Module/action selection comes from the request's form parameters.
	module = formGet(e, 'module')
	action = formGet(e, 'action')
	if action: action = action.lower()

	# Redirect to SSL URL
	if e.requireSSL and req.server.port != 443:
		req.headers_out['Location'] = 'https://' + req.hostname + req.unparsed_uri
		req.status = apache.HTTP_MOVED_PERMANENTLY
		req.content_type = 'text/html'
		req.send_http_header()
		req.write("""<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
	<head>
		<link href="styles.css" media="all" rel="stylesheet" type="text/css" />
		<title>Redirecting...</title>
	</head>
	<body>
		<h1>Redirecting...</h1>
		<p>You are being redirected to: <a href="%(dest)s">%(dest)s</a></p>
	</body>
</html>""" % {'dest': req.headers_out['Location'] })
		return apache.OK

	req.content_type = 'text/html'
	req.send_http_header()

	# NOTE(review): `name` and `username` are not defined anywhere in this
	# function -- presumably globals set by authenhandler(); confirm, since
	# as written this write would raise NameError if they are missing.
	req.write("""<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
	<head>
		<link href="styles.css" media="all" rel="stylesheet" type="text/css" />
		<title>Skime</title>
	</head>
	<body>
		<table class="top_header_table" summary="Top Header">
			<colgroup>
				<col width="1*" />
				<col width="*" />
			</colgroup>
			<tr class="top_header_row1">
				<td style="width: 1*;"><a href="/"><img src="images/logo.gif" alt="Skime" /></a></td>
				<td class="top_header_text">Skime</td>
			</tr>
			<tr class="top_header_row2">
				<td colspan="2">User: """ + name + ' (' + username + ')' + """</td>
			</tr>
		</table>
""")

	if not module:
		module = 'GeneralSearch'
		action = 'searchform'

	try:
		# SECURITY(review): `module` comes straight from the request and is
		# fed to __import__ and eval -- an attacker-controlled module name
		# could evaluate arbitrary attribute chains. Consider importlib +
		# getattr with a whitelist of module names.
		modFile = __import__('datamodules.'+module)
		modCls = eval('modFile.'+module+'.'+module)
		mod = modCls(e)
		req.write(mod.handleAction(e, action))
	except Exception:
		# Was a bare `except:` -- that also swallowed SystemExit and
		# KeyboardInterrupt. Narrowed to Exception; message unchanged.
		req.write('Uh-oh.  Couldn\'t even load the standard module')

	req.write("</body></html>\n")

	return apache.OK
# Recipients of error emails (Django settings module fragment).
ADMINS = (
    ('FEINHEIT Developers', '*****@*****.**'),
)
MANAGERS = ADMINS
# '${DOMAIN}' is a template placeholder substituted at project creation time.
DEFAULT_FROM_EMAIL = 'no-reply@${DOMAIN}'
SERVER_EMAIL = '*****@*****.**'

# Database/cache configuration is taken from environment URLs
# (DATABASE_URL / CACHE_URL style).
DATABASES = {
    'default': dj_database_url.config(),
}

CACHES = {
    'default': django_cache_url.config(),
}

# Required settings fail loudly at import time if the env var is missing.
SECRET_KEY = env('SECRET_KEY', required=True)
FORCE_DOMAIN = env('FORCE_DOMAIN')
ALLOWED_HOSTS = env('ALLOWED_HOSTS', required=True)

TIME_ZONE = 'Europe/Zurich'
LANGUAGE_CODE = 'de-ch'
LANGUAGES = (
    # ('en', 'English'),
    ('de', 'German'),
    # ('fr', 'French'),
    # ('it', 'Italian'),
)

USE_I18N = True
USE_L10N = True
USE_TZ = True
#!/usr/bin/env python
import env
import os
import sys

if __name__ == "__main__":
    # Load variables from a .env file before deciding on settings.
    env.read_dotenv()
    # 'test' anywhere in argv forces the test settings module;
    # '${PROJECT_NAME}' is a template placeholder filled in at scaffold time.
    if 'test' in sys.argv:
        os.environ['DJANGO_SETTINGS_MODULE'] = '${PROJECT_NAME}.settings.test'
    else:
        # Respect an explicit DJANGO_SETTINGS_MODULE, falling back to the
        # value from the environment/.env, then to the local settings.
        os.environ.setdefault("DJANGO_SETTINGS_MODULE",
                              env.env("DJANGO_SETTINGS_MODULE",
                                      "${PROJECT_NAME}.settings.local"))

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
Exemple #34
0
		out.append(']),\n')
		out.append('FOREIGN KEY ([')
		out.append('],['.join([record.allFields[fieldName].dbName
		  for fieldName in fields
		  if record.allFields[fieldName].dbPrimaryKey]))
		out.append(']) REFERENCES [')
		out.append(record.dbTable)
		out.append(']\n')
		out.append(');\n')

	if record.dbMiscellaneous != '':
		out.append(record.dbMiscellaneous)

	return ''.join(out)

# Environment with no request context; drives which table structures are
# included in the generated schema below.
e = env.env(None)
tables = []

#
# NOTE:
#
# Only one instance of a dynamic table should be added here.
# This will be the top-level table with the non-dynamic properties.
#
# Each feature flag on the env pulls in the matching structures module and
# registers its table class.
# Round 1
if e.domain:
	import structures.Domain
	tables.append(structures.Domain.Domain)
if e.equipmentClass:
	import structures.EquipmentClass
import urllib2
import json
import re
import random
import httplib


DIRNAME = os.path.dirname(os.path.realpath(__file__))

sys.path.append(os.path.join(DIRNAME, '..', '..', '..', 'common'))

import log
import env

# Parse the MediaWiki app's env.sh for the attack-page URL regex.
mediawiki_app = os.path.join(DIRNAME, "..", "env.sh")
var = env.env(mediawiki_app)
MEDIAWIKI_ATTACK_PAGES_RE = var["MEDIAWIKI_ATTACK_PAGES_RE"]
attack_re = re.compile(MEDIAWIKI_ATTACK_PAGES_RE)

# Site-wide config: server name used in error messages below.
siteconfig = os.path.join(DIRNAME, "..", "..", "..", "siteconfig.sh")
var = env.env(siteconfig)
SERVER_NAME = var["SERVER_NAME"]

def getjson(url):
    """Fetch ``url`` and decode its body as JSON.

    Exits the process with status 1 (after printing a hint to stderr)
    when the URL cannot be reached.
    """
    try:
        resp = urllib2.urlopen(url)
    except urllib2.URLError:
        sys.stderr.write("Error: Could not access %s. Perhaps MediaWiki is not running.\n\n" % SERVER_NAME)
        sys.exit(1)
    body = resp.read()
    return json.loads(body)
from string import Template
import logging

DIRNAME = os.path.dirname(os.path.realpath(__file__))

sys.path.append(os.path.join(DIRNAME, '..'))
sys.path.append(os.path.join(DIRNAME, '..', '..', 'common'))

import log
import env
import bouncer_process_manager
from bouncer_process_manager import BouncerProcessManager

# env.sh describing where the (deliberately vulnerable) php-cgi binary lives.
dependencies = os.path.join(DIRNAME, "..", "..", "dependencies", "env.sh")

var = env.env(dependencies)

PHP_CGI_VULN_BIN = var["PHP_CGI_VULN_BIN"]
KILL_SQL_PHP = os.path.join(DIRNAME, "kill_sql.php")
# string.Template command line for a FastCGI worker; $addr/$port are
# substituted when the worker is launched.
PHP_FCGI_CMD_TEMPLATE_STR = '%s -b $addr:$port' % PHP_CGI_VULN_BIN

class BouncerForPhp(BouncerProcessManager):

    def start_worker(self, addr, port):
        '''Must attempt to launch the specified worker. Should return the popen object for the new worker
           or None, if the worker couldn't be be launched for some reason.'''
        cmd_str = Template(PHP_FCGI_CMD_TEMPLATE_STR).substitute( \
                addr = addr, \
                port = str(port) \
            )
        self.logger.debug("cmd_str='%s'" % cmd_str)
Exemple #37
0
def mainLoop():
    """Poll Spotify every 5 seconds and tweet the current track once it is
    three quarters played.

    Implemented as a small state machine (see the state table below).
    Runs forever; relies on env/spot/tweet_song/update_db/check_printed/
    get_curr_time defined elsewhere in this file.
    """

    # Setting up environment, i.e. keys, and needed objects.
    e = env()
    sp = spot(e)
    prev_tr_uri = str()
    state = 0
    printed = False

    # This is the main loop that the program will run.
    # States and their associated value:
    #
    # no_user = 0
    # prog_lt_tqp = 1
    # tweet_song = 2
    # prog_gt_tqp = 3
    # wait_for_resume = 4
    while True:

        # Spotify Authentication
        sp.update_obj(e)

        # No User
        if state == 0:
            if sp.sp_obj is not None:
                if not sp.ct_is_playing:
                    state = 4
                    printed = False

                else:
                    state = 1
                    printed = False
            else:
                check_printed(printed, state)
                state = 0
                printed = True

        # Progress < 3/4 track length
        elif state == 1 and sp.sp_obj is not None:
            if not sp.ct_is_playing:
                state = 4
                printed = False

            elif (sp.ct_is_playing and (3 * sp.ct_length) / 4 < sp.ct_progress
                  and not sp.tweeted and sp.ct_uri == prev_tr_uri):
                state = 2
                printed = False

            else:
                # If the song is different from the previous song, then set tweeted to false
                if sp.ct_uri != prev_tr_uri:
                    sp.tweeted = False

                prev_tr_uri = sp.ct_uri
                check_printed(printed, state)
                state = 1
                printed = True

        # Tweet Song
        elif state == 2:

            # Apparently it is possible to get into this state without a valid obj
            # (fixed typo: was "Waring!")
            if sp.sp_obj is not None and sp.ct_name == "":
                print(
                    "[{}]\033[31mWarning!\u001b[0m: Spotify object exists but no data filled! Forcing state 0!"
                    .format(get_curr_time()))
                state = 0
                continue

            # (removed pointless sp_obj_ready alias of sp and its dead
            # trailing `= None` reset -- behavior unchanged)
            tweet_song(sp, e, state)
            update_db(sp, e)
            prev_tr_uri = sp.ct_uri
            state = 3
            printed = False

        # Progress > 3/4 track length && Tweeted
        elif state == 3 and sp.sp_obj is not None:
            if not sp.ct_is_playing:
                state = 4
                printed = False

            elif sp.ct_uri != prev_tr_uri:
                state = 1
                printed = False
                sp.tweeted = False

            else:
                check_printed(printed, state)
                state = 3
                printed = True

        # Waiting for Playback to be Resumed
        elif state == 4 and sp.sp_obj is not None:
            if sp.ct_is_playing:
                if sp.tweeted:
                    state = 3
                    printed = False

                else:
                    state = 1
                    printed = False

            else:
                check_printed(printed, state)
                state = 4
                printed = True

        # Correction for falling out of state, (Not State 0 but also sp_obj is None)
        # Essentially resetting to the default state
        else:
            state = 0
            printed = False

        # Add a sleep(5) as to not spam either Api
        time.sleep(5)
Exemple #38
0

class AtariProcessor(Processor):
    """keras-rl reward processor that clamps raw rewards into [-100, 100]."""

    def process_reward(self, reward):
        # Bound the reward magnitude so outliers do not destabilise training.
        return np.clip(reward, -100.0, 100.0)


# Command-line interface: train vs. test, environment name, optional weights.
parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=['train', 'test'], default='train')
parser.add_argument('--env-name', type=str, default='WHG')
parser.add_argument('--weights', type=str, default=None)
args = parser.parse_args()
#env_name = "WHG"
# Get the environment and extract the number of actions.
#env = gym.make(args.env_name)
# NOTE(review): the custom env() takes a numeric argument (4) -- its meaning
# is not visible here; args.env_name is effectively ignored.
env = env(4)
np.random.seed(20)
env.seed(20)
nb_actions = env.action_space

# Next, we build our model. We use the same model that was described by Mnih et al. (2015).
model = Sequential()

# (width, height, channels)
model.add(Flatten(input_shape=(
    1,
    14,
)))
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(64))
Exemple #39
0
	if reRbe.search(section):
		rbe = reRbe.search(section).group()
	else:
		rbe = 0

	if rePvc.search(section):
		pvc = rePvc.search(section).group(1)
	else:
		pvc = 0

	if reVbr.search(section):
		vbr = (reVbr.search(section).group(1), reVbr.search(section).group(2))
	else:
		vbr = (0,0)

	e = env.env(None, 'wiktel.com')
	cids = getCid(e, intPvcPart)
	if cids:
		cr = CustomerRecord()
		cr.dbLoad(e, {'CustomerID': cids[0]})

		ma = cr.data[0][0]
		founds.append("ATM Int: " + str(atmInt) + ", Unnumbered Int: " + str(unnum) + ", RBE: " + str(rbe) + ", PVC: " + str(pvc) + ", VBR: " + str(vbr) + "\n" +
			"Name: " + ma.values['FirstName'] + " " + ma.values['LastName'] + "    Company: " + ma.values['Company'] + "   Billing Number: " + ma.values['BillingNumber'] + "\n")
	else:
		notFounds.append("ATM Int: " + str(atmInt) + ", Unnumbered Int: " + str(unnum) + ", RBE: " + str(rbe) + ", PVC: " + str(pvc) + ", VBR: " + str(vbr) + "\n" +
			'No record found for interface ' + atmInt + '\n')

# Write the "not found" report (Python 2: file() is the builtin open).
# notFounds is populated by the loop earlier in this file.
f = file("dsl.txt", 'w')
f.write("-- Not Found --\n")
for x in notFounds: f.write(x)
        type=validate.domain,
        help='Domain name')
    parser.add_argument(
        'nice_name',
        type=validate.nice_name,
        help='Nice name')
    parser.add_argument(
        '-p', '--project-name',
        type=validate.project_name,
        help='Python module for the project [%(default)s]',
        default='box')
    parser.add_argument(
        '-s', '--server',
        type=validate.server,
        help='Server [%(default)s]',
        default=env.env('SERVER'))
    parser.add_argument(
        '-d', '--destination',
        type=str,
        help='The destination path for the project [./build/$DOMAIN_SLUG]')
    parser.add_argument(
        '--charge',
        action='store_true',
        help='Charge ahead, do not ask for confirmation')
    args = parser.parse_args()

    if not args.server:
        print(color(
            'Either specify a server using --server or add a default value'
            ' for SERVER in ~/.box.env', 'red', True))
        sys.exit(1)
Exemple #41
0
from env import env

# Dump the resolved values for the two configuration keys of interest.
for key in ("secret", "app"):
    print(env(key))
import time
from string import Template
import logging

DIRNAME = os.path.dirname(os.path.realpath(__file__))

sys.path.append(os.path.join(DIRNAME, '..'))
sys.path.append(os.path.join(DIRNAME, '..', '..', 'common'))

import log
import env
import bouncer_process_manager
from bouncer_process_manager import BouncerProcessManager

# Parse the OSQA app's env.sh for the install location.
osqa_deps = os.path.join(DIRNAME, "..", "..", "apps", "osqa_app", "env.sh")
var = env.env(osqa_deps)

INSTALL_OSQA_PATH = var["INSTALL_OSQA_PATH"]

# string.Template command line for launching an OSQA gunicorn worker;
# $port is substituted when a worker is started.
OSQA_CMD_TEMPLATE_STR = 'python %s/manage.py run_gunicorn 0.0.0.0:$port' % INSTALL_OSQA_PATH

class BouncerForOsqa(BouncerProcessManager):

    def start_worker(self, addr, port):
        '''Must attempt to launch the specified worker. Should return the popen object for the new worker
           or None, if the worker couldn't be be launched for some reason.'''
        cmd_str = Template(OSQA_CMD_TEMPLATE_STR).substitute( \
                addr = addr, \
                port = str(port) \
            )
        self.logger.debug("cmd_str='%s'" % cmd_str)