Example #1
    def __init__(self, args, env, env_params):
        self.args = args
        self.env = env
        self.env_params = env_params
        # create the network
        self.actor_network = actor(env_params)
        self.critic_network = critic(env_params)
        # sync the networks across the cpus
        sync_networks(self.actor_network)
        sync_networks(self.critic_network)
        # build up the target network
        self.actor_target_network = actor(env_params)
        self.critic_target_network = critic(env_params)
        # load the weights into the target networks
        self.actor_target_network.load_state_dict(
            self.actor_network.state_dict())
        self.critic_target_network.load_state_dict(
            self.critic_network.state_dict())
        # if use gpu
        if self.args.cuda and torch.cuda.is_available():
            self.actor_network.cuda()
            self.critic_network.cuda()
            self.actor_target_network.cuda()
            self.critic_target_network.cuda()
        # create the optimizer
        self.actor_optim = torch.optim.Adam(self.actor_network.parameters(),
                                            lr=self.args.lr_actor)
        self.critic_optim = torch.optim.Adam(self.critic_network.parameters(),
                                             lr=self.args.lr_critic)
        # her sampler
        self.her_module = her_sampler(self.args.replay_strategy,
                                      self.args.replay_k,
                                      self.env.compute_reward)
        # create the replay buffer
        self.buffer = replay_buffer(self.env_params, self.args.buffer_size,
                                    self.her_module.sample_her_transitions)
        # create the normalizer
        self.o_norm = normalizer(size=env_params['obs'],
                                 default_clip_range=self.args.clip_range)
        self.g_norm = normalizer(size=env_params['goal'],
                                 default_clip_range=self.args.clip_range)

        # add tensorboardX tool

        # create the directory to store the model
        if MPI.COMM_WORLD.Get_rank() == 0:
            if not os.path.exists(self.args.save_dir):
                os.mkdir(self.args.save_dir)
            # path to save the model
            self.model_path = os.path.join(self.args.save_dir,
                                           self.args.env_name)
            if not os.path.exists(self.model_path):
                os.mkdir(self.model_path)
            print('model path', self.model_path)

        # self.writer = SummaryWriter('./logs')
        self.reward_list = []
        self.reward_record = []
        self.success_rate_list = []
        self.success_list = []
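
Several of these agents construct normalizer(size=..., default_clip_range=...) and later read or assign its mean and std attributes, but the class itself never appears in this listing. A minimal running-statistics sketch consistent with that interface follows; the update and normalize method names are assumptions, not the original API:

import numpy as np

class normalizer:
    def __init__(self, size, default_clip_range=np.inf):
        self.size = size
        self.default_clip_range = default_clip_range
        self.sum = np.zeros(size, dtype=np.float64)
        self.sumsq = np.zeros(size, dtype=np.float64)
        self.count = 0
        self.mean = np.zeros(size, dtype=np.float64)
        self.std = np.ones(size, dtype=np.float64)

    def update(self, v):
        # accumulate batch statistics and refresh mean/std
        v = np.asarray(v).reshape(-1, self.size)
        self.sum += v.sum(axis=0)
        self.sumsq += (v ** 2).sum(axis=0)
        self.count += v.shape[0]
        self.mean = self.sum / self.count
        var = self.sumsq / self.count - self.mean ** 2
        self.std = np.sqrt(np.maximum(var, 1e-8))

    def normalize(self, v, clip_range=None):
        # standardize and clip, as the agents do before feeding the networks
        clip = clip_range if clip_range is not None else self.default_clip_range
        return np.clip((v - self.mean) / self.std, -clip, clip)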
Example #2
def scrap_ratings(doctor_url):
    comments = list()
    r = requests.get(doctor_url)
    if r.status_code != 200:
        raise Exception(f"Request error: {r.status_code}")

    soup = BeautifulSoup(r.text, "html.parser")

    try:
        section = soup.find("div", {
            "class": "col-12 bg-white rounded-15 py-3 class-46"
        }).find_all("div", {"class": "col-12"})
    except AttributeError:  # ratings section not found on the page
        return comments

    for row in section:
        rating = int(
            re.findall(r'class="red" style="width: (\d+)', str(row))[0]) / 20
        comment = re.findall(r"<\/div> <\/div> <\/div> <\/div>([\s\S]+)<small",
                             str(row))[0]

        comment = re.sub(r"[،؟\?\.\!]+(?=[،؟\?\.\!])", "",
                         normalizer(comment.strip()))

        comments.append([comment, rating])
    return comments
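
The substitution re.sub(r"[،؟\?\.\!]+(?=[،؟\?\.\!])", "", ...) recurs throughout these scrapers: it deletes every punctuation mark that is immediately followed by another, collapsing a run of mixed Persian/Latin punctuation down to its final character. A quick worked example:

import re

PUNCT_RUN = r"[،؟\?\.\!]+(?=[،؟\?\.\!])"
print(re.sub(PUNCT_RUN, "", "عالی بود!!!!"))  # -> 'عالی بود!'
print(re.sub(PUNCT_RUN, "", "چرا؟؟!!"))       # -> 'چرا!'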
Example #3
def data_load(Config):
    path = Config['data_set']
    text = open(path).read()
    text = normalizer(text)
    print('corpus length:', len(text))

    chars = set(text)
    print('total chars:', len(chars))
    char_indices = dict((c, i) for i, c in enumerate(chars))
    indices_char = dict((i, c) for i, c in enumerate(chars))

    maxlen = 20
    step = 3
    sentences = []
    next_chars = []
    for i in range(0, len(text) - maxlen, step):
        sentences.append(text[i:i + maxlen])
        next_chars.append(text[i + maxlen])
    print('nb sequences:', len(sentences))

    print('vectorization...')
    X = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)
    y = np.zeros((len(sentences), len(chars)), dtype=bool)
    for i, sentence in enumerate(sentences):
        for t, char in enumerate(sentence):
            X[i, t, char_indices[char]] = 1
        y[i, char_indices[next_chars[i]]] = 1

    return (chars, char_indices, indices_char, maxlen, X, y, text)
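
The returned (X, y) pair is one-hot input windows and next-character targets for a character-level language model. A minimal sketch of the downstream use, assuming Keras (the model itself is not part of this listing):

from keras.models import Sequential
from keras.layers import LSTM, Dense

chars, char_indices, indices_char, maxlen, X, y, text = data_load(Config)
model = Sequential([
    LSTM(128, input_shape=(maxlen, len(chars))),  # one window of one-hot chars
    Dense(len(chars), activation='softmax'),      # next-character distribution
])
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(X, y, batch_size=128, epochs=1)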
Example #4
def crawl_linetoday(file):
    norm = normalizer()
    txt = loadtxt(file)
    txt = txt.replace("><", ">\n<")
    txt_list = txt.split("\n")
    result = ""
    idx_t = 0
    is_write = 0
    for t in txt_list:
        if '<p class="comm">' in t:
            if "</p>" not in t:
                t = txt_list[idx_t + 1]
            t = t.replace('<p class="comm">', "")
            idx = 0
            while '</p>' not in t[idx:(idx + 4)]:
                if t[idx] == "<":
                    is_write += 1
                elif t[idx] == ">":
                    is_write -= 1
                if is_write == 0 and t[idx] != ">":
                    result += t[idx].lower()
                idx += 1
            result += "\n"
        idx_t += 1
    result = norm.normalize_text(result)
    result = norm.clean_text(result)
    savetxt(result, "norm_" + file)

    return (result != '')
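
crawl_linetoday depends on loadtxt and savetxt helpers that this listing does not include. Plausible UTF-8 file helpers matching the call signatures, offered as an assumption rather than the original code:

def loadtxt(path):
    # assumed helper: read the whole file as UTF-8 text
    with open(path, encoding="utf-8") as f:
        return f.read()

def savetxt(text, path):
    # assumed helper: write the text back out as UTF-8
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)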
Example #5
def scrap_comments(page_id):
    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:87.0) Gecko/20100101 Firefox/87.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
        "Sec-GPC": "1",
        "Cache-Control": "max-age=0",
        "TE": "Trailers",
    }

    comments = []
    for page_number in range(1, 25):
        url = f"https://taraazws.jabama.com/api/v1/reviews/place/{page_id}/reviews?page={page_number}"
        r = requests.get(url, headers=headers)
        if r.status_code != 200:
            raise Exception(f"request error{r.status_code}")

        page_comments = []
        for row in json.loads(r.text)["result"]:
            comment = re.sub(
                r"[،؟\?\.\!]+(?=[،؟\?\.\!])", "", normalizer(row["comment"]).strip(),
            )

            page_comments.append([comment, row["rating"]])
        if page_comments:
            comments += page_comments
        else:
            break

    return comments
Example #6
def get_page_comments(url, page_number):
    restaurant_id = url.replace("snappfood.ir/restaurant/menu/", "").split("/")[0]
    comments_url = (
        f"https://snappfood.ir/restaurant/comment/vendor/{restaurant_id}/{page_number}"
    )
    r = requests.get(comments_url)
    if r.status_code != 200:
        raise Exception(f"Request Error: {r.status_code}")
    comments_json = json.loads(r.text)["data"]
    count = comments_json["count"]
    comments = []

    for comment_dict in comments_json["comments"]:
        comment = re.sub(
            r"[،؟\?\.\!]+(?=[،؟\?\.\!])",
            "",
            normalizer(comment_dict["commentText"]).strip(),
        )

        rate = comment_dict["rate"] / 2
        if rate == 0 and comment_dict["feeling"] == "HAPPY":
            rate = 5
        comments.append([comment, rate])

    page_comments = {"url": url, "count": count, "comments": comments}
    return page_comments
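
Only the single-page fetch is shown; a hedged driver sketch that pages until the API's reported count is reached (the vendor slug is a placeholder, and the stopping rule is an assumption):

url = "snappfood.ir/restaurant/menu/VENDOR_CODE/"  # hypothetical vendor page
all_comments, page = [], 0
while True:
    page_data = get_page_comments(url, page)
    all_comments += page_data["comments"]
    if not page_data["comments"] or len(all_comments) >= page_data["count"]:
        break
    page += 1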
Example #7
def scrap_comments(comment_url):
    ratings = list()
    r = requests.get(comment_url)
    if r.status_code != 200:
        raise Exception(f"request error{r.status_code}")
    json_data = r.json()
    for comment_json in json_data["pageProps"]["bookComments"]["commentsList"]:
        comment = re.sub(
            r"[،؟\?\.\!]+(?=[،؟\?\.\!])", "", normalizer(comment_json["comment"])
        )
        rate = comment_json["rate"]
        ratings.append([comment, rate])
    return ratings
Example #8
    def __init__(self, args, env, env_params, sample_size=100):
        self.args = args
        self.env = env
        self.env_params = env_params
        self.sample_size = sample_size

        # create the network
        self.score_predictor = open_loop_image_predictor(env_params['obs'] + env_params['action'])

        # create the normalizer
        self.o_norm = normalizer(size=env_params['obs'], default_clip_range=self.args.clip_range)
        self.a_norm = normalizer(size=env_params['action'], default_clip_range=self.args.clip_range)

        # load model if load_path is not None
        if self.args.load_dir != '':
            load_path = self.args.load_dir + '/model.pt'
            o_mean, o_std, a_mean, a_std, model = torch.load(load_path)
            self.o_norm.mean = o_mean
            self.o_norm.std = o_std
            self.a_norm.mean = a_mean
            self.a_norm.std = a_std
            self.score_predictor.load_state_dict(model)

        # sync the networks across the cpus
        # sync_networks(self.score_predictor)

        # if use gpu
        if self.args.cuda:
            self.score_predictor.cuda()

        # create the optimizer
        self.optim = torch.optim.Adam(self.score_predictor.parameters(), lr=self.args.lr_actor)

        # create the replay buffer
        self.buffer = open_loop_buffer(self.env_params, self.args.buffer_size)

        # path to save the model
        self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
Example #9
def load_data(seq_len, imf_index, ver):
    ts, num_req_normalize, MaxAbsScalerObj = \
        normalizer.normalizer(imf_index, ver, plot=False)

    sequence_length = seq_len + 1

    # %%%%%%%%%%%% Num Of Req %%%%%%%%%%%%%%%%%%%%%%%
    result = []
    for index in range(len(num_req_normalize) -
                       sequence_length):  # slide a fixed-length window over the series
        result.append(num_req_normalize[index:index + sequence_length])

    result = np.array(result)
    print('------------')
    print('shape of sequences created is ',
          result.shape)  # (len(total_data)-seq_length)   *   seq_length
    print('length of each sequence is ', len(result[0]))
    print('------------')
    ts = ts[:result.shape[0] - 1]

    row = round(0.9 * result.shape[0])
    train = result[:int(row), :]

    np.random.shuffle(train)
    x_train = train[:, :-1]
    ts_train = ts[:int(row)]
    y_train = train[:, -1]
    x_test = result[int(row):, :-1]
    y_test = result[int(row):, -1]
    ts_test = ts[int(row) - 1:]

    y_train_original_part = num_req_normalize[:int(row)]

    l = len(result)
    l_train = x_train.shape
    l_test = x_test.shape
    print('total data length is :', l)
    print('train set length : ', l_train)
    print('test set length :', l_test)

    x_train = np.reshape(x_train,
                         (x_train.shape[0], x_train.shape[1], 1))  # make 3D
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

    return [
        x_train, y_train, y_train_original_part, x_test, y_test, ts_train,
        ts_test, MaxAbsScalerObj
    ]
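
The loop above builds overlapping windows of length seq_len + 1, with the last element of each window serving as the prediction target. The same slicing on a toy series makes the shapes concrete:

import numpy as np

series = np.arange(8)      # stand-in for num_req_normalize
win = 3 + 1                # seq_len + 1
windows = np.array([series[i:i + win] for i in range(len(series) - win)])
X, y = windows[:, :-1], windows[:, -1]
# windows[0] == [0, 1, 2, 3]; X[0] == [0, 1, 2]; y[0] == 3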
Example #10
def load_data(seq_len):
    ts, num_req_normalize, minMaxScaler = \
        normalizer.normalizer(plot=True)

    sequence_length = seq_len + 1

    # %%%%%%%%%%%% Num Of Req %%%%%%%%%%%%%%%%%%%%%%%
    result = []
    for index in range(len(num_req_normalize) -
                       sequence_length):  # slide a fixed-length window over the series
        result.append(num_req_normalize[index:index + sequence_length])

    result = np.array(result)
    print('------------')
    print('shape of sequences created is ',
          result.shape)  # (len(total_data)-seq_length)   *   seq_length
    print('length of each sequence is ', len(result[0]))
    print('------------')
    ts = ts[:result.shape[0] - 1]

    row = round(0.9 * result.shape[0])
    train = result[:int(row), :]

    np.random.shuffle(train)
    x_train = np.squeeze(train[:, :-2])
    ts_train = ts[:int(row)]
    y_train = np.squeeze(train[:, -2:])
    x_test = np.squeeze(result[int(row):, :-2])
    y_test = np.squeeze(result[int(row):, -2:])
    ts_test = ts[int(row) - 1:]

    y_train_original_part = num_req_normalize[:int(row)]

    l = len(result)
    print('total data length is :', l)
    print('train set length : ', x_train.shape, y_train.shape)
    print('test set length :', x_test.shape, y_test.shape)
    print('\n------------------------------------------\n\n\n\n')

    x_train = np.reshape(x_train,
                         (x_train.shape[0], x_train.shape[1], 1))  # make 3D
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

    return [
        x_train, y_train, y_train_original_part, x_test, y_test, ts_train,
        ts_test, minMaxScaler
    ]
Example #11
def scrap_comments(url):

    comments = []

    package_name = url.split("/")[-1]

    cafebazaar_api_url = "https://api.cafebazaar.ir/rest-v1/process/ReviewRequest"

    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:87.0) Gecko/20100101 Firefox/87.0",
        "Accept": "application/json, text/plain, */*",
        "Accept-Language": "en-US,en;q=0.5",
        "Content-Type": "application/json;charset=utf-8",
        "Origin": "https://cafebazaar.ir",
        "Connection": "keep-alive",
        "Referer": "https://cafebazaar.ir/",
        "Sec-GPC": "1",
        "TE": "Trailers",
    }

    for start in range(0, 2_500, 100):
        page_comments = []
        data = f'{{"properties":{{"language":2,"clientID":"m7dywtvvb5z3kph88shahoou39y8w5jw","deviceID":"m7dywtvvb5z3kph88shahoou39y8w5jw","clientVersion":"web"}},"singleRequest":{{"reviewRequest":{{"packageName":"{package_name}","start":{start},"end":{start+100}}}}}}}'
        for retry in range(10):
            r = requests.post(cafebazaar_api_url, headers=headers, data=data)
            if r.status_code == 200:

                comments_json = json.loads(r.text)

                for review in comments_json["singleReply"]["reviewReply"]["reviews"]:
                    comment = re.sub(
                        r"[،؟\?\.\!]+(?=[،؟\?\.\!])",
                        "",
                        normalizer(review["comment"]).strip(),
                    )

                    page_comments.append([comment, review["rate"]])
                break
            elif r.status_code == 504:
                pass  # gateway timeout; retry
            else:
                raise Exception(f"Request Error: {r.status_code}")
        if page_comments:
            comments += page_comments
        else:
            break

    return comments
Example #12
def scrap_all_rattings(pages_url):
    visited_urls = []

    pages_url = [list(row) for row in pages_url]
    random.shuffle(pages_url)

    with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
        future_to_url = {}
        futures = []
        for _, url, is_visited in pages_url:
            if not is_visited:
                futures_executor = executor.submit(scrap_rattings, url=url)
                future_to_url.update({futures_executor: url})
                futures.append(futures_executor)
        for future in tqdm(
                concurrent.futures.as_completed(futures),
                initial=len(pages_url) - len(futures),
                total=len(pages_url),
        ):
            url = future_to_url[future]
            try:
                ratings = future.result()
            except Exception as exc:
                tqdm.write(f"{url} generated an exception: {exc}")
            else:
                if url not in visited_urls:

                    with DimnaDatabase(db_path, logger) as db:
                        db.update_page_visit_status(
                            base_url,
                            url,
                            True,
                        )

                        for comment, rate in ratings:
                            # Regex replace multiple punctuations and normalize
                            comment = re.sub(r"[،؟\?\.\!]+(?=[،؟\?\.\!])", "",
                                             normalizer(comment))
                            db.insert_rating(
                                base_url,
                                comment,
                                rate,
                            )
                    visited_urls.append(url)
Example #13
    def draw_relevance(self, data1, data2, data1_name, data2_name):

        norm = normalizer()

        data1_norm = norm.linear_normalize(data1)
        data2_norm = norm.linear_normalize(data2)

        plt.figure(1)
        plt.subplot(211)
        plt.plot(data1, 'b-', data2, 'r-')
        plt.legend([data1_name, data2_name], loc="upper left")

        plt.subplot(212)
        plt.plot(data1_norm, 'b-', data2_norm, 'r-')
        plt.legend([data1_name, data2_name], loc="upper left")
        plt.grid(True)
        plt.show()

        return
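
linear_normalize is not defined anywhere in this listing; a minimal min-max sketch consistent with how the plot uses it (an assumption, written as a free function rather than the method form above):

import numpy as np

def linear_normalize(data):
    # rescale to [0, 1]; a constant series maps to zeros
    data = np.asarray(data, dtype=float)
    span = data.max() - data.min()
    return (data - data.min()) / span if span else np.zeros_like(data)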
Example #15
def scrap_comments(course_url):
    course_id = re.findall(r"mk(\d+)", course_url)[0]
    page_number = 1
    comments_url = f"https://maktabkhooneh.org/course/{course_id}/more_reviews/{page_number}/"

    ratings = list()

    r = requests.get(comments_url)
    if not "تا کنون نظری برای این دوره ثبت نشده است" in r.text:
        soup = BeautifulSoup(r.text, "html.parser")
        try:
            number_of_pages = int(
                max(
                    re.findall(
                        r"\d+",
                        str(soup.find("div",
                                      {"class", "filler left-aligned"})))))
        except:
            number_of_pages = 1

        for page_number in range(1, number_of_pages + 1):
            comments_url = (
                f"https://maktabkhooneh.org/course/{course_id}/more_reviews/{page_number}/"
            )

            r = requests.get(comments_url)
            if r.status_code != 200:
                raise Exception(f"Request Error: {r.status_code}")

            soup = BeautifulSoup(r.text, "html.parser")
            for comment_part in soup.find_all("div", class_="comments__field"):
                comment = comment_part.find(
                    "div",
                    {"class": "comments__desc-user top-margin"}).text.strip()
                comment = re.sub(r"[،؟\?\.\!]+(?=[،؟\?\.\!])", "",
                                 normalizer(comment))
                rate = len(
                    comment_part.find_all(
                        "i", {"class": "svg-icon--24 svg-icon--gold"}))
                ratings.append([comment, rate])
    return ratings
Example #16
def scrap_comments(url):
    cookies = {
        "route": "1619449476.736.583.641701",
        "unique-cookie": "nwRatz4P21XQoAh",
        "appid": "g*-*direct*-**-*",
        "ptpsession": "g--2212735793014068001",
        "srtest": "1",
    }

    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:87.0) Gecko/20100101 Firefox/87.0",
        "Accept": "*/*",
        "Accept-Language": "fa",
        "lang": "fa",
        "Content-Type": "application/json",
        "X-Requested-With": "XMLHttpRequest",
        "Connection": "keep-alive",
        "Referer": "https://www.snapptrip.com/%D8%B1%D8%B2%D8%B1%D9%88-%D9%87%D8%AA%D9%84/%D8%B4%DB%8C%D8%B1%D8%A7%D8%B2/%D9%87%D8%AA%D9%84-%D8%A8%D8%B2%D8%B1%DA%AF?date_from=2021-04-26&date_to=2021-04-27",
        "Sec-GPC": "1",
        "TE": "Trailers",
    }

    params = (("limit", "500"),)

    comments_url = "https://www.snapptrip.com/rate_review/hotel/" + url.split("?")[-1]
    r = requests.get(comments_url, headers=headers, params=params, cookies=cookies)
    soup = BeautifulSoup(r.text, "html.parser")

    comments = []
    try:
        comments_li = soup.find_all("li", {"class": "comment-item box-frame"})
        for li in comments_li:

            rating = li.find("div", {"class": "rate-badge"}).text.strip()
            comment = li.find("span", {"class": "comment-text-wrapper"})
            if comment:
                comment = comment.text.strip()
            else:
                comment = ""
            pn_comment = li.find_all("p", {"class": "mb-0"})
            if pn_comment:
                if "mt-lg-1" in str(pn_comment[0]):
                    comment += "\n" + " نقاط قوت: " + pn_comment[0].text.strip()

                else:
                    comment += "\n" + " نقاط ضعف: " + pn_comment[0].text.strip()
                try:
                    if "mt-lg-1" in str(pn_comment[1]):
                        comment += "\n" + " نقاط قوت: " + pn_comment[1].text.strip()
                    else:
                        comment += "\n" + " نقاط ضعف: " + pn_comment[1].text.strip()
                except IndexError:
                    pass  # only one pros/cons paragraph present

            comment = re.sub(
                r"[،؟\?\.\!]+(?=[،؟\?\.\!])", "", normalizer(comment).strip(),
            )

            comments.append([comment, rating])
    except AttributeError:
        pass  # comment list not present on the page
    return comments
Example #17
    def __init__(self, args, env, env_params, image=True):
        self.args = args
        self.env = env
        self.env_params = env_params
        self.image = image

        # create the network
        if self.image:
            self.actor_network = actor_image(env_params, env_params['obs'])
            self.critic_network = critic_image(
                env_params, env_params['obs'] + env_params['action'])
        else:
            self.actor_network = actor(env_params, env_params['obs'])
            self.critic_network = critic(
                env_params, env_params['obs'] + env_params['action'])

        # create the normalizer
        self.o_norm = normalizer(size=env_params['obs'],
                                 default_clip_range=self.args.clip_range)

        # load model if load_path is not None
        if self.args.load_dir != '':
            load_path = self.args.load_dir + '/model.pt'
            o_mean, o_std, g_mean, g_std, model = torch.load(load_path)
            self.o_norm.mean = o_mean
            self.o_norm.std = o_std
            self.actor_network.load_state_dict(model)

        # sync the networks across the cpus
        # sync_networks(self.actor_network)
        # sync_networks(self.critic_network)
        # build up the target network
        if self.image:
            self.actor_target_network = actor_image(env_params,
                                                    env_params['obs'])
            self.critic_target_network = critic_image(
                env_params, env_params['obs'] + env_params['action'])
        else:
            self.actor_target_network = actor(env_params, env_params['obs'])
            self.critic_target_network = critic(
                env_params, env_params['obs'] + env_params['action'])
        # load the weights into the target networks
        self.actor_target_network.load_state_dict(
            self.actor_network.state_dict())
        self.critic_target_network.load_state_dict(
            self.critic_network.state_dict())

        # if use gpu
        if self.args.cuda:
            self.actor_network.cuda()
            self.critic_network.cuda()
            self.actor_target_network.cuda()
            self.critic_target_network.cuda()
        # create the optimizer
        self.actor_optim = torch.optim.Adam(self.actor_network.parameters(),
                                            lr=self.args.lr_actor)
        self.critic_optim = torch.optim.Adam(self.critic_network.parameters(),
                                             lr=self.args.lr_critic)
        # her sampler
        self.her_module = her_sampler(self.args.replay_strategy,
                                      self.args.replay_k,
                                      self.env().compute_reward)
        # create the replay buffer
        self.buffer = replay_buffer(self.env_params,
                                    self.args.buffer_size,
                                    self.her_module.sample_her_transitions,
                                    image=self.image)

        # path to save the model
        self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
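
The target networks created here are conventionally refreshed by Polyak averaging during training; that step falls outside this excerpt, so the following is the standard DDPG form rather than the author's exact code (the polyak value is an assumption):

def soft_update_target_network(target, source, polyak=0.95):
    # target <- polyak * target + (1 - polyak) * source, parameter-wise
    for target_param, param in zip(target.parameters(), source.parameters()):
        target_param.data.copy_(
            polyak * target_param.data + (1.0 - polyak) * param.data)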
Example #18
import psycopg2
import numpy as np
import normalizer

std_cpu, std_ram, mean_cpu, mean_ram, ts, \
    cpu_values_normalize, \
    ram_values_normalize = \
        normalizer.normalizer(plot=False)

import matplotlib.pyplot as plt

fig = plt.figure(facecolor='white', figsize=(13.0, 8.0))
ax = fig.add_subplot(211)
ax.plot(ts, cpu_values_normalize, color='red', label='CPU', alpha=0.85)
plt.legend()
plt.grid()
plt.ylim([0, 1])
plt.ylabel('Normalized CPU Request')
ax = fig.add_subplot(212)
ax.plot(ts, ram_values_normalize, color='blue', label='RAM', alpha=0.85)
plt.legend()
plt.grid()
plt.ylim([0, 1])
plt.ylabel('Normalized RAM Request')
plt.xlabel('Time Symbol')
plt.savefig('REAL_100.png', format='png', dpi=800)
plt.show()
Example #19
    def __init__(self, args, env, env_params):
        self.args = args
        self.env = env
        self.env_params = env_params

        # create the network
        self.actor_network = actor(env_params)
        self.critic_network = critic(env_params)
        # sync the networks across the cpus
        #sync_networks(self.actor_network)
        #sync_networks(self.critic_network)
        # build up the target network
        self.actor_target_network = actor(env_params)
        self.critic_target_network = critic(env_params)

        # Load the model if required
        if args.load_path is not None:
            o_mean, o_std, g_mean, g_std, load_actor_model, load_critic_model = torch.load(
                args.load_path, map_location=lambda storage, loc: storage)
            self.actor_network.load_state_dict(load_actor_model)
            self.critic_network.load_state_dict(load_critic_model)

        # load the weights into the target networks
        self.actor_target_network.load_state_dict(
            self.actor_network.state_dict())
        self.critic_target_network.load_state_dict(
            self.critic_network.state_dict())
        # if use gpu
        if self.args.cuda:
            self.actor_network.cuda()
            self.critic_network.cuda()
            self.actor_target_network.cuda()
            self.critic_target_network.cuda()
        # create the optimizer
        self.actor_optim = torch.optim.Adam(self.actor_network.parameters(),
                                            lr=self.args.lr_actor)
        self.critic_optim = torch.optim.Adam(self.critic_network.parameters(),
                                             lr=self.args.lr_critic)
        # her sampler
        self.her_module = her_sampler(self.args.replay_strategy,
                                      self.args.replay_k,
                                      self.env.compute_reward)
        # create the replay buffer
        self.buffer = replay_buffer(self.env_params, self.args.buffer_size,
                                    self.her_module.sample_her_transitions)
        # create the normalizer
        self.o_norm = normalizer(size=env_params['obs'],
                                 default_clip_range=self.args.clip_range)
        self.g_norm = normalizer(size=env_params['goal'],
                                 default_clip_range=self.args.clip_range)
        # create the dict for store the model
        #if MPI.COMM_WORLD.Get_rank() == 0:
        if not os.path.exists(self.args.save_dir):
            os.mkdir(self.args.save_dir)

        # make up a suffix for the model path to indicate which method is used for training
        #self.folder_siffix = '_' + self.args.replay_strategy + '_' + self.args.env_params.reward_type
        # path to save the model
        self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
        if not os.path.exists(self.model_path):
            os.mkdir(self.model_path)
        self.model_path = os.path.join(self.model_path,
                                       'seed_' + str(self.args.seed))
        if not os.path.exists(self.model_path):
            os.mkdir(self.model_path)
Example #20
 def __init__(self, args, env, env_params):
     self.savetime = 0
     self.args = args
     self.env = env
     self.env_params = env_params
     # create the network
     self.actor_network = actor(env_params)
     self.critic_network = critic(env_params)
     # sync the networks across the cpus
     sync_networks(self.actor_network)
     sync_networks(self.critic_network)
     # build up the target network
     self.actor_target_network = actor(env_params)
     self.critic_target_network = critic(env_params)
     # load the weights into the target networks
     self.actor_target_network.load_state_dict(
         self.actor_network.state_dict())
     self.critic_target_network.load_state_dict(
         self.critic_network.state_dict())
     # if use gpu
     if self.args.cuda:
         self.actor_network.cuda()
         self.critic_network.cuda()
         self.actor_target_network.cuda()
         self.critic_target_network.cuda()
     # create the optimizer
     self.actor_optim = torch.optim.Adam(self.actor_network.parameters(),
                                         lr=self.args.lr_actor)
     self.critic_optim = torch.optim.Adam(self.critic_network.parameters(),
                                          lr=self.args.lr_critic)
     # her sampler
     self.her_module = her_sampler(self.args.replay_strategy,
                                   self.args.replay_k,
                                   self.env.compute_reward)
     # create the replay buffer
     self.buffer = replay_buffer(self.env_params, self.args.buffer_size,
                                 self.her_module.sample_her_transitions)
     # whether to add demonstration data to the buffer
     if self.args.add_demo:
         # initialize the replay buffer with demonstrations
         self._init_demo_buffer()
     # create the normalizer
     self.o_norm = normalizer(size=env_params['obs'],
                              default_clip_range=self.args.clip_range)
     self.g_norm = normalizer(size=env_params['goal'],
                              default_clip_range=self.args.clip_range)
     # load the data to continue the training
     # model_path = "saved_models/bmirobot-v3/125_True12_model.pt"
     # # # model_path = args.save_dir + args.env_name + '/' + str(args.seed) + '_' + str(args.add_demo) + '_model.pt'
     # # o_mean, o_std, g_mean, g_std, model = torch.load(model_path, map_location=lambda storage, loc: storage)
     # self.actor_network.load_state_dict(model)
     # self.o_norm.mean=o_mean
     # self.o_norm.std=o_std
     # self.g_norm.mean=g_mean
     # self.g_norm.std=g_std
     self.success_rates = []  # record the success rate of each epoch
     # create the dict for store the model
     if MPI.COMM_WORLD.Get_rank() == 0:
         if not os.path.exists(self.args.save_dir):
             os.mkdir(self.args.save_dir)
         # path to save the model
         self.model_path = os.path.join(self.args.save_dir,
                                        self.args.env_name)
         if not os.path.exists(self.model_path):
             os.mkdir(self.model_path)
Example #21
def load_data(seq_len, mode, factor, first_plot=False):
    std_cpu, std_ram, mean_cpu, mean_ram, ts, \
    cpu_values_normalize, \
    ram_values_normalize,min_max_scaler\
        =normalizer.normalizer(plot=first_plot)

    sequence_length = seq_len + 1
    if mode == 1:
        # %%%%%%%%%%%% CPU %%%%%%%%%%%%%%%%%%%%%%%
        result = []
        for index in range(len(cpu_values_normalize) -
                           sequence_length):  # slide a fixed-length window over the series
            result.append(cpu_values_normalize[index:index + sequence_length])

        result = np.array(result)
        print('------------')
        print('shape of sequences created is ',
              result.shape)  # (len(total_data)-seq_length)   *   seq_length
        print('length of each sequence is ', len(result[0]))
        print('------------')
        ts = ts[:result.shape[0] - 1]

    elif mode == 2:
        # %%%%%%%%%%%% RAM %%%%%%%%%%%%%%%%%%%%%%%
        result = []
        for index in range(len(ram_values_normalize) -
                           sequence_length):  # slide a fixed-length window over the series
            result.append(ram_values_normalize[index:index + sequence_length])

        result = np.array(result)
        print('------------')
        print('shape of sequences created is ',
              result.shape)  # (len(total_data)-seq_length)   *   seq_length
        print('length of each sequence is ', len(result[0]))
        print('------------')
        ts = ts[:result.shape[0] - 1]

    row = round(factor * result.shape[0])
    train = result[:int(row), :]

    np.random.shuffle(train)
    x_train = train[:, :-1]
    ts_train = ts[:int(row)]
    y_train = train[:, -1]
    x_test = result[int(row):, :-1]
    y_test = result[int(row):, -1]
    ts_test = ts[int(row) - 1:]
    if mode == 1:
        y_train_original_part = cpu_values_normalize[:int(row)]
    elif mode == 2:
        y_train_original_part = ram_values_normalize[:int(row)]

    l = len(result)
    l_train = x_train.shape
    l_test = x_test.shape
    print('total data length is :', l)
    print('train set length : ', l_train)
    print('test set length :', l_test)

    x_train = np.reshape(x_train,
                         (x_train.shape[0], x_train.shape[1], 1))  # make 3D
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

    return [
        x_train, y_train, y_train_original_part, x_test, y_test, ts_train,
        ts_test, min_max_scaler
    ]
Example #22
    csvfile = sys.argv[1]
    if csvfile.lower().endswith('.csv'):
        df = pd.read_csv(csvfile)
        delete = None
        norm = None
        keepPortname = False
        switches = ['-minmax', '-robust', '-standard']
        if not checkColumns(df):
            sys.exit("csv file does not have required columns")
        # check whether the '-del' switch or one of the scaler switches is present
        if '-del' in sys.argv:
            delete = delete_sub_500()
        # preserve port names if the -keepport switch is given
        if '-keepport' in sys.argv:
            keepPortname = True
        if any(word in sys.argv for word in switches):
            index = [word in sys.argv for word in switches].index(True)
            switch = switches[index]
            norm = normalizer(switch)
        ip = interpolation()
        pc = preclean()
        clean(pc, ip, df, delete, norm, keepPortname)
    else:
        sys.exit("command line argument is not a csv file")
else:
    ship_list = ships()
    pre_clean = preclean()
    interp = interpolation()
    norma = normalizer('-minmax')
    ind = 0
Example #23
hostname = 'localhost'
username = '******'
password = '******'
database = 'load_cloud'

conn = psycopg2.connect(host=hostname,
                        user=username,
                        password=password,
                        dbname=database)
cur = conn.cursor()

norm_Ver = 1  # 1 = MaxAbsScaler (-1, 1), 2 = MinMaxScaler (0, 1)
seq_lag = 10
train_factor = 0.9

ts, num_req_normalize, MaxAbsScalerObj = normalizer(norm_Ver, False)
reqs = [j for i in num_req_normalize for j in i]
#print(len(reqs),len(ts))

df = pd.DataFrame({'requests': np.array(reqs)})
print(df.shape)


# define function for create N lags
def create_lags(df, N):
    for i in range(N):
        df['Lag' + str(i + 1)] = df.requests.shift(i + 1)
    return df


# create lags
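
Example #31 applies the same helper via df = create_lags(df, seq_lag). A short usage sketch that also drops the NaN rows the shifting introduces (the dropna step is an assumption about the elided code):

df = create_lags(df, seq_lag)
df = df.dropna()  # the first seq_lag rows lack a full lag history
X = df.drop(columns='requests').values
y = df['requests'].values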
Example #24
import numpy as np
import normalizer as norm
from load_data import load_csv

base_path = '/home/daniel/Documentos/Projetos/TCC/Normalizador/tests'  # Replace with a relative project path.

raw_data = load_csv(f'{base_path}/raw_data.csv')
normalized = norm.normalizer(raw_data)

np.savetxt(f'{base_path}/normalized.csv', normalized, fmt='%.8f')

print(normalized)
Example #25
    def __init__(self, args, env, env_params):
        self.args = args
        self.env = env
        self.env_params = env_params

        # create the network
        self.actor_network = actor(env_params)
        self.critic_network = critic(env_params)
        # build up the target network
        self.actor_target_network = actor(env_params)
        self.critic_target_network = critic(env_params)

        # Load the model if required
        if args.load_path is not None:
            o_mean, o_std, g_mean, g_std, load_actor_model, load_critic_model = torch.load(
                args.load_path, map_location=lambda storage, loc: storage)
            self.actor_network.load_state_dict(load_actor_model)
            self.critic_network.load_state_dict(load_critic_model)

        # load the weights into the target networks
        self.actor_target_network.load_state_dict(
            self.actor_network.state_dict())
        self.critic_target_network.load_state_dict(
            self.critic_network.state_dict())
        # if use gpu
        if self.args.cuda:
            self.actor_network.cuda()
            self.critic_network.cuda()
            self.actor_target_network.cuda()
            self.critic_target_network.cuda()
        # create the optimizer
        self.actor_optim = torch.optim.Adam(self.actor_network.parameters(),
                                            lr=self.args.lr_actor)
        self.critic_optim = torch.optim.Adam(self.critic_network.parameters(),
                                             lr=self.args.lr_critic)
        # her sampler
        self.her_module = her_sampler(self.args.replay_strategy,
                                      self.args.replay_k,
                                      self.env.compute_reward)
        # create the replay buffer
        if self.args.replay_strategy == 'future':
            self.buffer = replay_buffer(self.env_params, self.args.buffer_size,
                                        self.her_module.sample_her_transitions)
        else:
            self.buffer = replay_buffer(
                self.env_params, self.args.buffer_size,
                self.her_module.sample_normal_transitions)
        # create the normalizer
        self.o_norm = normalizer(size=env_params['obs'],
                                 default_clip_range=self.args.clip_range)
        self.g_norm = normalizer(size=env_params['goal'],
                                 default_clip_range=self.args.clip_range)
        # create the dict for store the model
        if not os.path.exists(self.args.save_dir):
            os.mkdir(self.args.save_dir)

        # make up a suffix for the model path to indicate which method is used for training
        buffer_len_epochs = int(
            self.args.buffer_size /
            (env_params['max_timesteps'] * self.args.num_rollouts_per_cycle *
             self.args.n_cycles))
        name_add_on = ''
        if self.args.exploration_strategy == 'pgg':
            if self.args.pgg_strategy == 'final':
                if self.args.replay_strategy == 'future':
                    name_add_on = '_final_distance_based_goal_generation_buffer' + str(
                        buffer_len_epochs) + 'epochs'
                else:
                    name_add_on = '_final_distance_based_goal_generation_withoutHER_buffer' + str(
                        buffer_len_epochs) + 'epochs'
            else:
                if self.args.replay_strategy == 'future':
                    name_add_on = '_distance_based_goal_generation_buffer' + str(
                        buffer_len_epochs) + 'epochs'
                else:
                    name_add_on = '_distance_based_goal_generation_withoutHER_buffer' + str(
                        buffer_len_epochs) + 'epochs'
        else:
            if self.args.replay_strategy == 'future':
                name_add_on = '_originalHER_buffer' + str(
                    buffer_len_epochs) + 'epochs'
            else:
                name_add_on = '_originalDDPG_buffer' + str(
                    buffer_len_epochs) + 'epochs'

        # path to save the model
        self.model_path = os.path.join(self.args.save_dir,
                                       self.args.env_name + name_add_on)

        if not os.path.exists(self.model_path):
            os.mkdir(self.model_path)
        self.model_path = os.path.join(self.model_path,
                                       'seed_' + str(self.args.seed))
        if not os.path.exists(self.model_path):
            os.mkdir(self.model_path)
Example #26
from kMeans import k_means
from normalizer import normalizer
from dataGenerator import dataGenerator

dGen = dataGenerator()
norm = normalizer()

# positions in the data and normalized_data vectors refer to the same values (unnormalized and normalized);
# this is designed to allow us to trace back to the original values.
data = dGen.generateCartesianPoints([0, 10000], 500)
normalized_data = norm.normalize_data(data, 0, 100)

#Object declaration.
kMeans = k_means()
#Call to the clusterizer method.
clusters = kMeans.clusterize(normalized_data, 5)

#Plotting clusters
kMeans.plotCluster(clusters, data)
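
normalize_data(data, 0, 100) is called but never defined in this listing; a hedged min-max sketch that linearly maps all values into [lower, upper]:

import numpy as np

def normalize_data(data, lower, upper):
    # assumed behavior: rescale values into [lower, upper]
    data = np.asarray(data, dtype=float)
    span = data.max() - data.min()
    if span == 0:
        return np.full_like(data, lower)
    return lower + (data - data.min()) * (upper - lower) / span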
Example #27
import numpy as np
import Read_Data
import normalizer
from generator import generator

std_cpu, std_ram, mean_cpu, mean_ram, ts, \
    cpu_values_normalize, \
    ram_values_normalize = \
        normalizer.normalizer(plot=True)


lookback = 50
step = 1
delay = 1
batch_size = 1


# lookback = 1440
# step = 6
# delay = 144
# batch_size = 128


# ----------- RAM ---------
data = np.vstack((ts, ram_values_normalize))
data = data.T
print('************', data.shape)

l = data.shape[0]
Example #28
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from extractor import extractor
from normalizer import normalizer
from converter import converter

if __name__ == "__main__":

  results = extractor()
  nodes, ties = normalizer(results)
  converter(nodes, ties)
Example #29
    def __init__(self,
                 args,
                 envs_lst,
                 env_params,
                 expert_lst_dir,
                 recurrent=True,
                 ee_reward=True,
                 image=True):
        self.args = args
        self.envs_lst = envs_lst
        self.env_params = env_params
        self.recurrent = recurrent
        self.ee_reward = ee_reward
        self.image = image

        # initialize expert
        self.expert_lst = []
        for expert_dir in expert_lst_dir:
            expert_load_path = expert_dir + '/model.pt'
            o_mean, o_std, g_mean, g_std, model = torch.load(expert_load_path)
            expert_model = actor(env_params,
                                 env_params['obs'] + env_params['goal'])
            expert_model.load_state_dict(model)
            self.expert_lst.append({
                "model": expert_model,
                "o_mean": o_mean,
                "o_std": o_std,
                "g_mean": g_mean,
                "g_std": g_std
            })

        # create the network
        if self.recurrent:
            self.actor_network = actor_recurrent(
                env_params,
                env_params['obs'] + env_params['goal'] + env_params['action'],
                env_params['goal'])
            # self.critic_network = critic_recurrent(env_params, env_params['obs'] + env_params['goal'] + 2 * env_params['action'])
        else:
            self.actor_network = actor(
                env_params,
                env_params['obs'] + env_params['goal'] + env_params['action'],
                env_params['goal'])
        self.critic_network = critic(
            env_params,
            env_params['obs'] + 2 * env_params['goal'] + env_params['action'])

        # create the normalizer
        self.o_norm = normalizer(size=env_params['obs'],
                                 default_clip_range=self.args.clip_range)
        self.g_norm = normalizer(size=env_params['goal'],
                                 default_clip_range=self.args.clip_range)
        self.sg_norm = normalizer(size=env_params['action'],
                                  default_clip_range=self.args.clip_range)

        # load model if load_path is not None
        if self.args.load_dir != '':
            load_path = self.args.load_dir + '/model.pt'
            # o_mean, o_std, g_mean, g_std, sg_mean, sg_std, model = torch.load(load_path)
            o_mean, o_std, g_mean, g_std, model = torch.load(load_path)
            self.o_norm.mean = o_mean
            self.o_norm.std = o_std
            self.g_norm.mean = g_mean
            self.g_norm.std = g_std
            # self.sg_norm.mean = sg_mean
            # self.sg_norm.std = sg_std
            self.actor_network.load_state_dict(model)

        # sync the networks across the cpus
        sync_networks(self.actor_network)
        sync_networks(self.critic_network)
        # build up the target network
        if self.recurrent:
            self.actor_target_network = actor_recurrent(
                env_params,
                env_params['obs'] + env_params['goal'] + env_params['action'],
                env_params['goal'])
            # self.critic_target_network = critic_recurrent(env_params, env_params['obs'] + env_params['goal'] + 2 * env_params['action'])
        else:
            self.actor_target_network = actor(
                env_params,
                env_params['obs'] + env_params['goal'] + env_params['action'],
                env_params['goal'])
        self.critic_target_network = critic(
            env_params,
            env_params['obs'] + 2 * env_params['goal'] + env_params['action'])
        # load the weights into the target networks
        self.actor_target_network.load_state_dict(
            self.actor_network.state_dict())
        self.critic_target_network.load_state_dict(
            self.critic_network.state_dict())

        # if use gpu
        if self.args.cuda:
            self.actor_network.cuda()
            self.critic_network.cuda()
            self.actor_target_network.cuda()
            self.critic_target_network.cuda()
        # create the optimizer
        self.actor_optim = torch.optim.Adam(self.actor_network.parameters(),
                                            lr=self.args.lr_actor)
        self.critic_optim = torch.optim.Adam(self.critic_network.parameters(),
                                             lr=self.args.lr_critic)
        # her sampler
        self.her_module_lst = [
            her_sampler(self.args.replay_strategy, self.args.replay_k,
                        env.compute_reward) for env in self.envs_lst
        ]
        # create the replay buffer
        self.buffer_lst = [
            replay_buffer(self.env_params,
                          self.args.buffer_size,
                          her_module.sample_her_transitions,
                          ee_reward=True) for her_module in self.her_module_lst
        ]

        # path to save the model
        self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
Example #31
database = 'load_cloud'

conn = psycopg2.connect(host=hostname,
                        user=username,
                        password=password,
                        dbname=database)
cur = conn.cursor()

norm_Ver = 1  # 1 = MaxAbsScaler (-1, 1), 2 = MinMaxScaler (0, 1)
seq_lag = 10
train_factor = 0.9

for imf_index in range(1, 21):
    print(imf_index, '****************************************')

    ts, num_req_normalize, MaxAbsScalerObj = normalizer(
        imf_index, norm_Ver, False)
    reqs = [j for i in num_req_normalize for j in i]
    #print(len(reqs),len(ts))

    df = pd.DataFrame({'requests': np.array(reqs)})
    print(df.shape)

    # define function for create N lags
    def create_lags(df, N):
        for i in range(N):
            df['Lag' + str(i + 1)] = df.requests.shift(i + 1)
        return df

    # create  lags
    df = create_lags(df, seq_lag)
Example #32
import numpy as np
import Read_Data
import normalizer
from generator import generator
from matplotlib import pyplot as plt

imf_index = 10

# std_cpu, std_ram, mean_cpu, mean_ram, ts, ts_reload, \
# cpu_values_normalize, cpu_reloaded_normalize, \
# ram_values_normalize, ram_reloaded_normalize = \
#                  normalizer.normalizer(imf_index,plot=False)

std_cpu, std_ram, mean_cpu, mean_ram, ts, cpu_values_normalize, ram_values_normalize = \
    normalizer.normalizer(imf_index, plot=False)

#
# from sklearn.preprocessing import MinMaxScaler
# offline_scaler = MinMaxScaler()
# offline_scaler.fit(ram_values_normalize.reshape(-1, 1))
# ram_values_normalize=np.array(offline_scaler.transform(ram_values_normalize.reshape(-1, 1)))

#
# lookback = 100  # Observations will go back 10 days
# step = 5  #  Observations will be sampled at one data point per hour.
# delay = 20  # Targets will be 24 hours in the future.
# batch_size = 128

lookback = 360
step = 6
delay = 36