def doProcess(tmin,tmax,data=data,mdef=mdef):
  ## -- do a single fit ... set up for parallelizing
  for key in data:
    mdef[key]['tfit'] = range(tmin,tmax)
  models = make_models(data=data,lkey=df.lkey,mdef=mdef)
  pdict0 = utf.get_prior_dict(df.define_prior,
   df.define_prior['nkey'],df.define_prior['okey'],df.num_nst,df.num_ost)
  prior = make_prior(models,prior_dict=pdict0,nst=df.num_nst,ost=df.num_ost)
  fitter = CorrFitter(models=models,maxit=df.maxit)
  #try: ## -- if catching value error, just exit
  ## -- p0 = initial values (dictionary)
  if df.do_initial:
   try:
    p0={}
    for key in df.define_init:
     eokey = utf.get_evenodd(key)
     if eokey == 'o':
      p0[key] = df.define_init[key][:df.num_ost]
     else:
      p0[key] = df.define_init[key][:df.num_nst]
    fit = fitter.lsqfit(data=data,prior=prior,p0=p0,svdcut=df.svdcut)
   except KeyError:
    print "Could not use initial point definitions"
    fit = fitter.lsqfit(data=data,prior=prior,svdcut=df.svdcut)
  else:
   fit = fitter.lsqfit(data=data,prior=prior,svdcut=df.svdcut)
  ## --
  print_fit(fit,prior)
  print_error_budget(fit)
  save_data('fit-timevary/fit_'+str(tmin)+'_'+str(tmax)+'.out',fit,data)
Example #2
def calculate_and_save_dists(example_image_path):
    start_time = datetime.datetime.now()

    target = open('D:\\feature_vectors.txt', 'r')
    feature_vectors_str = target.read()
    target.close()

    end_time = datetime.datetime.now()
    use_time = end_time - start_time
    print(use_time.seconds + use_time.microseconds / 1000000.0)

    feature_vectors_dic = json.loads(feature_vectors_str)
    example_image_feature_vector = feature_vectors_dic[example_image_path]

    dists = []

    start_time = datetime.datetime.now()

    for image_feature_vector in feature_vectors_dic.values():
        dist = dist_between_feature_vector(example_image_feature_vector, image_feature_vector)
        dists.append(dist)

    end_time = datetime.datetime.now()
    use_time = end_time - start_time
    print(use_time.seconds + use_time.microseconds / 1000000.0)

    start_time = datetime.datetime.now()

    example_image_name = example_image_path[14:-4]
    save_data.save_data('D:\\dists\\dists_of_'+example_image_name+'.txt', feature_vectors_dic.keys(), dists)
    # example_image_name [18:-4]

    end_time = datetime.datetime.now()
    use_time = end_time - start_time
    print(use_time.seconds + use_time.microseconds / 1000000.0)
Example #3
    def load_file(self):
        filename = self.open_file()
        print(filename)
        if filename == "":
            show_text = "取消读取文件"
            self.change_show_text(show_text)
            return
        show_text = "正在打开" + filename + " \n最大读取包数:" + str(self.packet_load_num)
        self.change_show_text(show_text)
        self.root.update()
        this_pcap_file = Pcap_class.build_pcap(filename, self.packet_load_num)
        show_text = "成功创建Pcap类,共含" + str(len(this_pcap_file.get_packets())) + "个流量包"
        self.change_show_text(show_text)
        self.load_in_flag = 1
        save_data.clear_all_table()
        show_text = "清空临时表完毕"
        self.change_show_text(show_text)
        save_data.save_data(this_pcap_file)
        show_text = "填写临时表完毕"
        self.change_show_text(show_text)
        self.static_datas = statistic_analysis.statistic_analysis_entrance()
        show_text = "统计分析完成"
        self.change_show_text(show_text)
        self.fill_data_table(self.static_datas)


        width = int(self.maxWidth / 3)
        height = self.maxHeight - 120
        labels = [u'TCP', u'UDP', u'DNS',u'HTTP', u'HTTPS']
        protocol_adjust_list = copy.deepcopy(self.static_datas[-2])
        protocol_adjust_list[0] = protocol_adjust_list[0] - protocol_adjust_list[3] - protocol_adjust_list[4]
        protocol_adjust_list[1] = protocol_adjust_list[1] - protocol_adjust_list[2]
        statistic_analysis.draw_pie(protocol_adjust_list, labels, "protocol")
        self.photo_main = self.Load_img('protocol_pie.png', width, height)
        self.label_image.config(image=self.photo_main)
Example #4
def brand_scrap(start, stop):
    fuse = -1
    brand_lines = []

    link = "https://www.ultimatespecs.com/car-specs/BMW-models"
    soup = get_body_content(link)
    home_models_line = soup.find_all(class_="home_models_line")
    model_lines = []

    # Getting all model lines
    for model_divs in home_models_line:
        models_lines = model_divs.find_all("a")
        if models_lines != []:
            model_lines += models_lines

    # Scraping model lines
    for model_line in model_lines:
        fuse += 1
        if fuse < start:
            continue
        elif fuse > stop:
            break

        link_to_generations = f"https://www.ultimatespecs.com{model_line['href']}"
        generations = scrap_generations(link_to_generations)
        num_of_models = sum([gen.num_of_models for gen in generations])

        line_name = model_line.find("h2").string
        line_name = clear_text(line_name)
        line_image = generations[0].models[0].model_image

        line_info = model_line.find("p").text
        line_info = re.split(",", line_info)

        if len(line_info) == 3:
            line_info = [clear_text(x) for x in line_info]
            from_year = line_info[0].split(" ")[1]
        else:
            from_year = "No data"

        obj = ModelLine(
            line_name,
            "",
            line_image,
            from_year,
            len(generations),
            num_of_models,
            generations,
        )
        save_data(obj, "test")

        brand_lines.append(obj)

    return brand_lines
Example #5
def test():
    global modelName

    versions = scrap_versions(
        "https://www.ultimatespecs.com/car-specs/BMW/M52/E31-8-Series", "test")
    modelName = "model"

    car_model = Model(
        modelName,
        modelName,
        f"http://v-ie.uek.krakow.pl/~s215740/bmw_catalog/bmw_test.png",
        versions,
    )

    save_data(car_model, "test")
Example #6
def good_automat():
    """ прототип роботи торгового автомата """
    while True:
        load_data('.', 'data.txt')
        display_info([], [])
        act = input_operation(False)
        calculation([], [], [])
        save_data([], '', '')
        save_story([], '', None)
        pay()
        break
        if act == 0:
            return 0

    return 0
Example #7
def calculate_and_save_feature_vectors(filename, image_set_folder):
    ImageFile.LOAD_TRUNCATED_IMAGES = True

    image_paths = dist.get_imlist(image_set_folder)
    image_feature_vectors = []
    start_time = datetime.datetime.now()
    # count = 0
    for image_path in image_paths:
        image_feature_vector = feature_vector_of_image(image_path)
        image_feature_vectors.append(image_feature_vector)
        # count = count + 1
        print(image_path)
    end_time = datetime.datetime.now()
    use_time = end_time - start_time
    print(use_time.seconds + use_time.microseconds / 1000000.0)
    save_data.save_data(filename, image_paths, image_feature_vectors)
    return image_feature_vectors
Example #8
 def test_no_file_data(self):
     """ коли файла з даними нема """
     # генеруємо виключення WrongFileData
     os.remove(PATH_FILE + '\\' + FILE_DATA)
     with self.assertRaises(WrongFileData) as ex:
         save_data(goods, PATH_FILE, FILE_DATA)
     self.assertEqual(str(ex.exception),
                      'The machine does not work. Contact this admin!')
Example #9
def text_process(tweets):
    final = []
    temp_file_number = 1
    for person in tweets:
        filename = "../data/%d.dat" % temp_file_number
        if os.path.exists(filename):
            temp_file_number += 1
            continue
        all_tokenized = cleaning(person[1])
        all_lematized = []
        for list_of_tokens in all_tokenized:
            lemmatized = lemmatization(list_of_tokens)
            all_lematized.append(lemmatized)
        to_append = [person[0],all_lematized]
        final.append(to_append)
        save_data.save_data(to_append, filename)
        temp_file_number+=1

    return final
Example #10
    def test_err_wr_data(self):
        """ коли файл даних не пишеться """
        # вивалюємося? "

        with self.assertRaises(WrongFileData) as ex:
            with mock.patch('save_data.my_write', fake_write):
                save_data(goods, PATH_FILE, FILE_DATA)
        self.assertEqual(str(ex.exception),
                         'The machine does not work. Contact this admin!')
Example #11
 def test_err_data_bk(self):
     """ коли не перейменовується в резервний """
     # вивалюємося? "
     with open(PATH_FILE + '\\' + FILE_BK, 'w') as f:
         with self.assertRaises(WrongFileData) as ex:
             save_data(goods, PATH_FILE, FILE_DATA)
         self.assertEqual(
             str(ex.exception),
             'The machine does not work. Contact this admin!')
Example #12
 def save_file_path(self):
     print(self.ts_path)
     key = self.ts_path.strip()
     #value={'int':12,'float':9.5,'string':'sample data'}
     #value=self.test_object.pts_x_y
     #value=self.test_object.get_programe_PID()
     value = self.test_object.program_PID
     print "value is %s" % value
     data = save_data.save_data()
     data.creat_shelf(key, value)
     data.print_shelf(key)
     data.get_all_val()
Example #13
def get_per_comment_features(tweets):

    feature = []
    labels = []

    for person in tweets:
        labels.append(person[0])
        list_of_tweets = person[1]
        word_count_for_user = []
        for sentence in list_of_tweets:
            word_count = len(sentence.split())
            word_count_for_user.append(word_count)

        words_per_comment = sum(word_count_for_user) / len(
            word_count_for_user)  #words per comment
        variance_of_words_per_comment = np.var(word_count_for_user)
        feature.append(words_per_comment)
        feature.append(variance_of_words_per_comment)

    save_data(np.reshape(feature, ((-1, 2))), 'words_per_comment.data')
    return np.reshape(feature, ((-1, 2))), np.array(labels)
Example #14
 def save_file_path(self):
     print(self.ts_path)
     key=self.ts_path.strip()
     #value={'int':12,'float':9.5,'string':'sample data'}
     #value=self.test_object.pts_x_y
     #value=self.test_object.get_programe_PID()
     value=self.test_object.program_PID
     print "value is %s"%value
     data = save_data.save_data()
     data.creat_shelf(key,value)     
     data.print_shelf(key)
     data.get_all_val()
 def load_file(self):
     filename = self.open_file()
     print(filename)
     if filename == "":
         show_text = "取消读取文件"
         self.change_show_text(show_text)
         return
     show_text = "正在打开" + filename + " \n最大读取包数:" + str(self.packet_load_num)
     self.change_show_text(show_text)
     self.root.update()
     this_pcap_file = Pcap_class.build_pcap(filename, self.packet_load_num)
     show_text = "成功创建Pcap类,共含" + str(len(this_pcap_file.get_packets())) + "个流量包"
     self.change_show_text(show_text)
     save_data.clear_all_table()
     show_text = "清空临时表完毕"
     self.change_show_text(show_text)
     save_data.save_data(this_pcap_file)
     show_text = "填写临时表完毕"
     self.change_show_text(show_text)
     static_datas = statistic_analysis.statistic_analysis_entrance()
     show_text = "统计分析完成"
     self.change_show_text(show_text)
     print(static_datas)
     self.fill_data_table(static_datas)
Example #16
 def get_file_path(self):
     
     data = save_data.save_data()
     stream_list = data.get_all_val()
     print(stream_list)
     self.tableWidget.setRowCount(len(stream_list))
     #self.tableWidget.setColumnCount(len(stream_list))
     
     column = 0
     for i in stream_list:
         self.plainTextEdit.appendPlainText(i[0].decode('utf8'))
         newItem = QTableWidgetItem(i[0].decode('utf8'))
         newItem_value = QTableWidgetItem(str(i[1]))
         self.tableWidget.setItem(column, 0, newItem)
         self.tableWidget.setItem(column, 1, newItem_value)
         column += 1
Example #17
    def get_file_path(self):

        data = save_data.save_data()
        stream_list = data.get_all_val()
        print(stream_list)
        self.tableWidget.setRowCount(len(stream_list))
        #self.tableWidget.setColumnCount(len(stream_list))

        column = 0
        for i in stream_list:
            self.plainTextEdit.appendPlainText(i[0].decode('utf8'))
            newItem = QTableWidgetItem(i[0].decode('utf8'))
            newItem_value = QTableWidgetItem(str(i[1]))
            self.tableWidget.setItem(column, 0, newItem)
            self.tableWidget.setItem(column, 1, newItem_value)
            column += 1
Example #18
    def test_normal(self):
        """ нормальна ситуація """
        # перейменовуємо файл даних у резервний, записуємо дані в основний
        a = save_data(goods, PATH_FILE, FILE_DATA)
        self.assertEqual(a, 0)

        # check that the data file exists and verify its contents
        fd = os.access(PATH_FILE + '\\' + FILE_DATA, os.F_OK)
        self.assertTrue(fd, 'Not data file!')
        with open(PATH_FILE + '\\' + FILE_DATA, 'r') as f:
            self.assertMultiLineEqual(str(goods), f.read(), 'Data not true')

        # check that the backup file exists and verify its contents
        fb = os.access(PATH_FILE + '\\' + FILE_BK, os.F_OK)
        self.assertTrue(fb, 'Not backup file!')
        with open(PATH_FILE + '\\' + FILE_BK, 'r') as f:
            self.assertMultiLineEqual(str(goods_data), f.read(),
                                      'BackUp not true')
Example #19
    def test_zero_zero(self):
        """ коли список товарів пустий """
        # тоді функція нічого не робить
        goods = []
        a = save_data(goods, PATH_FILE, FILE_DATA)
        self.assertEqual(a, -1, 'Not -1 code for zero goods')

        # check that the data file exists and verify its contents
        fd = os.access(PATH_FILE + '\\' + FILE_DATA, os.F_OK)
        self.assertTrue(fd, 'Not data file!')
        with open(PATH_FILE + '\\' + FILE_DATA, 'r') as f:
            self.assertMultiLineEqual(str(goods_data), f.read(),
                                      'Data not true')
        # check that the backup file exists and verify its contents
        fb = os.access(PATH_FILE + '\\' + FILE_BK, os.F_OK)
        self.assertTrue(fb, 'Not backup file!')
        with open(PATH_FILE + '\\' + FILE_BK, 'r') as f:
            self.assertMultiLineEqual(str(goods_old), f.read(),
                                      'BackUp not true')
Example #20
face_cascade = cv2.CascadeClassifier('../haarcascade_frontalface_default.xml')
# exit variable
want_to_quit = 0
# list of images
new_data = []

while not want_to_quit:
    ret, frame = cap.read()

    # getting face position
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face = face_cascade.detectMultiScale(gray, 1.3, 7)

    # draw a rectangle around each detected face
    for (x, y, w, h) in face:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)

    # print cam
    cv2.imshow("cam", frame)
    cv2.waitKey(10)

    # add an image into the list
    if keyboard.is_pressed(' ') and len(face) == 1:
        new_data.append(get_trained_data(gray, face))

    # quit
    if keyboard.is_pressed('q'):
        want_to_quit = 1

save_data(new_data, sys.argv[1])
print(sys.argv[1], "has been saved in the database!")
Example #21
#x[4] = V*math.sin(beta)
#x[5] = V*math.sin(alfa)*math.cos(beta)
x[0, 10] = p_inter[0]  # initial rotation in the body frame
x[1:N + 1, 10] = p_inter
x[0, 11] = q_inter[0]
x[1:N + 1, 11] = q_inter
x[0, 12] = r_inter[0]
x[1:N + 1, 12] = r_inter
x[0, 13] = alpha_inter[0]
x[1:N + 1, 13] = alpha_inter
x[0, 14] = beta_inter[0]
x[1:N + 1, 14] = beta_inter

#sv.save_data(N, time, x, Ixx, Iyy, Izz)
#sv.save_data(N-1, time, x, 0, 0, 0,dir_save)
sv.save_data(N, time_hdf5, x, 0, 0, 0, dir_save)

print(x[1, :])
'''
#print(np.shape(data))
plt.plot(t_alpha,alpha,label='alpha BFP')
plt.plot(time,alpha_inter,label='alpha inter')
plt.legend()
#plt.set_xlim([min(time), max(time)])
plt.title('Alpha BFP')
plt.xlabel('Time [s]')

#print(np.shape(data))
plt.plot(t_Fztot,Fztot,label='Fz tot BFP')
plt.plot(time,Fztot_inter,label='Fz tot inter')
plt.plot(time,Fz,label='Fz s/G inter')
Example #22
 def on_post(self, req, resp):
     body = json.loads(req.stream.read().decode('utf-8'))
     msg = save_data.save_data(body['data'], body['file_name'])
     resp.body = json.dumps(msg)
     resp.status = falcon.HTTP_200
Example #23
# Plot 2
plt.figure(figsize=(5,5))
sns.set(style="darkgrid") 
print(df['type'])
sns.countplot(x="type", data=df, order = df['type'].value_counts().index)
plt.xlabel('Type')
plt.ylabel('Frequency')
plt.show()
'''

# Plot 3
X_sentiment_features = load_data('sentiment_scores.dat')
X_bow, y = get_bag_of_words('data.dat')
X_wpc = load_data('words_per_comment.data')

X_avg_senti = [mean(l) for l in X_sentiment_features]
X_avg_senti = np.reshape(X_avg_senti, ((-1, 1)))
X_bow = X_bow[:, :20]
X_wpc = np.reshape(X_wpc, ((8674, -1)))

X = np.append(X_bow, X_avg_senti, axis=1)
X = np.append(X, X_wpc, axis=1)
#print(y)

print(X_bow)
save_data(X, 'featur2_data.dat')

model = XGBClassifier()
model.fit(X, y)
plot_importance(model)
pyplot.show()
Example #24
    pd.set_option('display.width', desired_width)
    pd.set_option('display.max_columns', 8)

    print(final_df)


if __name__ == '__main__':
    print('Running Program...')
    countries = get_countries('https://www.residentadvisor.net/promoters.aspx')

    for country in countries:
        country_data = []
        print('-' * 45)
        print(f'Getting data from {country.name}')
        print('-' * 45)
        cities = get_cities(url=country.link)

        for city in cities:
            promoters, total_records = get_links(city_link=city.link)
            print(f'Total Promoters Found in {city.name}: {total_records}')
            # rec = input('Records to pull (0 means all) > ')
            country_data.extend(
                get_init_data(promoter_list=promoters, num_of_records=int(0)))
        final_data_frame = format_data(country_data)
        print(f'Saving data for {country.name}...')
        print('-' * 45)
        save_data('Data', country.name, final_data_frame)

        # Uncomment this to show results in console
        show_results(final_data_frame)
Example #25
def main():
    brand_lines = brand_scrap(11, 11)
    cars_data = CarsData("test", brand_lines)
    save_data(cars_data, "bmw_test")
Example #26
                # collect all races' rows from the table
                # find all links, which match a pattern
                race_path = urlparse.urlparse(venue_url).path
                link_pattern = re.compile(r'%s/\d$' % race_path, re.IGNORECASE)
                races = venue_soup.find_all('a', {'href': link_pattern})
                races_hrefs = set()
                for race in races:
                    races_hrefs.add(race['href'])

                # get race data
                races = get_races(races_hrefs)
                print('Parsed {0} races.'.format(len(races)))

                venues[venue] = races

            print('Parsing finished.')

            save_data(venues)
            print('Data saved!')
            print('Finished!\n')
        except Exception as ex:
            if count_tries == 3:
                raise

            print('{0}! Retrying ...'.format(ex))
            time.sleep(180)
            count_tries += 1
            continue

        break
Example #27
    temp_file_number = 1
    for person in tweets:
        filename = "../data/%d.dat" % temp_file_number
        if os.path.exists(filename):
            temp_file_number += 1
            continue
        all_tokenized = cleaning(person[1])
        all_lematized = []
        for list_of_tokens in all_tokenized:
            lemmatized = lemmatization(list_of_tokens)
            all_lematized.append(lemmatized)
        to_append = [person[0],all_lematized]
        final.append(to_append)
        save_data.save_data(to_append, filename)
        temp_file_number+=1

    return final

if __name__ == "__main__":
    start = time.time()
    tweets = get_tweets.get_tweets_list('../project/mbti_1.csv')
    size = len(tweets)
    # print(tweets)

    data = text_process(tweets)
    save_data.save_data(data,"data.dat")
    end = time.time()
    print(end - start)
    # print(data)

    # print(save_data.load_data("data.dat"))
Example #28
 if train:
         save.csvfile.close()
     break
 elif k == ord('m'):
     status_mouse = not status_mouse
     status_dino = False
 elif k == ord('d'):
     status_dino = not status_dino
     status_mouse = False
 elif k == ord('b'):
     bgImg = cropped_img
     isBgCaptured = 1
     print('[INFO] Background Captured')
 if train:
     if k == ord('0'):
         save.save_data(capture, finger_point_pos, finger_angle_pos, 0,
                        thresh)
         capture += 1
     elif k == ord('1'):
         save.save_data(capture, finger_point_pos, finger_angle_pos, 1,
                        thresh)
         capture += 1
     elif k == ord('2'):
         save.save_data(capture, finger_point_pos, finger_angle_pos, 2,
                        thresh)
         capture += 1
     elif k == ord('3'):
         save.save_data(capture, finger_point_pos, finger_angle_pos, 3,
                        thresh)
         capture += 1
     elif k == ord('4'):
         save.save_data(capture, finger_point_pos, finger_angle_pos, 4,
Example #29
     eokey = utf.get_evenodd(key)
     if eokey == 'o':
      p0[key] = df.define_init[key][:df.num_ost]
     else:
      p0[key] = df.define_init[key][:df.num_nst]
    fit = fitter.lsqfit(data=data,prior=prior,p0=p0,svdcut=df.svdcut)
   except KeyError:
    print "Could not use initial point definitions"
    fit = fitter.lsqfit(data=data,prior=prior,svdcut=df.svdcut)
  else:
   fit = fitter.lsqfit(data=data,prior=prior,svdcut=df.svdcut)
  #bs_avg = make_bootstrap(fitter,dset,df.mdp.n_bs)
  print_fit(fit,prior)
  print_error_budget(fit)
  #save_data(mdp.output_path +'/'+ mdp.fit_fname,fit,data)
  save_data('./test.fit.out',fit,data)
  save_prior_from_fit(df.define_prior,df.define_model,fit,"test.prior.out",
    round_e=2,round_a=1,preserve_e_widths=True,preserve_a_widths=True)
  
  if df.do_plot:
   if df.do_default_plot:
    fitter.display_plots()
   plot_corr_double_log(models,data,fit,**df.fitargs)
   plot_corr_normalized(models,data,fit,**df.fitargs)
   plt.show()
pass #do_2pt

if df.do_3pt:
  ## -- test routines
  if df.do_symm == "s":
    if df.do_irrep == "8":
Example #30
    with open("D:\\pychardir\\price_evaluate\\data\\train.tsv", 'r', encoding='utf8') as f:
        p = csv.reader(f, delimiter="\t")
        index = 0
        datas = []
        for i, r in tqdm(enumerate(p), desc="iteratordata"):
            if index == 0:
                print(r)
                index += 1
            else:
                elem = r[-1].strip().replace("\n", "").replace(" [rm]", "")
                datas.append([r[1],r[2], r[3], r[4],r[6], elem, r[5]])
    filtered_datas = filter_dataset(datas)
    saveData = {
        "all_data": filtered_datas
    }
    save_data.save_data(save_fileName,saveData)
else:
    saveData = save_data.load_data(save_fileName)

    name_max = 0
    category_max = 0
    brand_max = 0
    sum_len = 0
    for elem in saveData["all_data"]:
        data = elem[0]
        if sum_len == 0:
            print(data)
            sum_len+=1
        name_temp_len = len(data[0])
        category_temp_len = len(data[2])
        brand_temp_len = len(data[3])
Example #31
    'alpha': [0.001, 1],
    'num_hidden_layers': {
        'one_layer': {
            'n_hidden_1': [1, 128]
        },
        'two_layers': {
            'n_hidden_1': [1, 128],
            'n_hidden_2': [1, 128]
        },
        'three_layers': {
            'n_hidden_1': [1, 128],
            'n_hidden_2': [1, 128],
            'n_hidden_3': [1, 128]
        }
    }
}

#main process
if __name__ == "__main__":

    #options are classification, regression and unsupervised learning
    #pmap=optunity.pmap
    hps, info, _2 = optunity.minimize_structured(train_model_mlp,
                                                 space_classification,
                                                 num_evals=5)

    print "optimised vals: ", hps  # results
    print "optimisation information: ", info.stats
    print "saving data..."
    save_data(hps, info)
Example #32
from barbarian import Barbarian, Tank, Totem
from bard import Bard, Eloquent, Valor
from character import Character
from fighter import Fighter, Sword, Bow
from rogue import Rogue, Assasin, Thief
from wizard import Wizard, Storms, Arcanum

# Checks if a saved character already exists###################################
load = 'no'

if path.exists("savedcharacter.obj"):
    load = input('Would you like to load your last saved character? ')
# Load Previously Saved Character##############################################
if (load == 'yes'):
    file = open('savedcharacter.obj', 'rb')
    save_data(file)

# Initial Character Creation with Random stats#################################
elif not path.exists("savedcharacter.obj") or load != 'yes':
    strength = funcs.d20(8)
    constitution = funcs.d20(10)
    dexterity = funcs.d20(8)
    wisdom = funcs.d20(8)
    intelligence = funcs.d20(8)
    charisma = funcs.d20(8)

    player = Character(strength, constitution, dexterity, intelligence, wisdom,
                       charisma)
    print(player.getStats())

    # Allows user to choose what class their character is and resets character#####
Example #33
        # load an agent
        agent = agents[scheme]

        # start experiment
        obs = env.observe()
        c = 0
        total_reward = 0
        done = False
        s_start = time.time()
        while not done:
            node, use_exec = agent.get_action(obs)
            obs, reward, done = env.step(node, use_exec)
            total_reward += reward
            c = c + 1
        s_end = time.time()
        save_data.save_data(s_end - s_start, env, scheme, c,
                            args.result_folder)
        all_total_reward[scheme].append(total_reward)

        original = os.getcwd()
        os.chdir(args.result_folder)
        visualize_dag_time_save_pdf(
                env.finished_job_dags, env.executors,
                args.result_folder + 'visualization_dag_time_exp_' + \
                str(exp) + '_scheme_' + scheme + \
                '.png', plot_type='app')

        visualize_executor_usage(env.finished_job_dags,
                args.result_folder + 'visualization_ex_usage_exp_' + \
                str(exp) + '_scheme_' + scheme + '.png')
        os.chdir(original)
Example #34
shape_input = train_data[0][0].shape
size_inputs = train_data[0][0].size
num_outputs = train_data[0][1].size
num_hidden = train_data[0][1].size

from autoencoder_conf import prepare_autoencoder2
encoder, loss_encoder, decoder, loss_decoder = prepare_autoencoder2(num_hidden, num_outputs, model_ctx)
elapsed_time = time.time() - start_time
print('Time of initializing data: ', elapsed_time)

num_epochs = 15
learning_rate = .008

from pretrain_stack import pretrain_stack
start_time = time.time()
encoder, decoder = pretrain_stack(train_data, encoder, loss_encoder, decoder, loss_decoder, model_ctx, num_epochs, learning_rate)
elapsed_time = time.time() - start_time
print('Time of pretraining net: ', elapsed_time)

from train import train
start_time = time.time()
encoder, decoder = train(train_data, test_data, encoder, loss_encoder, decoder, loss_decoder, model_ctx, num_epochs, learning_rate)
elapsed_time = time.time() - start_time
print('Time of training net: ', elapsed_time)

from save_data import save_data
start_time = time.time()
save_data("./results/predictions/lab4_conf2_pred_stack.npy", all_data, train_data, encoder, model_ctx)
elapsed_time = time.time() - start_time
print('Time saving net: ', elapsed_time)
Example #35

# execute
if __name__ == '__main__':
    count_tries = 1
    count_fails = 1
    while count_tries <= 2 and count_fails <= 3:
        try:
            brisbane_timezone = pytz.timezone('Australia/Brisbane')
            brisbane_now = datetime.now(brisbane_timezone)
            
            # parse web pages
            all_races = main()
            print('Parsed %s venues.' % len(all_races))

            save_data(all_races)

            print('Data saved to the database.')

            print('Scraping successful! Retrying ...')
            count_tries += 1
            time.sleep(30)
        except Exception as ex:
            if count_fails == 3:
                raise

            print('Scraping failed: {0}! Retrying ...'.format(ex))
            time.sleep(60)
            count_fails += 1
            count_tries -= 1
            continue