Example #1
    def normal(self):
        Tools.clear()

        comparador = Comparador(2)

        while (self.running is True):
            print(self.tablero)
            #time.sleep(0.5)
            c = comparador.comparar(self.tablero.tablero)

            if (c is not False):
                if (c == 2):
                    input('Level-2 oscillator found, press Enter to finish')
                    self.running = False

                elif (c == 1):
                    input('Still life found, press Enter to finish')
                    self.running = False
            else:
                self.tablero.tablero = GeneradorPatrones.nextStep(
                    self.tablero.tablero)
                input('Press Enter to continue')
            Tools.clear()
Example #2
 def action(self):
     choices = Tools.get_choose_dict(
         data={"choose": list(self.actions.keys())})
     Tools.print_choose_dict(choices)
     user_input = input()
     if user_input not in choices.keys():
         Tools.print_error("Please enter valid data.")
     else:
         self.context.next(self.actions[choices[user_input]]())
Example #3
 def __init__(self, env, args):
     self.direction = args.direction
     self.env = env
     self.num_episodes = args.episodes
     self.episode_start = 0
     self.noise = OUNoise(mu=np.zeros(self.env.action_space.shape))
     self.noise_decay = args.noise_decay
     self.count_exp_replay = 0
     self.train_iteration = 0
     self.tau = args.TAU
     self.tools = Tools()
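
OUNoise itself is not shown in this listing. It is presumably an Ornstein-Uhlenbeck process, the mean-reverting exploration noise conventionally paired with DDPG-style agents; the sketch below shows a conventional implementation, where the interface and the default theta/sigma values are assumptions, not the repo's actual class:

import numpy as np

class OUNoise:
    """Ornstein-Uhlenbeck process (sketch): mean-reverting Gaussian noise."""

    def __init__(self, mu, theta=0.15, sigma=0.2):
        self.mu = mu          # long-run mean, e.g. np.zeros(action_dim)
        self.theta = theta    # mean-reversion rate
        self.sigma = sigma    # noise scale
        self.state = np.copy(mu)

    def reset(self):
        self.state = np.copy(self.mu)

    def sample(self):
        # drift back towards mu, plus Gaussian perturbation
        dx = self.theta * (self.mu - self.state) \
             + self.sigma * np.random.randn(*self.state.shape)
        self.state = self.state + dx
        return self.state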
Example #4
 def render(self, country, lan, company=None):
     with open("templates/modules/module_text/agencyAdd.json") as json_file:
         form = json.load(json_file)
     tools = Tools()
     agency_list = tools.get_list_of_agencies(country)
     return self.render_string('modules/agencyAdd.html',
                               c=company,
                               country=country,
                               agency_list=json.dumps(agency_list),
                               lan=lan,
                               form=form[lan][country])
Example #5
 def download_file(self, category, file):
     response = self.context.api.get_data(file["id"])
     if response.status_code == 200:
         path = Path(f"{self.context.storage_path}/{category['name']}/")
         path.mkdir(parents=True, exist_ok=True)
         file_name = f"{path}/{Tools.random_string()}{mimetypes.guess_extension(response.headers['content-type'])}"
         if not os.path.isfile(file_name):
             print(path)
             with open(file_name, "wb+") as destination:
                 for chunk in response:
                     destination.write(chunk)
         Tools.print_ok_message(f"File successfully downloaded to {file_name}")
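
Two details to watch when reusing this snippet: mimetypes.guess_extension returns None for unknown content types (which would put the literal string "None" in the file name), and iterating a requests Response directly yields small fixed-size chunks. A hedged, slightly hardened variant of the write path, assuming the same requests Response and Tools helpers:

# Hypothetical hardened variant, not the original repo's code:
ext = mimetypes.guess_extension(response.headers["content-type"]) or ".bin"
file_name = f"{path}/{Tools.random_string()}{ext}"
if not os.path.isfile(file_name):
    with open(file_name, "wb") as destination:
        for chunk in response.iter_content(chunk_size=8192):  # explicit chunk size
            destination.write(chunk)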
Example #6
 def action(self):
     choices = Tools.get_choose_dict(
         data={"choose": list(self.actions.keys())})
     Tools.print_choose_dict(choices)
     user_input = input()
     if user_input not in choices.keys():
         Tools.print_error("Please enter valid data.")
     elif choices[user_input] == "Выбрать категорию":  # "Select category"
         go_to = self.handle_download()
         next_state = go_to if go_to is not None else self.actions[choices[user_input]]
         self.context.next(next_state())
     else:
         self.context.next(self.actions[choices[user_input]]())
Example #7
 def render(self, country, lan, company=None):
     with open("templates/modules/module_text/agencyAdd.json") as json_file:
         form = json.load(json_file)
     tools = Tools()
     agency_list = tools.get_list_of_agencies(country)
     return self.render_string(
         'modules/agencyAdd.html',
         c=company,
         country=country,
         agency_list=json.dumps(agency_list),
         lan=lan,
         form=form[lan][country])
Example #8
 def handle_file_download(self, category):
     files, choices = self.download_choices(category, "name")
     file_name_input = input()
     if file_name_input not in choices.keys():
         Tools.print_error("Please enter valid data.")
     elif choices[file_name_input] in base_actions.keys():
         return base_actions[choices[file_name_input]]
     else:
         file = 0
         for d in files:
             if d["name"] == choices[file_name_input]:
                 file = d
         self.download_file(category, file)
     return None
Example #9
 def handle_user_input_download(self, category):
     values, choices = self.download_choices(category, "name")
     choice = input()
     if choice not in choices.keys():
         Tools.print_error("Please enter valid data.")
     elif choices[choice] in base_actions.keys():
         return base_actions[choices[choice]]
     else:
         user_input = {}
         for d in values:
             if d["name"] == choices[choice]:
                 user_input = d
         response = self.context.api.get_data(user_input["id"])
         if response.status_code == 200:
             Tools.print_ok_message("Data received successfully.")
             print(response.text)
Example #10
def init():
    tools = Tools()

    for i in range(200):
        observation, reward, done, info = env.step(
            tools.active_one_muscle("iliopsoas", "r", 1))

        if i == 20:
            state_desc = env.get_state_desc()
            print(type(state_desc["body_pos"]["toes_r"][0:2]))
            print(state_desc["body_pos"]["talus_l"][0:2])
            print(state_desc["body_pos"]["talus_r"][0:2])
            print(state_desc["misc"]["mass_center_pos"])
            print(state_desc["body_pos_rot"])
            input("Press Enter to continue...")
            print(reward)
Example #11
 def get_html(self, page):
     url = self.base_url.format(sys.argv[1], page)
     html = Tools.get_response(url)
     if html is None:
         return
     html_xpath = etree.HTML(html)
     return html_xpath
Example #12
def first_move_batter_NN(joints_array_batter,
                         release_frames,
                         model="saved_models/batter_first_step",
                         start_after_release=10,
                         sequence_length=40):
    """
    Neural network method: takes an array of some joint trajectories data,
    cuts it to length 32, starting from 10 frames after the relase frame,
    returns predicted first movement frame index

    joints_array_batter: list or array of size nr_data, nr_frames, nr_joints, nr_cordinates
    (should be smoothed and interpolated) - can be list because different data can have different nr_frames
    release frames: array of size nr_data, required to cut array at the right spot
    """
    # start_after_release = int(model.split("_")[-2])
    # sequence_length = int(model.split("_")[-1])

    # print(start_after_release, sequence_length)
    data = []
    for i, d in enumerate(joints_array_batter):
        cutoff_min = release_frames[i] + start_after_release
        cutoff_max = cutoff_min + sequence_length
        data.append(d[cutoff_min:cutoff_max, :12])
    data = Tools.normalize01(np.array(data))
    lab, out = test(data, model)
    labels = np.asarray(
        lab.reshape(-1)) + np.asarray(release_frames) + start_after_release
    return labels
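
A quick smoke test for the function above might look as follows; the clip count, frame counts, and release frames are invented, and the default checkpoint under saved_models/ must already exist on disk:

import numpy as np

# Hypothetical input: 3 clips, 100 frames each, 12 joints, 2 coordinates
joints = [np.random.rand(100, 12, 2) for _ in range(3)]
release = np.array([20, 25, 30])
first_frames = first_move_batter_NN(joints, release)
print(first_frames)  # one predicted first-movement frame index per clip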
Example #13
 def action(self):
     choices = Tools.get_choose_dict(
         data={"choose": list(self.actions.keys())})
     Tools.print_choose_dict(choices)
     user_input = input()
     if user_input not in choices.keys():
         Tools.print_error("Please enter valid data.")
     elif choices[user_input] == "Начать ввод":  # "Start input"
         raw_data = input()
         if os.path.isfile(raw_data):
             with open(raw_data, "rb") as f:  # close the file handle after upload
                 self.context.api.create_file({"data": f})
         else:
             self.context.api.create_user_input(raw_data)
         self.context.next(self.actions[choices[user_input]]())
     else:
         self.context.next(self.actions[choices[user_input]]())
Example #14
        def coordenar(ancho, coordenadas):
            t = Tablero(ancho, ancho)
            for c in coordenadas:
                x, y = Tools.calccoord(ancho, c)
                t.tablero[x][y] = 1
            return t
Example #15
 async def getPlayerBEID(self, player: str):
     # get an updated player list, but only if the player was not found
     if player not in Tools.column(self.playerList, 4):
         self.playerList = await self.CommandRcon.arma_rcon.getPlayersArray()
     for id, ip, ping, guid, name in self.playerList:
         if name.endswith(" (Lobby)"):  # strip " (Lobby)" from the name
             name = name[:-8]
         if player == name:
             return id
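
getPlayerBEID is a coroutine, so it must be awaited, and it implicitly returns None when no player matches. A hypothetical call site inside another coroutine of the same class:

player_id = await self.getPlayerBEID("SomePlayer")
if player_id is None:
    print("player not found")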
Example #16
def testing(data, labels, save_path):
    """
    Tests movement classification model on the first 5% of the data in the csv file (trained on last 95%)
    """

    print("Data shape", data.shape, "Mean of data", np.mean(data))
    tic = time.time()
    labs, out = test(data, save_path)
    toc = time.time()
    print("time for nr labels", len(labs), toc - tic)
    for i in range(20):  #len(labs)):
        print(labs[i], np.around(out[i], 2))

    #  To compare with labels
    print(labels.shape)
    for i in range(20):  #len(labels)):
        print('{:20}'.format(labels[i]), '{:20}'.format(
            labs[i]))  #, ['%.2f        ' % elem for elem in out_test[i]])
    print("Accuracy:", Tools.accuracy(np.asarray(labs), labels))
    print("Balanced accuracy:",
          Tools.balanced_accuracy(np.asarray(labs), labels))
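
A hypothetical invocation of testing(); the .npy file names and the checkpoint prefix are made up, and the arrays must have the shapes the model was trained on:

import numpy as np

data = np.load("test_data.npy")      # (nr_examples, nr_frames, nr_joints, nr_coordinates)
labels = np.load("test_labels.npy")  # (nr_examples,) string labels
testing(data, labels, "saved_models/movement_classifier")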
Example #17
 def handle_download(self):
     categories = json.loads(self.context.api.get_categories().text)
     choices = self.format_to_choose(categories, "name")
     category_input = input()
     if category_input not in choices.keys():
         Tools.print_error("Please enter valid data.")
     elif choices[category_input] in base_actions.keys():
         return base_actions[choices[category_input]]
     else:
         category = {}
         is_file = True
         for d in categories:
             if d["name"] == choices[category_input]:
                 category = d
                 if d["name"] == "Строки" or d["name"] == "Числа":
                     is_file = False
         if is_file:
             return self.handle_file_download(category)
         else:
             return self.handle_user_input_download(category)
     return None
Example #18
    def still_life(self):
        def coordenar(ancho, coordenadas):
            t = Tablero(ancho, ancho)
            for c in coordenadas:
                x, y = Tools.calccoord(ancho, c)
                t.tablero[x][y] = 1
            return t

        tam = len(self.tablero.tablero)
        self.combs = math.factorial(
            tam**2) / (math.factorial(tam**2 - self.vidas) * math.factorial(self.vidas))
        Tools.clear()

        print("Combinaciones: " + repr(self.combs))
        print("\n" + "Trabajando..." + "\n")

        if len(self.combMatrix) == 0:
            self.combMatrix = list(combinations(range(tam**2), self.vidas))

        i = self.iter
        for c in range(i, len(self.combMatrix)):
            self.iter = self.iter + 1
            t = coordenar(tam, self.combMatrix[c])
            comp = Comparador(1)
            comp.pushTablero(t.tablero)
            if comp.comparar(GeneradorPatrones.nextStep(t.tablero)) == 1:
                self.stills += 1
                print(self.combMatrix[c])
                print("\n")
                print(t)
                self.work.append(self.combMatrix[c])
                print(
                    "\n" + "Progress: " + ("%.2f" % ((self.iter / self.combs) * 100)) + "%")
                # input("\n")
        print("\n" + "Trabajo Completado, presionar Enter " +
              "para volver al menu principal")
        input("Encontrados: " + repr(self.stills))
        self.running = False
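
The factorial expression near the top of still_life computes the binomial coefficient C(tam², vidas). On Python 3.8+ the same count can be obtained directly, and as an exact integer instead of a float:

self.combs = math.comb(tam**2, self.vidas)  # equivalent to the factorial formula above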
Example #19
 def run(self):
     page = 0
     while True:
         html_xpath = self.get_html(page)
         if html_xpath is None:
             break
         if page == 0:
             self.get_total(html_xpath)
         content_list = self.title_url(html_xpath)
         self.save_mongodb(content_list, page)
         page += 10
         time.sleep(Tools.sleep_seconds())
     print(f"此次一共抓取了{page}页数据")
Example #20
 def save_mongodb(content_list, page):
     """
     保存到mongodb数据库中
     :param content_list:
     :param page:
     :return:
     """
     collection = Tools.mongodb_cursor()
     for content in content_list:
         try:
             collection.insert_one(content)
         except Exception as e:
             # print("URL已存在", e)
             continue
     print(f"保存第{int((page + 10)/10)}页数据完成")
Example #21
        def arguments(function):
            if "name" in kwargs:
                name = kwargs["name"]
            else:
                name = function.__name__

            if name in Tools.column(RconCommandEngine.commands, 0):
                raise Exception("Command '{}' already exists".format(name))
            #init
            async def wrapper(*args, **kwargs):
                return await function(RconCommandEngine.cogs, *args, **kwargs)

            t = wrapper
            RconCommandEngine.commands.append(
                [name, t, function.__code__.co_varnames])
            return t
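
arguments() closes over kwargs, so it is presumably the inner function of a decorator factory on RconCommandEngine, and the wrapper it registers forwards RconCommandEngine.cogs as the first argument. A hypothetical registration, where the factory name registerCommand is an assumption:

# Hypothetical usage; the factory name is not confirmed by this listing:
@RconCommandEngine.registerCommand(name="ping")
async def ping(cogs, player):
    return "pong"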
Example #22
 def title_url(html_xpath):
     """
     处理响应,提取标题和标题对应URL
     :param html_xpath:
     :return:
     """
     content = html_xpath.xpath("//div[@class='c-tools']/@data-tools")
     if len(content) == 0:
         print("匹配标题和URL失败,请检查代码")
         exit()
     content_list = []
     for c_dict in content:
         c_dict = eval(c_dict)
         c_dict["_id"] = Tools.deal_hash(
             c_dict["url"].encode("utf-8"))  # 对URL进行hash处理做为'_id'
         c_dict["insert_time"] = int(time.time())
         c_dict["update_time"] = int(time.time())
         content_list.append(c_dict)
     return content_list
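
Calling eval on scraped markup executes arbitrary expressions. Since the data-tools attribute holds a dict-like payload, ast.literal_eval is a safer drop-in (or json.loads, if the payload is strictly JSON):

import ast

c_dict = ast.literal_eval(c_dict)  # parses dict/list literals without executing code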
Example #23
def test(data, restore_file):
    """
    Runs model of restore_file for the data
    data must be normalized before and aligned if it was normalized/aligned when training the model
    returns labels and vector output of labels
    """
    tf.reset_default_graph()

    saver = tf.train.import_meta_graph(restore_file + '.meta')
    graph = tf.get_default_graph()

    # try/except is weird, but somehow this made it work on the cluster
    try:
        sess = tf.InteractiveSession()
    except:
        sess = tf.Session()

    # restore
    saver.restore(sess, restore_file)
    out = tf.get_collection("out")[0]
    unique = tf.get_collection("unique")[
        0]  # required to have classes to decode the one hot vector in the end
    tic = time.time()

    # run for data
    out_test = sess.run(out, {"input:0": data, "training:0": False})

    # Decode one hot vectors
    toc = time.time()
    print("time for nr labels", toc - tic)
    pitches_test = Tools.decode_one_hot(out_test, unique.eval(session=sess))
    try:
        pitches = [elem.decode("utf-8") for elem in pitches_test]
    except AttributeError:
        pitches = pitches_test

    sess.close()
    return pitches, out_test
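
A hypothetical call to test(); the input file and checkpoint prefix are made up, and the data must already be normalized and aligned exactly as it was during training:

import numpy as np

data = Tools.normalize01(np.load("test_data.npy"))          # hypothetical input file
labels, raw_scores = test(data, "saved_models/pitch_type")  # hypothetical checkpoint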
Example #24
        'country': 'Korea', # this will be stored as 'kr'
        'companyName': row['company_name'].decode('utf8', errors='ignore'),
        'state': row['state'].decode('utf8', errors='ignore')
    })
    company.url = row['url'].decode('utf8', errors='ignore')
    company.yearFounded = row['year_founded'].decode('utf8', errors='ignore')
    company.city = row['city'].decode('utf8', errors='ignore')
    company.zipCode = row['zip_code'].decode('utf8', errors='ignore').zfill(5) 
    company.companyCategory = row['company_category'].decode('utf8', errors='ignore')
    company.companyType = row['company_type'].decode('utf8', errors='ignore')
    company.fte = row['full_time_employees'].decode('utf8', errors='ignore')
    company.revenueSource = row['revenue_source'].decode('utf8', errors='ignore').split(', ')
    company.businessModels = row['business_model'].decode('utf8', errors='ignore')
    company.socialImpact = row['social_impact'].decode('utf8', errors='ignore').split(', ')
    company.description = row['description'].decode('utf8', errors='ignore')
    company.prettyName = Tools.prettify(company['companyName'])
    company.filters = []

    if company.companyType.lower() == 'nonprofit':
        company.companyType = 'Non-profit'

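    # '10-Jan' and 'Nov-50' are '1-10' and '11-50' mangled into dates by Excel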
    if company.fte == '10-Jan':
        company.fte = '1 - 10'
    elif company.fte == 'Nov-50':
        company.fte = '11 - 50'

    company.companyCategory = company.companyCategory.split(',')[0]

    agency_names = row['agencies'].decode('utf8', errors='ignore').split(', ')
    agencies = []
    for agency_name in agency_names:
Example #25
    def run(self):
        try:
            shutil.rmtree("/Users/ninawiedemann/Desktop/UNI/Praktikum/logs")
            print("logs removed")
        except:
            print("logs could not be removed")

        nr_classes = len(self.unique)
        print("classes", self.unique)
        # labels = labels_string
        print("save path", self.SAVE)
        model = Model()

        M, N, nr_joints, nr_coordinates = self.data.shape

        # print("Test set size: ", len_test, " train set size: ", len_train)
        # print("Shapes of train_x", train_x.shape, "shape of test_x", test_x.shape)
        ind = np.random.permutation(len(self.data))
        SEP = int(M * 0.1)
        print("ind", ind)

        max_accuracy = []
        mean_end_accuracy = []
        balanced_max = []
        for i in range(10):
            print("--------------- Train on ", i,
                  "-th part of testing data (10-fold cross validation)")
            tf.reset_default_graph()
            sess = tf.InteractiveSession()

            train_ind = ind[:SEP * i].tolist() + ind[SEP * (i + 1):].tolist()
            # print("train_ind", len(train_ind), train_ind)
            print(train_ind[0], train_ind[-1])
            test_ind = ind[SEP * i:SEP * (i + 1)]
            print(test_ind[0], test_ind[-1])
            print(ind[0], ind[-1])
            # print("test ind", len(test_ind), test_ind)

            # self.data = Tools.normalize(self.data)

            labels = Tools.onehot_with_unique(self.labels_string, self.unique)
            ex_per_class = self.BATCH_SZ // nr_classes
            BATCHSIZE = nr_classes * ex_per_class

            train_x = self.data[train_ind]
            #me = np.mean(train_x)
            #std = np.std(train_x)
            test_x = self.data[test_ind]

            #train_x = (train_x - me)/std
            #test_x = (test_x -me)/std

            print("mean of train", np.mean(train_x))
            print("mean of test", np.mean(test_x))

            train_t = labels[train_ind]
            test_t = labels[test_ind]
            labels_string_train = self.labels_string[train_ind]
            labels_string_test = self.labels_string[test_ind]

            print(train_x.shape, train_t.shape, labels_string_train.shape,
                  test_x.shape, test_t.shape, labels_string_test.shape)
            #train_x, labels_string_train = Tools.balance(train_x, labels_string_train)
            index_liste = []
            for pitches in self.unique:
                index_liste.append(np.where(labels_string_train == pitches)[0])
            #print(index_liste)
            len_test = len(test_x)
            len_train = len(train_x)

            x = tf.placeholder(tf.float32,
                               (None, N, nr_joints, nr_coordinates),
                               name="input")
            y = tf.placeholder(tf.float32, (None, nr_classes))
            training = tf.placeholder_with_default(False,
                                                   None,
                                                   name="training")

            if self.network == "conv1d_big":
                out, logits = model.conv1d_big(x, nr_classes, training,
                                               self.rate_dropout, self.act)
            elif self.network == "adjustable conv1d":
                out, logits = model.conv1d_with_parameters(
                    x, nr_classes, training, self.rate_dropout, self.act,
                    self.first_conv_filters, self.first_conv_kernel,
                    self.second_conv_filter, self.second_conv_kernel,
                    self.first_hidden_dense, self.second_hidden_dense)
            elif self.network == "rnn":
                out, logits = model.RNN(x, nr_classes, self.n_hidden,
                                        self.nr_layers)
            elif self.network == "adjustable conv2d":
                out, logits = model.conv2d(
                    x, nr_classes, training, self.rate_dropout, self.act,
                    self.first_conv_filters, self.first_conv_kernel,
                    self.second_conv_filter, self.second_conv_kernel,
                    self.first_hidden_dense, self.second_hidden_dense)
            elif self.network == "conv 1st move":
                out, logits = model.conv1stmove(
                    x, nr_classes, training, self.rate_dropout, self.act,
                    self.first_conv_filters, self.first_conv_kernel,
                    self.second_conv_filter, self.second_conv_kernel,
                    self.first_hidden_dense, self.second_hidden_dense)
            elif self.network == "combined":
                out_normal, logits = model.conv1stmove(
                    x, nr_classes, training, self.rate_dropout, self.act,
                    self.first_conv_filters, self.first_conv_kernel,
                    self.second_conv_filter, self.second_conv_kernel,
                    self.first_hidden_dense, self.second_hidden_dense)
                out_normal = tf.reshape(out_normal, (-1, self.unique[0], 1, 1))
                wrist_ellbow_right = tf.reduce_mean(
                    x[:, :, 1:3, 1], 2)  # y coordinate of elbow and wrist
                print(wrist_ellbow_right)
                wrist_ellbow_left = tf.reduce_mean(x[:, :, 4:6, 1], 2)
                print(wrist_ellbow_left)
                shoulder_left = tf.reshape(x[:, :, 0, 1],
                                           (-1, self.unique[0], 1))
                shoulder_right = tf.reshape(x[:, :, 3, 1],
                                            (-1, self.unique[0], 1))
                print(shoulder_right)
                shoulder_both = tf.concat([shoulder_left, shoulder_right], 2)
                print(shoulder_both)
                shoulders = tf.reduce_mean(shoulder_both,
                                           2)  # y coordinate of shoulders
                print(shoulders)
                new_x = tf.reshape(
                    tf.concat([
                        tf.reshape(wrist_ellbow_right - shoulders,
                                   (-1, self.unique[0], 1)),
                        tf.reshape(wrist_ellbow_left - shoulders,
                                   (-1, self.unique[0], 1))
                    ], 2), (-1, self.unique[0], 2, 1))
                print(new_x)
                #out_wrist_ellbow, logits = model.conv1stmove(x, nr_classes, training, self.rate_dropout, self.act, self.first_conv_filters, self.first_conv_kernel, self.second_conv_filter,
                #                                            self.second_conv_kernel, self.first_hidden_dense, self.second_hidden_dense)
                combined_x = tf.concat([out_normal, new_x], 2)
                out, logits = model.conv1d_with_parameters(
                    combined_x, nr_classes, training, self.rate_dropout,
                    self.act, self.first_conv_filters, self.first_conv_kernel,
                    self.second_conv_filter, self.second_conv_kernel,
                    self.first_hidden_dense, self.second_hidden_dense)
            elif self.network == "sv+cf":
                out_cf, logits_cf = model.conv1stmove(
                    x[:, :, :, :2], nr_classes, training, self.rate_dropout,
                    self.act, self.first_conv_filters, self.first_conv_kernel,
                    self.second_conv_filter, self.second_conv_kernel,
                    self.first_hidden_dense, self.second_hidden_dense)

                out_sv, logits_sv = model.conv1stmove(
                    x[:, :, :, 2:], nr_classes, training, self.rate_dropout,
                    self.act, self.first_conv_filters, self.first_conv_kernel,
                    self.second_conv_filter, self.second_conv_kernel,
                    self.first_hidden_dense, self.second_hidden_dense)
                combined_x = tf.reshape(tf.concat([out_cf, out_sv], 2),
                                        (-1, N, 2, 1))
                out, logits = model.conv1d_with_parameters(
                    combined_x, nr_classes, training, self.rate_dropout,
                    self.act, self.first_conv_filters, self.first_conv_kernel,
                    self.second_conv_filter, self.second_conv_kernel,
                    self.first_hidden_dense, self.second_hidden_dense)
            elif self.network == "conv+rnn":
                first_out, _ = model.conv1stmove(x,
                                                 nr_classes,
                                                 training,
                                                 self.rate_dropout,
                                                 self.act,
                                                 self.first_conv_filters,
                                                 self.first_conv_kernel,
                                                 self.second_conv_filter,
                                                 self.second_conv_kernel,
                                                 self.first_hidden_dense,
                                                 self.second_hidden_dense,
                                                 out_filters=20)
                print(first_out)
                out, logits = model.RNN(first_out, nr_classes, self.n_hidden,
                                        self.nr_layers)
            elif self.network == "split_train":

                first_out = model.only_conv(
                    x, nr_classes, training, self.rate_dropout, self.act,
                    self.first_conv_filters, self.first_conv_kernel,
                    self.second_conv_filter, self.second_conv_kernel,
                    self.first_hidden_dense, self.second_hidden_dense)
                print(first_out)
                with tf.variable_scope("rnn"):
                    out, logits = model.RNN(first_out, nr_classes,
                                            self.n_hidden, self.nr_layers)

                # shapes = first_out.get_shape().as_list()
                # ff = tf.reshape(first_out, (-1, shapes[1]*shapes[2]))
                # ff = tf.layers.dense(ff, self.first_hidden_dense, activation = self.act, name = "ff1")
                # if self.second_hidden_dense!=0:
                #     ff = tf.layers.dense(ff, self.second_hidden_dense, activation = self.act, name = "ff2")
                # logits = tf.layers.dense(ff, nr_classes, activation = None, name = "ff3")
                # out = tf.nn.softmax(logits)
            else:
                print("ERROR, WRONG", self.network, "INPUT")

            tv = tf.trainable_variables()

            out = tf.identity(out, "out")
            uni = tf.constant(self.unique, name="uni")

            if len(self.unique) == 1:
                out = tf.sigmoid(logits)
                loss = tf.reduce_mean(tf.square(y - out))
            else:
                loss_entropy = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits(labels=y,
                                                            logits=logits))
                loss_regularization = self.regularization * tf.reduce_sum(
                    [tf.nn.l2_loss(v) for v in tv])
                loss = loss_entropy + loss_regularization  #+  loss_maximum #0.001  loss_entropy +

            # max_out = tf.argmax(out, axis = 1)
            # max_lab = tf.argmax(y, axis = 1)
            # diff = tf.cast(max_out-max_lab, tf.float32)
            # loss = tf.reduce_mean(tf.square(diff))

            optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(
                loss
            )  #, var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='rnn'))

            # TENSORBOARD comment all in
            # tf.summary.scalar("loss_entropy", loss_entropy)
            # tf.summary.scalar("loss_regularization", loss_regularization)
            # tf.summary.scalar("loss_maximum", loss_maximum)
            # tf.summary.scalar("loss", loss)
            #
            # merged = tf.summary.merge_all()
            # train_writer = tf.summary.FileWriter("./logs/nn_logs" + '/train', sess.graph)

            def scope_variables(name):
                with tf.variable_scope(name):
                    return tf.get_collection(
                        tf.GraphKeys.GLOBAL_VARIABLES,
                        scope=tf.get_variable_scope().name)

            # train_vars = scope_variables("convolution")
            # print(train_vars)
            # saver = tf.train.Saver(train_vars)
            saver = tf.train.Saver(tv)

            #saver1 = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='convolution'))
            #saver1.restore(sess, self.SAVE)

            #tf.summary.scalar("loss_entropy", loss_entropy)
            #tf.summary.scalar("loss_regularization", loss_regularization)
            # tf.summary.scalar("loss_maximum", loss_maximum)
            tf.summary.scalar("loss", loss)

            merged = tf.summary.merge_all()
            train_writer = tf.summary.FileWriter("./logs/nn_logs" + '/train',
                                                 sess.graph)

            # TRAINING

            sess.run(tf.global_variables_initializer())

            def balanced_batches(x, y, nr_classes):
                #print("balanced function: ", nr_classes)
                for j in range(self.batch_nr_in_epoch):
                    liste = np.zeros((nr_classes, ex_per_class))
                    for i in range(nr_classes):
                        # print(j, i, np.random.choice(index_liste[i][0], ex_per_class))
                        liste[i] = np.random.choice(index_liste[i],
                                                    ex_per_class,
                                                    replace=True)
                    liste = liste.flatten().astype(int)
                    yield j, x[liste], y[liste]

            def batches(x, y, nr_classes, batchsize=40):
                permute = np.random.permutation(len(x))
                for i in range(0, len(x) - batchsize, batchsize):
                    indices = permute[i:i + batchsize]
                    yield i, x[indices], y[indices]

            acc_test = []
            acc_train = []
            acc_balanced = []
            losses = []
            print("Loss", "Acc test", "Acc balanced")
            # Run session for self.EPOCHS
            for epoch in range(self.EPOCHS + 1):
                for i, batch_x, batch_t in balanced_batches(
                        train_x, train_t, nr_classes):
                    summary, _ = sess.run([merged, optimizer], {
                        x: batch_x,
                        y: batch_t,
                        training: True
                    })
                    train_writer.add_summary(
                        summary, i + self.batch_nr_in_epoch * epoch)

                tf.add_to_collection("out", out)
                tf.add_to_collection("unique", uni)

                loss_test, out_test = sess.run([loss, out], {
                    x: test_x,
                    y: test_t,
                    training: False
                })
                pitches_test = Tools.decode_one_hot(out_test, self.unique)
                acc_test.append(
                    np.around(Tools.accuracy(pitches_test, labels_string_test),
                              2))
                losses.append(np.around(loss_test, 2))
                acc_balanced.append(
                    np.around(
                        Tools.balanced_accuracy(pitches_test,
                                                labels_string_test), 2))
                #Train Accuracy
                out_train = sess.run(out, {
                    x: train_x,
                    y: train_t,
                    training: False
                })
                pitches_train = Tools.decode_one_hot(out_train, self.unique)
                acc_train.append(
                    np.around(
                        Tools.accuracy(pitches_train, labels_string_train), 2))
                # print(loss_test, acc_test[-1], acc_balanced[-1])
                # if acc_train!=[]:
                #     print("acc_train: ", acc_train[-1])
                if epoch % 20 == 0:
                    print('{:>20}'.format("Loss"), '{:>20}'.format("Acc test"),
                          '{:>20}'.format("Acc balanced"),
                          '{:>20}'.format("Acc train"))
                print('{:20}'.format(round(float(loss_test),
                                           3)), '{:20}'.format(acc_test[-1]),
                      '{:20}'.format(acc_balanced[-1]),
                      '{:20}'.format(acc_train[-1]))

            print("\nMAXIMUM ACCURACY TEST: ", max(acc_test))
            #print("MAXIMUM ACCURACY TRAIN: ", max(acc_train))

            #print("Accuracy test by class: ", Tools.accuracy_per_class(pitches_test, labels_string_test))
            # print("True                Test                 ", self.unique)
            # if len(self.unique)==1:
            #     for i in range(10): #len(labels_string_test)):
            #         print(labels_string_test[i], pitches_test[i])
            # else:
            # # print(np.swapaxes(np.append([labels_string_test], [pitches_test], axis=0), 0,1))
            #     for i in range(30): #len(labels_string_test)):
            #         print('{:20}'.format(labels_string_test[i]), '{:20}'.format(pitches_test[i])) #, ['%.2f        ' % elem for elem in out_test[i]])

            max_accuracy.append(max(acc_test))
            mean_end_accuracy.append(np.mean(acc_test[-5:]))
            balanced_max.append(max(acc_balanced))
            # print("maximums", max_accuracy, "means of last five", mean_end_accuracy)
            # Tools.confusion_matrix(np.asarray(pitches_test), np.asarray(labels_string_test)) # confused_classes(np.asarray(pitches_test), np.asarray(labels_string_test))
            sess.close()

        # END: save results of ten fold cross validation
        print("result 10 fold cross:", np.mean(max_accuracy),
              np.mean(mean_end_accuracy), np.mean(balanced_max))
        with open(
                os.path.join("2_Movement_classification",
                             "ten_fold_results.json"), "r") as infile:
            dic = json.load(infile)
        dic_accuracies = {
            "maximum": max_accuracy,
            "mean_over5": mean_end_accuracy,
            "balanced_max": balanced_max
        }
        dic[self.SAVE] = dic_accuracies

        with open(
                os.path.join("2_Movement_classification",
                             "ten_fold_results.json"), "w") as outfile:
            json.dump(dic, outfile)

        return 0
Example #26
                                           })
                inference[inference > 1.0] = 1.0
                inference[inference < 0.0] = 0.0
                inference = inference * 255.0

                metric = tool.psnr(inference, test_data[j][2])
                format_time = str(
                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
                log_info = format_time + ' ' + 'iters:%d, img:%d, loss:%.6f, psnr:%.6f' % (
                    i, j, loss, metric)
                print(log_info)
                val_log.write(log_info + '\n')
        writer.add_summary(train_summary, i)
    writer.close()


if __name__ == '__main__':
    cfg = Config('SRCNN')
    tool = Tools()
    batch_size = 64
    # train data
    datasets_path = './datasets/training_91_image_patches.h5'
    data, label = tool.read_h5_file(datasets_path)
    data_loder = tool.data_iterator(data, label, batch_size)

    # val data
    path = './datasets/Test/Set5'
    test_data = tool.read_test_data(path, cfg)

    train(cfg, data_loder, test_data)
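
tool.psnr is not shown; for 8-bit images it presumably implements the standard definition PSNR = 10 log10(255^2 / MSE). A minimal sketch under that assumption:

import numpy as np

def psnr(img1, img2, peak=255.0):
    # peak signal-to-noise ratio between two equally shaped images
    mse = np.mean((img1.astype(np.float64) - img2.astype(np.float64)) ** 2)
    return 10.0 * np.log10(peak ** 2 / mse)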
Example #27
    company.url = row['url'].decode('utf8', errors='ignore')
    company.yearFounded = row['year_founded'].decode('utf8', errors='ignore')
    company.city = row['city'].decode('utf8', errors='ignore')
    company.zipCode = row['zip_code'].decode('utf8', errors='ignore').zfill(5)
    company.companyCategory = row['company_category'].decode('utf8',
                                                             errors='ignore')
    company.companyType = row['company_type'].decode('utf8', errors='ignore')
    company.fte = row['full_time_employees'].decode('utf8', errors='ignore')
    company.revenueSource = row['revenue_source'].decode(
        'utf8', errors='ignore').split(', ')
    company.businessModels = row['business_model'].decode('utf8',
                                                          errors='ignore')
    company.socialImpact = row['social_impact'].decode(
        'utf8', errors='ignore').split(', ')
    company.description = row['description'].decode('utf8', errors='ignore')
    company.prettyName = Tools.prettify(company['companyName'])
    company.filters = []

    if company.companyType.lower() == 'nonprofit':
        company.companyType = 'Non-profit'

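    # '10-Jan' and 'Nov-50' are '1-10' and '11-50' mangled into dates by Excel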
    if company.fte == '10-Jan':
        company.fte = '1 - 10'
    elif company.fte == 'Nov-50':
        company.fte = '11 - 50'

    company.companyCategory = company.companyCategory.split(',')[0]

    agency_names = row['agencies'].decode('utf8', errors='ignore').split(', ')
    agencies = []
    for agency_name in agency_names:
Example #28
import uavcan
from contextlib import closing
from MountControl import MountControl
from utils import Tools
import cv2 as cv
import threading
import time
'''
Should handle the timeout for when the UAV moves on along its route after 90 s.
'''

if __name__ == '__main__':
    tools = Tools('/home/mmc/tx2logs', usecam=False)
    uavcan.load_dsdl()
    node_info = uavcan.protocol.GetNodeInfo.Response()
    node_info.name = 'com.mmc.tx2'

    direction = [[1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1], [0, -1],
                 [1, -1]]

    with closing(
            uavcan.make_node('/dev/ttyACM0',
                             node_id=20,
                             bitrate=1000000,
                             node_info=node_info)) as node:
        # init mount, task
        mount = MountControl(node)

        def publisNodeInfo():
            # publish the uavcan node info
            msg = uavcan.protocol.NodeStatus()
Example #29
    def __init__(self):
        self.tools = Tools()

        self.unique_labels = ["0", "1", "2"]
Example #30
    def run(self):
        try:
            shutil.rmtree("/Users/ninawiedemann/Desktop/UNI/Praktikum/logs")
            print("logs removed")
        except:
            print("logs could not be removed")

        tf.reset_default_graph()
        sess = tf.InteractiveSession()

        nr_classes = len(self.unique)
        print("classes", self.unique)

        model = Model()

        M, N, nr_joints, nr_coordinates = self.data.shape
        SEP = int(M * 0.9)
        # print("Test set size: ", len_test, " train set size: ", len_train)
        # print("Shapes of train_x", train_x.shape, "shape of test_x", test_x.shape)
        ind = np.random.permutation(len(self.data))
        train_ind = ind[:SEP]
        test_ind = ind[SEP:]

        labels = Tools.onehot_with_unique(self.labels_string, self.unique)
        ex_per_class = self.BATCH_SZ // nr_classes
        BATCHSIZE = nr_classes * ex_per_class

        self.data = Tools.normalize01(self.data)
        train_x = self.data[train_ind]
        test_x = self.data[test_ind]

        train_t = labels[train_ind]
        test_t = labels[test_ind]
        labels_string_train = self.labels_string[train_ind]
        labels_string_test = self.labels_string[test_ind]

        print(train_x.shape, train_t.shape, labels_string_train.shape,
              test_x.shape, test_t.shape, labels_string_test.shape)
        #train_x, labels_string_train = Tools.balance(train_x, labels_string_train)
        len_test = len(test_x)
        len_train = len(train_x)

        x = tf.placeholder(tf.float32, (None, N, nr_joints, nr_coordinates),
                           name="input")
        y = tf.placeholder(tf.float32, (None, nr_classes))
        training = tf.placeholder_with_default(False, None, name="training")

        if self.network == "adjustable conv1d":
            out, logits = model.conv1d_with_parameters(
                x, nr_classes, training, self.rate_dropout, self.act,
                self.first_conv_filters, self.first_conv_kernel,
                self.second_conv_filter, self.second_conv_kernel,
                self.first_hidden_dense, self.second_hidden_dense)
        elif self.network == "rnn":
            out, logits = model.RNN(x, nr_classes, self.n_hidden,
                                    self.nr_layers)
        elif self.network == "conv 1st move":
            out, logits = model.conv1stmove(
                x, nr_classes, training, self.rate_dropout, self.act,
                self.first_conv_filters, self.first_conv_kernel,
                self.second_conv_filter, self.second_conv_kernel,
                self.first_hidden_dense, self.second_hidden_dense)
        elif self.network == "combined":
            out_normal, logits = model.conv1stmove(
                x, nr_classes, training, self.rate_dropout, self.act,
                self.first_conv_filters, self.first_conv_kernel,
                self.second_conv_filter, self.second_conv_kernel,
                self.first_hidden_dense, self.second_hidden_dense)
            out_normal = tf.reshape(out_normal, (-1, self.unique[0], 1, 1))
            wrist_ellbow_right = tf.reduce_mean(
                x[:, :, 1:3, 1], 2)  # y coordinate of elbow and wrist
            print(wrist_ellbow_right)
            wrist_ellbow_left = tf.reduce_mean(x[:, :, 4:6, 1], 2)
            print(wrist_ellbow_left)
            shoulder_left = tf.reshape(x[:, :, 0, 1], (-1, self.unique[0], 1))
            shoulder_right = tf.reshape(x[:, :, 3, 1], (-1, self.unique[0], 1))
            print(shoulder_right)
            shoulder_both = tf.concat([shoulder_left, shoulder_right], 2)
            print(shoulder_both)
            shoulders = tf.reduce_mean(shoulder_both,
                                       2)  # y coordinate of shoulders
            print(shoulders)
            new_x = tf.reshape(
                tf.concat([
                    tf.reshape(wrist_ellbow_right - shoulders,
                               (-1, self.unique[0], 1)),
                    tf.reshape(wrist_ellbow_left - shoulders,
                               (-1, self.unique[0], 1))
                ], 2), (-1, self.unique[0], 2, 1))
            print(new_x)
            #out_wrist_ellbow, logits = model.conv1stmove(x, nr_classes, training, self.rate_dropout, self.act, self.first_conv_filters, self.first_conv_kernel, self.second_conv_filter,
            #                                            self.second_conv_kernel, self.first_hidden_dense, self.second_hidden_dense)
            combined_x = tf.concat([out_normal, new_x], 2)
            out, logits = model.conv1d_with_parameters(
                combined_x, nr_classes, training, self.rate_dropout, self.act,
                self.first_conv_filters, self.first_conv_kernel,
                self.second_conv_filter, self.second_conv_kernel,
                self.first_hidden_dense, self.second_hidden_dense)
        elif self.network == "conv+rnn":
            first_out, _ = model.conv1stmove(x,
                                             nr_classes,
                                             training,
                                             self.rate_dropout,
                                             self.act,
                                             self.first_conv_filters,
                                             self.first_conv_kernel,
                                             self.second_conv_filter,
                                             self.second_conv_kernel,
                                             self.first_hidden_dense,
                                             self.second_hidden_dense,
                                             out_filters=128)
            print(first_out)
            out, logits = model.RNN(first_out, nr_classes, self.n_hidden,
                                    self.nr_layers)
        else:
            print("ERROR, WRONG", self.network, "INPUT")

        tv = tf.trainable_variables()

        out = tf.identity(out, "out")
        uni = tf.constant(self.unique, name="uni")

        if len(self.unique) == 1:
            out = tf.sigmoid(logits)
            loss = tf.reduce_mean(tf.square(y - out))
        else:
            loss_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=y,
                                                        logits=logits))
            loss_regularization = self.regularization * tf.reduce_sum(
                [tf.nn.l2_loss(v) for v in tv])
            loss = loss_entropy + loss_regularization  #+  loss_maximum #0.001  loss_entropy +

        # max_out = tf.argmax(out, axis = 1)
        # max_lab = tf.argmax(y, axis = 1)
        # diff = tf.cast(max_out-max_lab, tf.float32)
        # loss = tf.reduce_mean(tf.square(diff))

        optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)

        # TENSORBOARD comment all in
        # tf.summary.scalar("loss_entropy", loss_entropy)
        # tf.summary.scalar("loss_regularization", loss_regularization)
        # tf.summary.scalar("loss_maximum", loss_maximum)
        # tf.summary.scalar("loss", loss)
        #
        # merged = tf.summary.merge_all()
        # train_writer = tf.summary.FileWriter("./logs/nn_logs" + '/train', sess.graph)

        saver = tf.train.Saver(tf.global_variables())

        #tf.summary.scalar("loss_entropy", loss_entropy)
        #tf.summary.scalar("loss_regularization", loss_regularization)
        # tf.summary.scalar("loss_maximum", loss_maximum)
        tf.summary.scalar("loss", loss)

        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter("./logs/nn_logs" + '/train',
                                             sess.graph)

        # TRAINING

        sess.run(tf.global_variables_initializer())

        def batches(x, y, nr_classes, batchsize=40):
            permute = np.random.permutation(len(x))
            for i in range(0, len(x) - batchsize, batchsize):
                indices = permute[i:i + batchsize]
                yield i, x[indices], y[indices]

        acc_test = []
        acc_train = []
        acc_balanced = []
        losses = []
        # print('{:20}'.format("Loss"), '{:20}'.format("Acc test"), '{:20}'.format("Acc balanced"), '{:20}'.format("Acc train"))
        # Run session for self.EPOCHS
        for epoch in range(self.EPOCHS + 1):
            for i, batch_x, batch_t in batches(train_x, train_t, nr_classes):
                summary, _ = sess.run([merged, optimizer], {
                    x: batch_x,
                    y: batch_t,
                    training: True
                })
                train_writer.add_summary(summary,
                                         i + self.batch_nr_in_epoch * epoch)

            tf.add_to_collection("out", out)
            tf.add_to_collection("unique", uni)

            loss_test, out_test = sess.run([loss, out], {
                x: test_x,
                y: test_t,
                training: False
            })
            pitches_test = Tools.decode_one_hot(out_test, self.unique)
            acc_test.append(
                np.around(Tools.accuracy(pitches_test, labels_string_test), 2))
            losses.append(np.around(loss_test, 2))
            acc_balanced.append(
                np.around(
                    Tools.balanced_accuracy(pitches_test, labels_string_test),
                    2))
            #Train Accuracy
            if epoch % 20 == 0:
                print('{:>20}'.format("Loss"), '{:>20}'.format("Acc test"),
                      '{:>20}'.format("Acc balanced"),
                      '{:>20}'.format("Acc train"))

            out_train = sess.run(out, {
                x: train_x,
                y: train_t,
                training: False
            })
            pitches_train = Tools.decode_one_hot(out_train, self.unique)
            acc_train.append(
                np.around(Tools.accuracy(pitches_train, labels_string_train),
                          2))
            print('{:20}'.format(round(float(loss_test),
                                       3)), '{:20}'.format(acc_test[-1]),
                  '{:20}'.format(acc_balanced[-1]),
                  '{:20}'.format(acc_train[-1]))

        # FINAL OUTPUT
        print(
            "\n\n\n---------------------------------------------------------------------"
        )
        #print("NEW PARAMETERS: ", BATCHSIZE, self.EPOCHS, self.act, self.align, self.batch_nr_in_epoch, self.rate_dropout, self.learning_rate, len_train, self.n_hidden, self.nr_layers, self.network, nr_classes, nr_joints)
        #Test Accuracy
        #print("Losses", losses)
        #print("Accuracys test: ", acc_test)
        #print("Accuracys train: ", acc_train)
        print("\nMAXIMUM ACCURACY TEST: ", max(acc_test))
        #print("MAXIMUM ACCURACY TRAIN: ", max(acc_train))

        #print("Accuracy test by class: ", Tools.accuracy_per_class(pitches_test, labels_string_test))
        print('{:20}'.format("Ground truth label"),
              '{:20}'.format("Predicted Output"))
        if len(self.unique) == 1:
            for i in range(20):  #len(labels_string_test)):
                print(labels_string_test[i], pitches_test[i])
        else:
            # print(np.swapaxes(np.append([labels_string_test], [pitches_test], axis=0), 0,1))
            for i in range(len(labels_string_test)):
                print('{:20}'.format(labels_string_test[i]),
                      '{:20}'.format(pitches_test[i])
                      )  #, ['%.2f        ' % elem for elem in out_test[i]])

        if self.SAVE is not None:
            saver.save(sess, self.SAVE)

        pitches = np.append(pitches_test, pitches_train, axis=0)
        labs = np.append(labels_string_test, labels_string_train, axis=0)
        print("ACCURACY IN RANGE 2",
              Tools.accuracy_in_range(pitches.flatten(), labs.flatten(), 2))
        return test_ind, pitches_test, labels_string_test
Example #31
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_misaka import Misaka
from flask_mail import Mail
from config import config
from utils import Tools

bootstrap = Bootstrap()
db = SQLAlchemy()
mail = Mail()
migrate = Migrate()
misaka = Misaka()
tools = Tools()

login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
login_manager.login_message = '请登录后访问该页面'  # "Please log in to access this page"

from .commands.hexo import Hexo
hexo = Hexo()

from admin import PeachAdmin
admin = PeachAdmin(name="Management", template_mode="bootstrap3")


def create_app(config_name):
    app = Flask(__name__, static_folder='static', static_url_path='')
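    # The listing is cut off here; a typical Flask application factory would
    # continue by loading the selected config and binding the extensions
    # created above. This continuation is an assumption, not the repo's code:
    app.config.from_object(config[config_name])
    bootstrap.init_app(app)
    db.init_app(app)
    mail.init_app(app)
    migrate.init_app(app, db)
    misaka.init_app(app)
    login_manager.init_app(app)
    return app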
Example #32
        print("Number of data used for testing", len(csv.index))

    # DATA PREPARATION:
    # 1. cut to certain Pitching position?
    if len(cfg.position) > 0:
        assert cfg.position == "Windup" or cfg.position == "Stretch", "Wrong pitching position filtering in config file"
        csv = csv[csv["Pitching Position (P)"] == cfg.position]
        print("Only pitching position ", cfg.position, "included in data")

    # 2. the pitch type "eephus" is excluded because it only occurs once in the data
    if label_name == "Pitch Type":
        csv = csv[csv["Pitch Type"] != "Eephus"]

    # 3. cut to the 5 players with most data
    if cfg.five_players:
        csv = Tools.cut_csv_to_pitchers(csv)
        print("Only the five players with most data are included")

    # GET DATA
    data, labels = Tools.get_data_from_csv(csv,
                                           label_name,
                                           min_length=cfg.nr_frames)
    print("Data shape:", data.shape)
    # data = np.load("data_test.npy")
    # labels = np.load("labels_test.npy")

    # 4. Change labels to super classes (only for the pitch type!)
    if cfg.super_classes:
        labels = Tools.labels_to_classes(labels)
        print("Labels are transformed to superclasses")