Example No. 1
def start_evaluator():
    print("Starting Evaluator...")
    #Read Data
    gd = getData()
    rec_tmp = []

    print("Got first item")
    global rec_db1

    # Loop until there is nothing left to read
    while not gd.empty:

        #Call Algorithm and get Recommendations
        rec_tmp = callback(gd)
        if len(rec_tmp) != 0:
            if delay:
                # note: DataFrame.append was removed in pandas 2.0; pd.concat is the modern equivalent
                rec_db1 = rec_db1.append(rec_tmp)
            else:
                rec_db1 = rec_tmp

        #Read Data Again
        gd = getData()

        # If the session and item match a recommendation, provide a reward
        # (.any() guards against the ambiguous truth value of a multi-row comparison)
        if len(rec_db1) != 0 and (gd.loc[gd['SessionID'].isin(
                rec_db1.iloc[:, 0])]['Event'].values == 3).any():
            #Generate Reward
            algoreward(gd)

        if not delay and len(rec_db1) != 0:
            rec_db1.drop(rec_db1.index[0], inplace=True)

        #Calculate and plot metrics
        calc_metrics(rec_tmp, gd)

    print('Evaluator Run Complete')
    #    print rec_db1;
    get_metrics()
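The loop above leans on helpers and globals defined elsewhere in the project (getData, callback, algoreward, calc_metrics, get_metrics, delay, rec_db1). The following is a minimal sketch of hypothetical stand-ins that lets the loop be exercised against a tiny two-batch pandas stream; the names come from the calls above, but every body here is an assumption, not the project's real implementation.

import pandas as pd

# Hypothetical stand-ins -- the real project defines these elsewhere.
delay = False
rec_db1 = pd.DataFrame()

_empty = pd.DataFrame(columns=['SessionID', 'ItemID', 'Event'])
_stream = iter([
    pd.DataFrame({'SessionID': [1], 'ItemID': [10], 'Event': [1]}),
    pd.DataFrame({'SessionID': [1], 'ItemID': [11], 'Event': [3]}),
])

def getData():
    # next event batch, or an empty frame once the stream is exhausted
    return next(_stream, _empty)

def callback(gd):
    # dummy recommender: echo the current session/item pairs back
    return gd[['SessionID', 'ItemID']]

def algoreward(gd):
    print("reward for sessions", gd['SessionID'].tolist())

def calc_metrics(rec_tmp, gd):
    pass  # metric accumulation would happen here

def get_metrics():
    print("metrics would be reported here")

With these stubs in place, start_evaluator() walks both batches, rewards the Event == 3 row that matches a recommendation, and exits once the stream runs dry.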
Example No. 2
save_dir = os.path.join(THIS_FOLDER, "results")
glove_path = os.path.join(THIS_FOLDER, "glove")

################### Input ####################################################
option = input(
    "Please select what you would like to do:\n 1. Train and chat  2. Load model and chat \n"
)
embedding = '2'
if option == '1':
    embedding = input(
        "Please select what kind of embedding would you like to use:\n 1. GloVe  2. Embedding layer \n"
    )
################### READ, NORMALIZE, CREATE PAIRS ############################

MAX_LENGTH = 15  # maximum words in a sentence
sentences_lengths = getData(DATA_PATH, PAIRS_PATH)  # read_data.py

# if the trimmed lines are already saved, skip getTestData
if not path.exists(LINES_PATH) or os.stat(LINES_PATH).st_size == 0:
    getTestData(DATA_TEST_PATH, LINES_PATH, MAX_LENGTH)  # read_data.py

#################### CREATE VOCABULARY AND NEW PAIRS #########################

voc, pairs = prepareData(PAIRS_PATH, MAX_LENGTH)  # vocabulary.py

print("total dialogues " + str(len(pairs)))
print("total words " + str(voc.__len__()))

lines = trimLines(LINES_PATH, voc)
#################### PREPARE DATA IN BATCHES #################################
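The example is cut off at the batching header. As a sketch of what that step could look like, assuming the voc object exposes a word2index mapping and PAD/EOS token ids in the style of the standard PyTorch chatbot tutorial; the helper names, token ids, and batch layout below are assumptions, not this project's API.

import itertools
import random
import torch

PAD_token, EOS_token = 0, 2  # assumed special-token ids

def indexesFromSentence(voc, sentence):
    # assumes voc.word2index maps each word to an integer id
    return [voc.word2index[word] for word in sentence.split(' ')] + [EOS_token]

def batch2TrainData(voc, pair_batch, batch_size=64):
    # sample a batch and sort by input length (longest first), as packed sequences expect
    pair_batch = random.sample(pair_batch, min(batch_size, len(pair_batch)))
    pair_batch.sort(key=lambda p: len(p[0].split(' ')), reverse=True)
    inputs = [indexesFromSentence(voc, p[0]) for p in pair_batch]
    targets = [indexesFromSentence(voc, p[1]) for p in pair_batch]
    lengths = torch.tensor([len(seq) for seq in inputs])
    # pad every sequence to the longest one and transpose to (max_len, batch)
    input_padded = list(itertools.zip_longest(*inputs, fillvalue=PAD_token))
    target_padded = list(itertools.zip_longest(*targets, fillvalue=PAD_token))
    return torch.LongTensor(input_padded), lengths, torch.LongTensor(target_padded)

Each call returns an input tensor of shape (max_len, batch), the per-sequence lengths, and a padded target tensor.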
Example No. 3
import read_data as rd
import matplotlib.pyplot as plt

data = rd.getData("example.in")
#data = rd.getData("mother_of_all_warehouses.in")
#data = rd.getData("redundancy.in")
drones = data[3]
print(drones[0].position)

environment = data[0]
warehouses = data[1]
orders = data[2]

plt.figure(1)
plt.margins(0.1,0.1)
# highlight the first warehouse with a larger blue marker
for j, w in enumerate(warehouses):
    if j == 0:
        plt.plot(w.position[0], w.position[1], 'bo', markersize=20)
    else:
        plt.plot(w.position[0], w.position[1], 'go')

for o in orders:
    plt.plot(o.location[0], o.location[1],'ro',alpha=0.3)

plt.show()
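Since drones[0].position is printed near the top, the drone objects evidently carry a position as well. A small hypothetical extension, added just before plt.show(), marks the drone start locations on the same figure (the marker choice is arbitrary):

for d in drones:
    plt.plot(d.position[0], d.position[1], 'k^', markersize=6)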
Example No. 4
import operator

from read_data import getData  # assumed source: the read_data helper used in the example above


def calculate_total_weight(order, product_weights):
    """
    Calculates the total weight of an order.
    :param order:
    :return:
    """
    total_weight = 0
    for product_id, num_items in enumerate(order.products):
        total_weight += num_items * product_weights[product_id]
    return total_weight


def get_current_weight(drone, product_weights):
    return calculate_total_weight(drone, product_weights)

if __name__ == '__main__':

    environment, warehouses, orders, drones = getData('busy_day.in')
    commands = []

    # sort orders by the total weight of the items they contain (lightest first)
    order_total_weights = [(order, calculate_total_weight(order, environment.product_weights)) for order in orders]

    orders = [order for order, _ in sorted(order_total_weights, key=operator.itemgetter(1))]

    current_drone = drones[0]
    for order in orders:

        fulfilled = False
        product_id = 0

        # check if items are left in the order
        while order.products[product_id] > 0:
Example No. 5
    """
    # Define hyperparameters
    D = 55
    p = 0.95
    ff = 0.2
    CL_i = {}  # holds a segment, not a single point
    CL_j = {}
    CL_i_index = {}
    CL_j_index = {}

    outlying = []  # holds the outliers
    outlying_fine = []  # holds the full line segment for each outlier
    start_time = time.time()

    # Partition the data
    data = getData()
    # pair up segments in the coarse trajectory dataset to look for outlying paths

    print("--------------------------------- Data loading complete, entering the partition module ---------------------------------")
    for number, item in enumerate(data):
        for i in range(len(item[0]) - 1):
            L_i = [item[0][i], item[0][i + 1]]  # take one segment of the coarse polyline
            CL_j_index[number] = L_i
            for j in range(len(item[0]) - 1):
                L_j = [item[0][j], item[0][j + 1]]  # take one segment of the coarse polyline
                CL_i_index[number] = L_j
                # skip identical segments; only distinct pairs of paths are compared
                # (L_i_l and L_j_l hold the coarse fragment and its corresponding refined fragment)
                if (L_i[0] == L_j[0]) and L_i[1] == L_j[1]:
                    pass
                else:
                    # rule 1
                    if lower_bounds_dist(L_i, L_j, item[1]) > D: