Example #1
 def test_convert_us(self):
     us = [
         Unit(1, 10, KEY_PRESS_UNIT_TYPE),
         Unit(2, 11, KEY_PRESS_UNIT_TYPE),
         Unit(3, 10, KEY_RELEASE_UNIT_TYPE),
         Unit(4, 11, KEY_RELEASE_UNIT_TYPE),
     ]
     self.assertEqual(get_inputs_sequence(us, False),
                      [Input(10, 1, 3), Input(11, 2, 4)])
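The Unit and Input types these tests exercise are not shown on this page. A minimal sketch consistent with the constructor calls above and the attribute access in Examples 6 and 9 (the field order and the sentinel values are assumptions inferred from usage, not the project's actual definitions):

from collections import namedtuple

# Assumed shapes: Unit(time_stamp, key_code, type) and
# Input(key_code, press_time, release_time), matching the calls above.
KEY_PRESS_UNIT_TYPE = "press"        # hypothetical sentinel values
KEY_RELEASE_UNIT_TYPE = "release"

Unit = namedtuple("Unit", ["time_stamp", "key_code", "type"])
Input = namedtuple("Input", ["key_code", "press_time", "release_time"])

Namedtuples compare by value, which is what assertEqual in these tests relies on.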
Example #2
File: main.py Project: gutorc92/unb
def main(argv=sys.argv):
    f = Face()
    n = Net()
    o = Output()
    if len(argv) >= 2:
        argv.pop(0)
        for arg in argv:
            print(arg)
            d = Input(arg)
            o.outFaces(f.detect(d.getImage(Input.FACES)))
            dogs, cats = n.result(d.getImage(Input.PREDICTION))
            o.outAnimals(dogs, cats)
Example #3
 def test_get_i_d(self):
     inputs = [
         Input(10, 1, 3),
         Input(11, 2.1, 4)
     ]
     self.assertEqual(
         get_inner_distance_sequence(inputs),
         [
             InnerDistance(10, 2),
             InnerDistance(11, 1.9)
         ]
     )
Example #4
 def test_get_dist(self):
     inputs = [
         Input(10, 1, 3),
         Input(11, 2, 4),
         Input(12, 6, 8)
     ]
     self.assertEqual(
         get_outer_distance_sequence(inputs),
         [
             OuterDistance(10, 11, -1),
             OuterDistance(11, 12, 2)
         ]
     )
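The expected values in Examples 3 and 4 pin down what the two functions compute: the inner distance of an input is its hold time (release minus press), and the outer distance between consecutive inputs is the flight time (next press minus previous release). A minimal sketch consistent with both tests, assuming the Input shape sketched after Example 1:

from collections import namedtuple

InnerDistance = namedtuple("InnerDistance", ["key_code", "distance"])
OuterDistance = namedtuple("OuterDistance",
                           ["first_key", "second_key", "distance"])

def get_inner_distance_sequence(inputs):
    # hold time: release minus press, one entry per input
    return [InnerDistance(i.key_code, i.release_time - i.press_time)
            for i in inputs]

def get_outer_distance_sequence(inputs):
    # flight time: next press minus previous release, per consecutive pair
    return [OuterDistance(a.key_code, b.key_code,
                          b.press_time - a.release_time)
            for a, b in zip(inputs, inputs[1:])]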
Example #5
def run():
    model = Input(4)
    model = Affine(model, 128)
    model = LReLU(model)
    model = Affine(model, 2)
    model = Softmax(model)

    world = StochasticPolicy(Gym(make_env, max_steps=500))

    opt = Adam(np.random.randn(model.n_params) * 0.1, lr=0.01)

    for _ in range(50):
        model.load_params(opt.get_value())

        trajs = world.trajectories(model, 16)
        print_reward(trajs, max_value=5000)

        trajs = discount(trajs, horizon=500)
        trajs = normalize(trajs)

        grad = policy_gradient(trajs, policy=model)
        opt.apply_gradient(grad)

    while True:
        world.render(model)
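This example, like Examples 7, 10, and several below, builds a model by threading one layer object through the next (Input -> Affine -> LReLU -> ... -> Softmax). The layer library itself is not shown; as a rough numpy sketch of the forward pass such a stack implies (the function, the 0.1 leak slope, and the weight shapes are assumptions for illustration, not the library's internals):

import numpy as np

def forward(x, W, b):
    # Assumed Affine -> LReLU -> Affine -> Softmax pass mirroring the
    # stack above: x is the observation, W/b hold one weight matrix and
    # bias per Affine layer.
    h = x @ W[0] + b[0]                  # Affine(model, 128)
    h = np.where(h > 0, h, 0.1 * h)      # LReLU, assumed leak of 0.1
    z = h @ W[1] + b[1]                  # Affine(model, 2)
    e = np.exp(z - z.max())              # Softmax, numerically stabilized
    return e / e.sum()

# Shapes matching Input(4) -> Affine(128) -> Affine(2):
rng = np.random.default_rng(0)
W = [rng.standard_normal((4, 128)) * 0.1, rng.standard_normal((128, 2)) * 0.1]
b = [np.zeros(128), np.zeros(2)]
probs = forward(rng.standard_normal(4), W, b)
assert probs.shape == (2,) and abs(probs.sum() - 1.0) < 1e-9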
Example #6
def get_inputs_sequence_from_messages_sequence(messages_sequence):
    input_sequence = list()

    for message in messages_sequence:
        temp_seq = list()
        i = 0
        while i < len(message):
            if message[i].type == KEY_RELEASE_UNIT_TYPE:
                i += 1
                continue

            current_unit = message[i]

            j = 1
            while i + j < len(message):
                next_unit = message[i + j]
                if (next_unit.key_code == current_unit.key_code
                        and next_unit.type == KEY_RELEASE_UNIT_TYPE):
                    temp_seq.append(
                        Input(current_unit.key_code, current_unit.time_stamp,
                              next_unit.time_stamp))
                    break
                else:
                    j += 1
            i += 1
        input_sequence.append(tuple(temp_seq))
    return input_sequence
Example #7
def walker():
    walker = Input(STATE_SIZE)
    walker = Affine(walker, 64)
    walker = LReLU(walker)
    walker = Affine(walker, ACTION_SIZE)
    walker = Tanh(walker)
    return walker
Example #8
def makeTransaction(node_send, node_rec, val):
    get_val = val
    transactions = copy.deepcopy(node_send.unSpentTransactions)
    inputs = []
    for tx_hash in transactions:
        tr = transactions[tx_hash]
        for index, output in enumerate(tr.outputs):
            if output is not None:
                if output.pub_key == node_send.pub_key.to_string():
                    signature = node_send.priv_key.sign(tx_hash.encode('utf-8'))
                    inputs.append(Input(tx_hash, index, signature))
                    get_val -= output.value
                    if get_val < 0.0:
                        break
        if get_val < 0.0:
            # enough value gathered; no need to scan more transactions
            break
    if get_val > 0.0:
        print("Not enough Coin to make Transaction...")
        return False

    outputs = []
    main_output = Output(node_rec.pub_key.to_string(), val)
    outputs.append(main_output)
    if get_val < 0.0:
        outputs.append(Output(node_send.pub_key.to_string(), 0.0 - get_val))
    new_transaction = Transaction(inputs, outputs)
    node_send.sendTransaction(new_transaction)
    return True
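Here and in Example 11 below, Input(tx_hash, index, signature) and Output(pub_key, value) come from a toy UTXO model whose definitions are not shown. A minimal sketch of the shapes those calls imply (field names are guesses from usage):

from collections import namedtuple

Input = namedtuple("Input", ["tx_hash", "index", "signature"])
Output = namedtuple("Output", ["pub_key", "value"])

# Change arithmetic as in makeTransaction: get_val going negative means
# the gathered outputs exceed the requested value, and the surplus is
# returned to the sender as a change output.
get_val = 3.0 - 5.0        # wanted 3.0, consumed an output worth 5.0
change = 0.0 - get_val     # 2.0 back to the sender
assert change == 2.0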
Example #9
def get_inputs_sequence(unit_sequence, is_raw):
    if is_raw:
        unit_sequence = remove_intermediate_pressing(
            remove_duplicates(unit_sequence))
    input_sequence = list()

    i = 0
    while i < len(unit_sequence):
        if unit_sequence[i].type == KEY_RELEASE_UNIT_TYPE:
            i += 1
            continue

        current_unit = unit_sequence[i]

        j = 1
        while i + j < len(unit_sequence):
            next_unit = unit_sequence[i + j]
            if (next_unit.key_code == current_unit.key_code
                    and next_unit.type == KEY_RELEASE_UNIT_TYPE):
                input_sequence.append(
                    Input(current_unit.key_code, current_unit.time_stamp,
                          next_unit.time_stamp))
                break
            else:
                j += 1
        i += 1
    return input_sequence
Example #10
def build_classifier():
    model = Input(2)
    model = LReLU(Affine(model, 32))
    model = LReLU(Affine(model, 32))
    model = Affine(model, 2)
    model = Softmax(model)
    return model
Example #11
def createRewardTransaction(node):
    tx_hash = 'BLOCK-REWARD'
    index = -1
    signature = "YOLO"
    reward_input = Input(tx_hash, index, signature)
    output = Output(node.pub_key.to_string(), 5.0)
    transaction = Transaction([reward_input], [output])
    print("Transaction: {} being sent...".format(transaction.hash))
    node.sendTransaction(transaction)
Example #12
def run():
    classifier = Input(2)
    classifier = Affine(classifier, 16)
    classifier = LReLU(classifier)
    classifier = Affine(classifier, 2)
    classifier = Softmax(classifier)

    gausses = [Constant(2)]
    gausses[0].load_params([0., 0.])

    plt.ion()

    def train_one():
        gaussOpt = Adam(
            [0., 0.],
            lr=0.010,
            memory=0.5,
        )
        classOpt = Adam(np.random.randn(classifier.n_params) * 0.1,
                        lr=0.5,
                        memory=0.99)
        gaussCenterer = Constant(2)
        gausses.append(gaussCenterer)
        curAccuracy = 0.
        while curAccuracy < 0.98:
            classifier.load_params(classOpt.get_value())
            gaussCenterer.load_params(gaussOpt.get_value())

            trajs = [[(gauss_observation(gausses[:-1]), [1, 0], 1.)]
                     for _ in range(500)]
            trajs += [[(gauss_observation(gausses[-1:]), [0, 1], 1.)]
                      for _ in range(500)]
            accTrajs = accuracy(trajs, model=classifier)
            print_reward(accTrajs, max_value=1.0)
            accs = [traj[0][2] for traj in accTrajs]
            curAccuracy = np.mean(accs)

            grad = policy_gradient(trajs, policy=classifier)
            classOpt.apply_gradient(grad)
            trajs2 = learn_from_classifier(classifier, trajs[500:], 1)
            trajs2 = normalize(trajs2)
            grad2 = policy_gradient(trajs2, policy=gaussCenterer)
            gaussOpt.apply_gradient(grad2)
            plt.clf()
            plt.grid()
            plt.gcf().axes[0].set_ylim([-1, 1])
            plt.gcf().axes[0].set_xlim([-1, 1])
            x, y = zip(*[o for ((o, _, _), ) in trajs[:500]])
            plt.scatter(x, y, color="blue")
            x, y = zip(*[o for ((o, _, _), ) in trajs[500:]])
            plt.scatter(x, y, color="red")
            plt.pause(0.01)

    for i in range(10):
        print("Teaching agent %d." % i)
        train_one()
    plt.pause(10000000000000.)
Example #13
def load_data(path):
    inputs = []
    outputs = []
    df = pd.read_csv(path)
    # inputs = df.question_text.to_numpy()
    # print(inputs)
    for _, r in df.iterrows():
        text = r['question_text'].strip().lower()
        target = r['target']
        inputs.append(Input(text, target))
        outputs.append(Output(target))
    return inputs, outputs
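In this loader, Input and Output are thin wrappers around a question text with its label and a score. Their definitions are not included; a plausible minimal sketch matching the constructor calls (the real classes may carry more state):

from dataclasses import dataclass

@dataclass
class Input:
    text: str
    target: int

@dataclass
class Output:
    score: int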
Example #14
def posts_post():
    """ Add a new post """
    data = request.json

    # Check that the JSON supplied is valid
    # If not we return a 422 Unprocessable Entity
    try:
        validate(data, post_schema)
    except ValidationError as error:
        data = {"message": error.message}
        return Response(json.dumps(data), 422, mimetype="application/json")

    # Add the post to the database
    post = Input(title=data["title"], rent=data["rent"])
    session.add(post)
    session.commit()

    # Return a 201 Created, containing the post as JSON and with the
    # Location header set to the location of the post
    data = json.dumps(post.as_dictionary())
    headers = {"Location": url_for("post_get", id=post.id)}
    return Response(data, 201, headers=headers,
                    mimetype="application/json")
Example #15
def run():
    model = Input(28, 28)
    model = Affine(model, 128)
    model = LReLU(model)
    model = Affine(model, 10)
    model = Softmax(model)

    train_world = StochasticPolicy(Accuracy(Mnist()))

    opt = Adam(np.random.randn(model.n_params), lr=0.00002, memory=0.99)

    for i in range(600):
        model.load_params(opt.get_value())
        trajs = train_world.trajectories(model, 128)
        print_reward(trajs, max_value=1)
        grad = policy_gradient(trajs, policy=model)
        opt.apply_gradient(grad)
Example #16
def run():
    world = Bytes(b"aabbaab", max_steps=4, charset=b'abcd')

    print("\nConstant model:\n")
    model = Constant(4)
    model = Softmax(model)
    train(world, model)

    print("\nLast character:\n")
    model = Input(4)
    model = Affine(model, 4)
    model = LReLU(model)
    model = Affine(model, 4)
    model = Softmax(model)
    train(world, model)

    print("\nLast two characters:\n")
    model = Input(2, 4)
    model = Affine(model, 4)
    model = LReLU(model)
    model = Affine(model, 4)
    model = History(model, length=2)
    model = Softmax(model)
    train(world, model)
Example #17
def run():
    world = StochasticPolicy(Gym("CartPole-v1"))

    model = Input(4)
    model = Affine(model, 64)
    model = LReLU(model)
    model = Affine(model, 2)
    model = Softmax(model)

    if len(sys.argv) >= 2:
        params = np.load(sys.argv[1])
    else:
        params = train(world, model)
        np.save("__cartpole.npy", params)

    model.load_params(params)
    world.render(model)
Example #18
def fileToObject(filename):
    with open(filename, 'r') as fin:
        lines = fin.read().splitlines()
        numOfBooks, numOfLibs, deadline = map(int, lines[0].split(' '))
        bookScores = list(map(int, lines[1].split(' ')))
        libs = []

        for i in range(2, len(lines), 2):
            if lines[i] == '':
                continue
            _, signUp, booksOutput = map(int, lines[i].split(' '))
            books = []
            for idx in map(int, lines[i+1].split(' ')):
                books.append(Book(idx, bookScores[idx]))

            libs.append(Library(len(libs), signUp, booksOutput, books, []))

        return Input(numOfBooks, deadline, libs)
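Book, Library, and Input act here as plain containers for the parsed input file. A minimal sketch matching the constructor calls (field names inferred from the arguments; numOfLibs is read but never stored, as above):

from collections import namedtuple

Book = namedtuple("Book", ["idx", "score"])
Library = namedtuple("Library",
                     ["idx", "signUp", "booksOutput", "books", "scanned"])
Input = namedtuple("Input", ["numOfBooks", "deadline", "libs"])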
Example #19
def run():
    if len(sys.argv) < 2:
        print("Usage: imitate.py <file>")
        return

    with open(sys.argv[1], "rb") as f:
        data = f.read()
    charset = set(data)
    world = Bytes(data, max_steps=100, charset=charset)
    print("Charset size: %d" % len(charset))

    model = Input(len(charset))
    model = LSTM(model)
    model = Affine(model, len(charset))
    model = Softmax(model)

    train(world, model)

    for _ in range(10):
        world.render(model)
Example #20
def run():
    classifier = Input(2)
    classifier = Affine(classifier, 16)
    classifier = LReLU(classifier)
    classifier = Affine(classifier, 2)
    classifier = Softmax(classifier)

    curCarr = carr()
    curCarr.load_params(np.random.randn(curCarr.n_params))

    world = Gym("MountainCarContinuous-v0", max_steps=500)
    world = ActionNoise(world, stddev=0.1)
    world = Curiosity(world,
                      classifier=classifier,
                      history_length=800,
                      plot=True)

    def train_one(carrOpt):
        if carrOpt is None:
            carrOpt = Adam(
                np.random.randn(curCarr.n_params),
                lr=0.10,
                memory=0.5,
            )
        nextBreak = 5
        for i in range(250):
            curCarr.load_params(carrOpt.get_value())

            realTrajs, curiosityTrajs = world.trajectories(curCarr, 50)
            curScore = np.mean(get_rewards(realTrajs, episode=np.sum)) / 90.
            print_reward(realTrajs,
                         max_value=90.0,
                         episode=np.sum,
                         label="Real reward:      ")
            print_reward(curiosityTrajs,
                         max_value=1.0,
                         episode=np.max,
                         label="Curiosity reward: ")
            curCuriosity = np.mean(get_rewards(curiosityTrajs, episode=np.max))
            if curCuriosity > 0.98:
                if nextBreak == 0:
                    break
                else:
                    nextBreak -= 1
            else:
                nextBreak = np.min([nextBreak + 1, 5])

            realTrajs = replace_rewards(realTrajs, episode=np.sum)
            realTrajs = normalize(realTrajs)
            curiosityTrajs = replace_rewards(curiosityTrajs, episode=np.max)
            #this is stupid, we should care more(?) if the costs are too high
            realWeight = 0.001 + np.max([np.min([curScore, 0.2]), 0.]) * 0.998 / 0.2
            curiosityWeight = 1. - realWeight
            print('RWeight: %f, CWeight: %f' % (realWeight, curiosityWeight))
            trajs = combine_rewards([realTrajs, curiosityTrajs],
                                    [realWeight, curiosityWeight])
            trajs = normalize(trajs)
            grad = policy_gradient(trajs, policy=curCarr)
            carrOpt.apply_gradient(grad)
            if i % 10 == 0:
                print("%d episodes in." % i)
        world.remember_agent(curCarr)
        world.render(curCarr)
        if curScore > 0.01:
            return carrOpt
        else:
            return None

    theCarOpt = None
    for i in range(50):
        print("Teaching agent %d." % i)
        theCarOpt = train_one(theCarOpt)
Example #21
def carr():
    carr = Input(2)
    carr = Affine(carr, 32)
    carr = LReLU(carr)
    carr = Affine(carr, 3)
    return Softmax(carr)
Example #22
def build_oracle():
    model = Input(2)
    model = LReLU(Affine(model, 32))
    model = LReLU(Affine(model, 32))
    model = Affine(model, 2)
    return model
Example #23
def build_agent():
    model = Input(2)
    model = LReLU(Affine(model, 32))
    model = LReLU(Affine(model, 32))
    model = Affine(model, 1)
    return model
Example #24
def run():
    model = Input(28, 28)
    model = Conv2d(model, size=3, channels=8)
    model = LReLU(model)
    model = Maxpool(model, size=2)
    model = Conv2d(model, size=5, channels=16)
    model = LReLU(model)
    model = Maxpool(model, size=2)
    model = Affine(model, 128)
    model = LReLU(model)
    model = Affine(model, 10)
    model = Softmax(model)

    if len(sys.argv) >= 2:
        params = np.load(sys.argv[1])
    else:
        params = train(model)
        np.save("__mnist.npy", params)

    model.load_params(params)
    test_world = Mnist(test=True)
    trajs = test_world.trajectories(None, 5000)
    trajs = accuracy(trajs, model=model, percent=True)
    print_reward(trajs, max_value=100, label="Test accuracy:")
Example #25
def carr():
    carr = Input(2)
    carr = Affine(carr, 32)
    carr = LReLU(carr)
    carr = Affine(carr, 1)
    return carr
Example #26
def run():
    classifier = Input(7)
    classifier = Affine(classifier, 32)
    classifier = LReLU(classifier)
    classifier = Affine(classifier, 2)
    classifier = Softmax(classifier)

    agent = walker()
    agent.load_params(np.random.randn(agent.n_params) * 1.5)

    MAX_TRAIN_TIME = 200
    trainTimeLeft = MAX_TRAIN_TIME
    curAgentId = -1
    curMemoryId = 0

    def plot_tagged_trajs(trajs):
        nonlocal trainTimeLeft, curAgentId, curMemoryId
        COLORS = ["blue", "red"]
        plt.clf()
        plt.grid()
        plt.gcf().axes[0].set_xlim([-1.25, 1.25])
        plt.gcf().axes[0].set_ylim([-1.25, 1.25])
        plt.suptitle("Episode %d of agent %d, memories: %d" %
                     (MAX_TRAIN_TIME - trainTimeLeft, curAgentId, curMemoryId))
        for traj in trajs:
            tag = traj[0][1]
            xs, ys = [], []
            for state, _, _ in traj:
                x = state[2]
                y = state[3]
                xs.append(x)
                ys.append(y)
            plt.plot(xs, ys, color=COLORS[np.argmax(tag)], alpha=0.1)
        plt.gcf().set_size_inches(10, 8)
        plt.gcf().savefig("__step_a%03d_t%03d.png" %
                          (curAgentId, MAX_TRAIN_TIME - trainTimeLeft),
                          dpi=100)

    world = Gym("BipedalWalker-v2", max_steps=MAX_STEPS)
    world = ActionNoise(world, stddev=0.2)
    world = Curiosity(world,
                      classifier=classifier,
                      history_length=50,
                      for_classifier=lambda ts: change_obs_space(
                          ts, changer=interesting_part),
                      plot=plot_tagged_trajs)
    MAX_BOREDOM = 3
    boredom = MAX_BOREDOM

    MAX_MOTIVATION = 3
    motivation = MAX_MOTIVATION

    agentOpt = None
    lastScores = None

    def memorize():
        nonlocal boredom, curMemoryId
        print("Memorizing %d..." % curMemoryId)
        world.remember(agent)
        boredom = MAX_BOREDOM
        curMemoryId += 1

    def save_agent():
        np.save(
            "__ranger_a%03d_t%03d.npy" %
            (curAgentId, MAX_TRAIN_TIME - trainTimeLeft), agentOpt.get_value())

    def reset_agent():
        nonlocal agentOpt, trainTimeLeft, lastScores, curAgentId, motivation
        if agentOpt is not None:
            save_agent()
        print("Resetting agent %d." % curAgentId)
        agentOpt = Adam(
            np.random.randn(agent.n_params) * 1.5,
            lr=0.05,
            memory=0.9,
        )
        trainTimeLeft = MAX_TRAIN_TIME
        lastScores = [-0.4]
        curAgentId += 1
        motivation = MAX_MOTIVATION

    reset_agent()
    while True:
        agent.load_params(agentOpt.get_value())

        realTrajs, curiosityTrajs = world.trajectories(agent, 30)
        curScore = np.mean(get_rewards(realTrajs, episode=np.sum)) / 300.
        lastScores.append(curScore)
        lastScores = lastScores[-10:]
        scoreDev = np.std(lastScores)
        scoreMean = np.max([np.abs(np.mean(lastScores)), 1.])

        curCuriosity = np.mean(get_rewards(curiosityTrajs, episode=np.max))

        print_reward(realTrajs,
                     max_value=300.0,
                     episode=np.sum,
                     label="Real reward:      ")
        print_reward(curiosityTrajs,
                     max_value=1.0,
                     episode=np.max,
                     label="Curiosity reward: ")
        if curCuriosity > 0.85:
            if boredom == 0:
                save_agent()
                memorize()
            else:
                boredom -= 1
        else:
            boredom = np.min([boredom + 1, MAX_BOREDOM])

        if scoreDev / scoreMean < 0.010 or trainTimeLeft < 0:
            if motivation == 0:
                print("Not really learning.")
                save_agent()
                motivation = MAX_MOTIVATION
                trainTimeLeft = MAX_TRAIN_TIME
                if curScore < 0.01:
                    memorize()
                    reset_agent()
                    continue
            else:
                motivation -= 1
        else:
            motivation = np.min([motivation + 1, MAX_MOTIVATION])

        realTrajs = discount(realTrajs, horizon=200)
        realTrajs = normalize(realTrajs)
        curiosityTrajs = replace_rewards(curiosityTrajs, episode=np.max)
        realWeight = np.min([scoreDev / scoreMean * 10., 0.9])
        curiosityWeight = 1. - realWeight
        trajs = combine_rewards([realTrajs, curiosityTrajs],
                                [realWeight, curiosityWeight])
        trajs = normalize(trajs)
        grad = policy_gradient(trajs, policy=agent)
        agentOpt.apply_gradient(grad)

        trainTimeLeft -= 1
Example #27
def run():
    classifier = Input(2)
    classifier = Affine(classifier, 16)
    classifier = LReLU(classifier)
    classifier = Affine(classifier, 2)
    classifier = Softmax(classifier)

    world = Gym("MountainCar-v0")
    world = StochasticPolicy(world)

    curCarr = carr()
    curCarr.load_params(np.random.randn(curCarr.n_params))
    oldTrajs = world.trajectories(curCarr, 800)

    def train_one(carrOpt):
        nonlocal oldTrajs
        classOpt = Adam(
            np.random.randn(classifier.n_params) * 1.,
            lr=0.5,
            memory=0.9,
        )
        if carrOpt is None:
            carrOpt = Adam(
                np.random.randn(curCarr.n_params),
                lr=0.10,
                memory=0.5,
            )
        curScore = 0.
        curAccuracy = 0.
        for i in range(250):
            classifier.load_params(classOpt.get_value())
            curCarr.load_params(carrOpt.get_value())

            oldTrajIdx = np.random.choice(len(oldTrajs), size=50)
            trajs = [oldTrajs[i] for i in oldTrajIdx]
            trajs += world.trajectories(curCarr, 50)
            trajsForClass = [tag_traj(traj, [1, 0]) for traj in trajs[:50]]
            trajsForClass += [tag_traj(traj, [0, 1]) for traj in trajs[50:]]
            plot_tagged_trajs(trajsForClass)
            accTrajs = accuracy(trajsForClass, model=classifier)
            print_reward(accTrajs,
                         max_value=1.0,
                         episode=np.mean,
                         label="Cla reward: ")
            curAccuracy = np.mean(get_rewards(accTrajs, episode=np.mean))
            if curAccuracy > 1. - i / 500:
                break

            grad = policy_gradient(trajsForClass, policy=classifier)
            classOpt.apply_gradient(grad)
            trajs2 = learn_from_classifier(classifier, trajs[50:], 1)
            print_reward(trajs2,
                         max_value=1.0,
                         episode=np.max,
                         label="Car reward: ")
            curScore = np.mean(get_rewards(trajs2, episode=np.max))
            trajs2 = replace_rewards(trajs2, episode=np.max)
            trajs2 = normalize(trajs2)
            grad2 = policy_gradient(trajs2, policy=curCarr)
            carrOpt.apply_gradient(grad2)
            if i % 10 == 0:
                print("%d episodes in." % i)
        oldTrajs += world.trajectories(curCarr, 800)
        world.render(curCarr)
        if curScore > 0.11:
            return carrOpt
        else:
            return None

    theCarOpt = None
    for i in range(10):
        print("Teaching agent %d." % i)
        theCarOpt = train_one(theCarOpt)
Example #28
def message_text(event):
    All_state = json.load(open("db.txt"))
    user_input = event.message.text
    user_id = event.source.user_id

    if user_id not in All_state:
        All_state[user_id] = 0
    user_state = All_state[user_id]
    # print(All_state)

    if user_state == 0 and (user_input == "E" or user_input == "e"):
        text = "請依照下列格式輸入你的基金組合喔\U0010005E:\n\n" + \
            "[基金1統編]:[基金1比重],[基金2統編]:[基金2比重]...\n\n" + \
            "範例:\n" + \
            "26396604B:0.4,26286281F:0.6"
        message = TextMessage(text=text)
        line_bot_api.reply_message(event.reply_token, message)
        All_state[user_id] = 1

    elif user_state == 0:
        text = "" + \
            "嗨\U0010005E\U0010005E\U0010005E!\n" + \
            "這是一個評估/推薦基金組合的聊天機器人\U0010005E。\n\n" + \
            "如果想要評估目前的基金組合,請輸入\"E\"。"
        message = TextMessage(text=text)
        line_bot_api.reply_message(event.reply_token, message)

    elif user_state == 1 and (user_input == "Q" or user_input == "q"):
        text = "" + \
            "嗨\U0010005E\U0010005E\U0010005E!\n" + \
            "這是一個評估/推薦基金組合的聊天機器人\U0010005E。\n\n" + \
            "如果想要評估目前的基金組合,請輸入\"E\"。"
        message = TextMessage(text=text)
        line_bot_api.reply_message(event.reply_token, message)
        All_state[user_id] = 0

    elif user_state == 1:
        try:
            user_input_splited = user_input.split(",")
            selectFunds = []
            selectFund_weights = []
            for item in user_input_splited:
                temp_fund, temp_weight = item.split(":", 1)
                selectFunds.append(temp_fund)
                selectFund_weights.append(float(temp_weight))
            # print(selectFunds,selectFund_weights)

            All_selectFunds = json.load(open("selectFunds.txt"))
            All_selectFunds_weights = json.load(open("selectFund_weights.txt"))
            # print(All_selectFunds,All_selectFunds_weights)
            All_selectFunds[user_id] = selectFunds
            All_selectFunds_weights[user_id] = selectFund_weights
            json.dump(All_selectFunds, open("selectFunds.txt", "w"))
            json.dump(All_selectFunds_weights,
                      open("selectFund_weights.txt", "w"))
            # print(All_selectFunds,All_selectFunds_weights)

            # evaluate
            extra_ratio = 0
            recommend_num = 0

            user_input = Input(extra_ratio, selectFunds, selectFund_weights,
                               recommend_num)
            origin, result = recommend(user_input, line_bot_api, user_id)

            # Check the weights: each must be non-negative and sum to 1
            check_ratio = 0
            for item in selectFund_weights:
                if item < 0:
                    check_ratio = -1
                    break
                check_ratio += item
            if check_ratio != 1:
                message = TextSendMessage(text="基金比重錯誤!請重新輸入!")
                line_bot_api.reply_message(event.reply_token, message)
            elif origin is None:
                message = TextSendMessage(text="基金統編錯誤!請重新輸入!")
                line_bot_api.reply_message(event.reply_token, message)
            else:
                message = TextMessage(text="評估結果:")
                message1 = TextSendMessage(text=origin.output_portfolio())
                confirm_template_message = TemplateSendMessage(
                    alt_text='Confirm template',
                    template=ConfirmTemplate(text='需要推薦基金組合嗎?',
                                             actions=[
                                                 MessageAction(label='是',
                                                               text='Y'),
                                                 MessageAction(label='否',
                                                               text='N')
                                             ]))

                line_bot_api.push_message(user_id, message)
                line_bot_api.push_message(user_id, message1)
                line_bot_api.reply_message(event.reply_token,
                                           confirm_template_message)
                All_state[user_id] = 2

        except Exception:
            text = "格式錯誤或是基金不存在\U0010005E!請再輸入一次,如果要停止評估,請輸入\"Q\"。"
            message = TextMessage(text=text)
            line_bot_api.reply_message(event.reply_token, message)

    elif user_state == 2 and user_input == "N":
        text = "" + \
            "嗨\U0010005E\U0010005E\U0010005E!\n" + \
            "這是一個評估/推薦基金組合的聊天機器人\U0010005E。\n\n" + \
            "如果想要評估目前的基金組合,請輸入\"E\"。"
        message = TextMessage(text=text)
        line_bot_api.reply_message(event.reply_token, message)
        All_state[user_id] = 0

    elif user_state == 2:
        text = "" + \
            "請依照下列格式輸入資料:\n\n" + \
            "[轉換金額比例],[最多推薦的基金數量]\n\n" + \
            "範例:\n" + \
            "0.3,2\n\n" + \
            "**注意**\n" + \
            "轉換比例不能大於" + str(LIMIT_FUNDS_RATIO) + "\n" + \
            "推薦基金數量不能大於" +  str(LIMIT_FUNDS_NUM)
        message = TextMessage(text=text)
        line_bot_api.reply_message(event.reply_token, message)
        All_state[user_id] = 3

    elif user_state == 3 and (user_input == "Q" or user_input == "q"):
        text = "" + \
            "嗨\U0010005E\U0010005E\U0010005E!\n" + \
            "這是一個評估/推薦基金組合的聊天機器人\U0010005E。\n\n" + \
            "如果想要評估目前的基金組合,請輸入\"E\"。"
        message = TextMessage(text=text)
        line_bot_api.reply_message(event.reply_token, message)
        All_state[user_id] = 0

    elif user_state == 3:
        try:
            temp1, temp2 = user_input.split(",", 1)
            extra_ratio = float(temp1)
            recommend_num = int(temp2)

            #check
            if extra_ratio > float(LIMIT_FUNDS_RATIO):
                message = TextSendMessage(text="轉換金額比例大於限制!請重新輸入!")
                line_bot_api.reply_message(event.reply_token, message)
            elif recommend_num > int(LIMIT_FUNDS_NUM):
                message = TextSendMessage(text="推薦基金數量大於限制!請重新輸入!")
                line_bot_api.reply_message(event.reply_token, message)
            else:
                All_selectFunds = json.load(open("selectFunds.txt"))
                All_selectFunds_weights = json.load(
                    open("selectFund_weights.txt"))
                selectFunds = All_selectFunds[user_id]
                selectFund_weights = All_selectFunds_weights[user_id]

                # print(extra_ratio,recommend_num,selectFunds,selectFund_weights)

                #Optimize
                user_input = Input(extra_ratio, selectFunds,
                                   selectFund_weights, recommend_num)
                origin, result = recommend(user_input, line_bot_api, user_id)

                message = TextMessage(text="下列是基金組合的新舊對照\U0010005E:")
                message1 = TextSendMessage(text="舊:\n" +
                                           origin.output_portfolio())
                message2 = TextSendMessage(text="新:\n" +
                                           result.output_portfolio())
                message_last = TextMessage(text="如果還想要評估基金組合,請輸入\"E\"。")

                line_bot_api.push_message(user_id, message)
                line_bot_api.push_message(user_id, [message1, message2])
                line_bot_api.reply_message(event.reply_token, message_last)
                All_state[user_id] = 0

        except Exception:
            text = "格式錯誤\U0010005E!請再輸入一次,如果要停止推薦,請輸入\"Q\"。"
            message = TextMessage(text=text)
            line_bot_api.reply_message(event.reply_token, message)

    else:
        message = TextMessage(text="\U0010005E\U0010005E\U0010005E")
        line_bot_api.reply_message(event.reply_token, message)
        All_state[user_id] = 0

    # print(All_state)
    json.dump(All_state, open("db.txt", "w"))
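The Input object this bot passes to recommend bundles the conversion ratio, the selected funds with their weights, and the maximum number of recommendations. A minimal sketch of that container (field names inferred from the call sites; the real class may differ):

from dataclasses import dataclass
from typing import List

@dataclass
class Input:
    extra_ratio: float
    selectFunds: List[str]
    selectFund_weights: List[float]
    recommend_num: int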