Example #1
    def process_input(self, Y):
        '''
        Return the dataset questions most similar to the input string.

        :param str Y: input string
        '''

        start = time()

        # Save the raw input string
        raw_input = Y

        Y = self.vectorizer.transform([Y])

        y_tf_idf = self.tf_idf_transformer.transform(Y)

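        # Retrieve the candidate question IDs nearest to the input in TF-IDF space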
        distances, ids = self.nbrs.kneighbors(y_tf_idf.todense()) 

        # Rate questions by their similarity scores using w2v
        q_similarity_scores = {}
        input_tokens = preprocess_input(raw_input)
        for q_id in ids[0]:
            question = self.dataset[q_id][0]
            sum_similarities = 0

            question = preprocess_input(question)

            for word in input_tokens:
                # Find the maximum similarity of this input word
                # to any word in the question
                sims = []
                for q_word in question:
                    try:
                        sims.append(self.w2v_model.wv.similarity(word, q_word))
                    except KeyError:
                        print(f"Word {q_word} not in dataset")
                if sims:
                    sum_similarities += max(sims)

            # Associate every question with its similarity
            # to the input
            q_similarity_scores[q_id] = sum_similarities

        # Sort question IDs by their similarity to the input
        sorted_by_sim = sorted(q_similarity_scores.items(), key=lambda x: x[1], reverse=True)

        # Return the top 10 results
        return [(q_id, self.dataset[q_id]) for q_id, _ in sorted_by_sim[:10]]
Example #2
def telemetry(sid, data):
    if data:
        # The current steering angle of the car
        steering_angle = data["steering_angle"]
        # The current throttle of the car
        throttle = data["throttle"]
        # The current speed of the car
        speed = data["speed"]
        # The current image from the center camera of the car
        imgString = data["image"]
        image = Image.open(BytesIO(base64.b64decode(imgString)))
        image_array = np.asarray(image)
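        # Predict the steering angle from the preprocessed camera frame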
        steering_angle = float(
            model.predict(preprocessing.preprocess_input(
                image_array[None, :, :, :]),
                          batch_size=1))

        throttle = controller.update(float(speed))

        print(steering_angle, throttle)
        send_control(steering_angle, throttle)

        # save frame
        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            image.save('{}.jpg'.format(image_filename))
    else:
        # NOTE: DON'T EDIT THIS.
        sio.emit('manual', data={}, skip_sid=True)
Example #3
    def set_dataset(self, dataset, processed_dataset, corpus):
        '''
        Build the TF-IDF / nearest-neighbour index and train the w2v model.

        :param dict dataset: {key: [question, answer]}
        '''

        self.dataset = dataset

        start = time()

        list_of_q = self._extract_questions(dataset)

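        # Build a bag-of-words representation of the questions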
        self.vectorizer = CountVectorizer(lowercase=True, analyzer='word')
        X = self.vectorizer.fit_transform(list_of_q)

        self.tf_idf_transformer = TfidfTransformer(use_idf=True).fit(X)

        x_tf_idf = self.tf_idf_transformer.transform(X)

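        # Index the TF-IDF vectors for nearest-neighbour lookup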
        # ball_tree does not accept sparse input, so densify the TF-IDF matrix
        self.nbrs = NearestNeighbors(n_neighbors=10,
                                     algorithm='ball_tree',
                                     metric='manhattan').fit(x_tf_idf.toarray())

        print(time() - start)

        self.corpus = corpus

        # Extract token list per question
        # for the w2v model
        token_list = []
        for question in list_of_q:
            token_list.append(preprocess_input(question))

        # Train w2v model
        self.w2v_model = Word2Vec(token_list, size=100, window=3, min_count=0, workers=4, iter=10)
Example #4
def main(_):
    if not os.path.exists("./outputs/steering_model"):
        os.makedirs("./outputs/steering_model")

    batch_size = 512

    X_image = []
    X_flip = []
    y_steering = []

    with open('driving_data.csv', 'rt') as csvfile:
        reader = csv.reader(csvfile, skipinitialspace=True)

        for row in reader:
            X_image.append(row[0])
            X_flip.append(float(row[1]))
            y_steering.append(float(row[2]))

    X_image = np.array(X_image)
    X_flip = np.array(X_flip)
    y_steering = np.array(y_steering)

    X_train_image, X_test_image, X_train_flip, X_test_flip, y_train_steering, y_test_steering = train_test_split(
        X_image, X_flip, y_steering, test_size=0.2)

    if len(X_train_image) == 0:
        print('No data found')
        return

    sample_image = preprocessing.preprocess_input(np.array([mpimg.imread(X_train_image[0])]))[0]
    print('Image Shape: ', sample_image.shape)

    print('Training Data Count:', len(X_train_image))
    print('Validation Data Count:', len(X_test_image))

    X_train_image, X_train_flip, y_train_steering = shuffle(X_train_image, X_train_flip, y_train_steering)

    comma_ai_model = get_comma_ai_model(sample_image.shape)
    nvidia_model = get_nvidia_model(sample_image.shape)

    merged = Merge([comma_ai_model, nvidia_model])

    model = Sequential()
    model.add(merged)
    model.add(Dense(1, activation='elu'))
    model.compile(optimizer="adam", loss="mse")

    model.fit_generator(image_generator(X_train_image, X_train_flip,
                                        y_train_steering, batch_size),
                        samples_per_epoch=1024 * 25,
                        nb_epoch=5,
                        validation_data=image_generator(X_test_image, X_test_flip,
                                                        y_test_steering, batch_size),
                        nb_val_samples=1024 * 5)

    print("Saving model weights and configuration file.")

    model.save_weights("./outputs/steering_model/steering_angle.keras", True)
    with open('./outputs/steering_model/steering_angle.json', 'w') as outfile:
        json.dump(model.to_json(), outfile)
Example #5
def ExpressionDetection(image):
    detection_model_path = "%s/%s/haarcascade_frontalface_default.xml" % (PATH, "haar_cascade")

    emotion_model_path = '%s/vision/models/fer2013_mini_XCEPTION.102-0.66.hdf5' % PATH
    emotion_labels = get_labels('fer2013')

    # loading models
    face_detection = load_detection_model(detection_model_path)

    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    emotion_offsets = (20, 40)
    # Read the image in grayscale and detect faces
    gray_image = cv2.imread(image, 0)
    faces = detect_faces(face_detection, gray_image)

    for face_coordinates in faces:

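        # Crop the face region (with offsets) and resize it to the classifier's input size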
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            # Skip faces whose crop could not be resized
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        return emotion_text
Example #6
def image_generator(X_image, X_flip, y_steering, batch_size):
    sample_image_shape = mpimg.imread(X_image[0]).shape

    # Create empty arrays to hold a batch of features and labels
    batch_features = np.zeros((batch_size, sample_image_shape[0], sample_image_shape[1], sample_image_shape[2]))
    batch_labels = np.zeros((batch_size, 1))
    while True:
        for i in range(batch_size):
            # Choose a random index into the features
            index = np.random.randint(len(X_image))
            image = mpimg.imread(X_image[index])
            if X_flip[index] == 0:
                batch_features[i] = image
                batch_labels[i] = y_steering[index]
            else:
                batch_features[i] = cv2.flip(image, flipCode=1)
                batch_labels[i] = y_steering[index]

        processed_feature = preprocessing.preprocess_input(batch_features)
        yield [processed_feature, processed_feature], batch_labels
Example #7
def telemetry(sid, data):
    # The current steering angle of the car
    steering_angle = data["steering_angle"]
    # The current throttle of the car
    throttle = data["throttle"]
    # The current speed of the car
    speed = data["speed"]
    # The current image from the center camera of the car
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    image_array = np.asarray(image)
    transformed_image_array = image_array[None, :, :, :]
    transformed_image_array = preprocessing.preprocess_input(
        transformed_image_array)

    # This model currently assumes that the features of the model are just the images. Feel free to change this.
    steering_angle = float(
        model.predict([transformed_image_array, transformed_image_array],
                      batch_size=1))
    # The driving model currently just outputs a constant throttle. Feel free to edit this.
    throttle = 0.2
    print(steering_angle, throttle)
    send_control(steering_angle, throttle)
Example #8
def main():

    # Read dataset
    dataset_dict = index_dataset()

    # Preprocess dataset if needed
    if not os.path.exists('./objects/indexer.pickle') or not os.path.exists(
            './objects/knn.pickle'):
        dataset, corpus = preprocess_dataset(dataset_dict,
                                             lemmatize=True,
                                             remove_stopwords=True,
                                             measure_time=True)

    # Load or create indexer
    if os.path.exists('./objects/indexer.pickle'):
        indexer = load_object('./objects/indexer.pickle')
    else:
        indexer = Indexer(dataset, measure_time=True)
        save_object(indexer, './objects/indexer.pickle')

    # Load or create KNN
    if os.path.exists('./objects/knn.pickle'):
        knn = load_object('./objects/knn.pickle')
    else:
        # Initialize KNN with given dataset
        knn = KNN(dataset, corpus, measure_time=True)
        save_object(knn, './objects/knn.pickle')

    # Main loop for user input
    print("Type a question:")
    q = input()
    while q != 'quit':

        processed_input = preprocess_input(q,
                                           lemmatize=True,
                                           remove_stopwords=True)

        terms_to_search_for = list(processed_input.keys())

        print('Terms to search for:')
        print(terms_to_search_for)
        print()

        containing_docs = indexer.retrieve_documents(terms_to_search_for,
                                                     measure_time=True)

        res = knn.find_nearest_neigbours(processed_input,
                                         containing_docs,
                                         k=10,
                                         measure_time=True)

        print("\nResults:\n")
        i = 1
        for r in res:
            print(f'#{i}')
            print(r)
            print()
            i += 1

        print("Type a question:")
        q = input()
Example #9
import cv2
import numpy as np
import os
import preprocessing as pre
from sklearn.model_selection import train_test_split

# Getting required directories
# Model Directory
mod_dir = "/".join(os.getcwd().split("/")[0:-1] + ['model/'])
# Data Directory
data_dir = "/".join(os.getcwd().split("/")[0:-1] + ['data/'])
# Base Directory
base_dir = "/".join(os.getcwd().split("/")[0:-1])

# data loading and preprocessing
faces, emotions = pre.load_data(data_dir + "fer2013.csv")
faces = pre.preprocess_input(faces)
xtrain, xtest, ytrain, ytest = train_test_split(faces,
                                                emotions,
                                                test_size=0.2,
                                                shuffle=True)

# parameters
batch_size = 64
num_epochs = 100
input_shape = (48, 48, 1)
verbose = 1
num_classes = 7
patience = 30
l2_regularization = 0.01

# data generator
Example #10
def main():
    input_file = os.path.abspath(args.input_file)
    data_output_dir = os.path.abspath(args.data_output_dir)
    final_output_dir = os.path.abspath(args.final_output_dir)
    if not os.path.exists(data_output_dir):
        os.makedirs(data_output_dir)
    if not os.path.exists(final_output_dir):
        os.makedirs(final_output_dir)
    # Support some standard preprocessing
    if args.tokenize:
        # tokenize input_file to token_file
        logging.info("tokenizing data")
        token_file = "%s/tokens.jsonlist.gz" % data_output_dir
        func = functools.partial(preprocessing.tokenize,
                                 filter_stopwords=args.nostopwords)
        preprocessing.preprocess_input(input_file, token_file)
        input_file = token_file
    if args.lemmatize:
        # lemmatize input_file to lemma_file
        logging.info("lemmatizing data")
        lemma_file = "%s/lemmas.jsonlist.gz" % data_output_dir
        func = functools.partial(preprocessing.lemmatize,
                                 filter_stopwords=args.nostopwords)
        preprocessing.preprocess_input(input_file, lemma_file)
        input_file = lemma_file
    # generate topics or lexicons
    option = args.option
    num_ideas = args.num_ideas
    cooccur_func = functools.partial(il.generate_cooccurrence_from_int_set,
                                     num_ideas=num_ideas)
    prefix = args.prefix
    if option == "topics":
        logging.info("using topics to represent ideas")
        prefix = "%s_topics" % prefix
        # generate mallet topics
        mt.get_mallet_input_from_words(input_file, data_output_dir)
        if not mt.check_mallet_directory(data_output_dir):
            # run mallet to prepare topics inputs
            # users can also generate mallet-style topic inputs themselves
            logging.info("running mallet to get topics")
            if not os.path.exists(os.path.join(args.mallet_bin_dir, 'mallet')):
                sys.exit("Error: Unable to find mallet at %s" %
                         args.mallet_bin_dir)
            os.system("./mallet.sh %s %s %d" %
                      (args.mallet_bin_dir, data_output_dir, num_ideas))
        # load mallet outputs
        articles, vocab, idea_names = mt.load_articles(input_file,
                                                       data_output_dir)
        table_top = 5
    elif option == "keywords":
        logging.info("using keywords to represent ideas")
        prefix = "%s_keywords" % prefix
        # identify keyword ideas using the fighting lexicon
        lexicon_file = "%s/fighting_lexicon.txt" % data_output_dir
        other_files = [args.background_file]
        fl.get_top_distinguishing(input_file, other_files, data_output_dir,
                                  lexicon_file)
        # load keywords
        articles, word_set, idea_names = fl.load_word_articles(
            input_file, lexicon_file, data_output_dir, vocab_size=num_ideas)
        table_top = 10
    else:
        logging.error("unsupported idea representations")
        sys.exit(1)

    # compute strength between pairs and generate outputs
    il.generate_all_outputs(articles,
                            num_ideas,
                            idea_names,
                            prefix,
                            final_output_dir,
                            cooccur_func,
                            table_top=table_top,
                            group_by=args.group_by)
Example #11
def predict_vm(ecs_lines, input_lines):
    # Do your work from here#
    result = []
    if ecs_lines is None:
        return result
    if input_lines is None:
        return result

    mission = preprocessing.preprocess_input(input_lines)
    flavor_dict = preprocessing.preprocess_ecs_info(ecs_lines, mission)
    data_dict_merge = preprocessing.merge(flavor_dict, mission)
    data_dict_filled = preprocessing.fill_data(data_dict_merge, mission)
    error_point = ['']
    if len(mission.vm_type) == 3:
        if mission.opt_target == "CPU":
            flavname = data_dict_filled.keys()
            nn_parameter_dict = dict()
            deg = 53
            for name in flavname:
                data = data_dict_filled[name]
                series = [x[1] for x in data]
                X, Y = generate_samples(series, deg)
                X = X.transpose()
                Y = Y.transpose()
                parameters = nn_model(X,
                                      Y,
                                      4,
                                      learning_rate=0.01,
                                      num_iterations=405)
                nn_parameter_dict[name] = parameters

            # Read the number of days to forecast
            start_time = datetime.datetime.strptime(
                mission.time_limit[0].split(' ')[0], '%Y-%m-%d')
            end_time = datetime.datetime.strptime(
                mission.time_limit[1].split(' ')[0], '%Y-%m-%d')
            days = (end_time - start_time).days

            pred_result_dict = dict()
            pred_list = []
            for name in flavname:
                # Take the last deg sample points of the data
                n_sum = 0  # total count of this flavor over the forecast period
                flavor_data = data_dict_filled[name]  # data points
                parameters = nn_parameter_dict[name]  # network parameters

                samples = flavor_data[-deg:]
                samples = [x[1] for x in samples]
                samples = matrix([samples])
                samples = samples.transpose()
                temp = []
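                # Roll the model forward one day at a time, feeding each prediction back as the next input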
                for day in range(days):
                    A2, cache = forward_propagation(samples, parameters)
                    x_pred = A2.value[0][0]
                    x_pred = math.ceil(abs(x_pred))
                    n_sum = n_sum + x_pred
                    samples.value[0][0] = x_pred
                    temp.append(x_pred)
                pred_result_dict[name] = n_sum
            print("Prediction finished")

            # Place the predicted VMs onto physical servers
            phy_server = annealingoptimize(pred_result_dict,
                                           mission,
                                           T=10000.0,
                                           cool=0.98)

            print("Placement finished")
            print("Generating the result output file")
            n_sum = 0
            for name in pred_result_dict.keys():
                n_sum = n_sum + pred_result_dict[name]
            result.append(int(n_sum))
            for name in pred_result_dict.keys():
                result.append("%s %d" % (name, pred_result_dict[name]))
            result.append("")
            result.append(len(phy_server))
            for n in phy_server.keys():
                s = str(n)
                u_name = set(phy_server[n])
                for u_n in u_name:
                    cnt = phy_server[n].count(u_n)
                    s = s + ' ' + str(u_n) + ' ' + str(cnt)
                result.append(s)
        elif mission.opt_target == "MEM":
            flavname = data_dict_filled.keys()
            nn_parameter_dict = dict()
            deg = 55
            for name in flavname:
                data = data_dict_filled[name]
                series = [x[1] for x in data]
                X, Y = generate_samples(series, deg)
                X = X.transpose()
                Y = Y.transpose()
                parameters = nn_model(X,
                                      Y,
                                      4,
                                      learning_rate=0.01,
                                      num_iterations=40)
                nn_parameter_dict[name] = parameters

            # Read the number of days to forecast
            start_time = datetime.datetime.strptime(
                mission.time_limit[0].split(' ')[0], '%Y-%m-%d')
            end_time = datetime.datetime.strptime(
                mission.time_limit[1].split(' ')[0], '%Y-%m-%d')
            days = (end_time - start_time).days

            pred_result_dict = dict()
            pred_list = []
            for name in flavname:
                # Take the last deg sample points of the data
                n_sum = 0  # total count of this flavor over the forecast period
                flavor_data = data_dict_filled[name]  # data points
                parameters = nn_parameter_dict[name]  # network parameters

                samples = flavor_data[-deg:]
                samples = [x[1] for x in samples]
                samples = matrix([samples])
                samples = samples.transpose()
                temp = []
                for day in range(days):
                    A2, cache = forward_propagation(samples, parameters)
                    x_pred = A2.value[0][0]
                    x_pred = math.ceil(abs(x_pred))
                    n_sum = n_sum + x_pred
                    samples.value[0][0] = x_pred
                    temp.append(x_pred)
                pred_result_dict[name] = n_sum
            print("Prediction finished")

            # Place the predicted VMs onto physical servers
            phy_server = annealingoptimize(pred_result_dict,
                                           mission,
                                           T=10000.0,
                                           cool=0.98)

            print("Placement finished")
            print("Generating the result output file")
            n_sum = 0
            for name in pred_result_dict.keys():
                n_sum = n_sum + pred_result_dict[name]
            result.append(int(n_sum))
            for name in pred_result_dict.keys():
                result.append("%s %d" % (name, pred_result_dict[name]))
            result.append("")
            result.append(len(phy_server))
            for n in phy_server.keys():
                s = str(n)
                u_name = set(phy_server[n])
                for u_n in u_name:
                    cnt = phy_server[n].count(u_n)
                    s = s + ' ' + str(u_n) + ' ' + str(cnt)
                result.append(s)
    elif len(mission.vm_type) == 5:
        if mission.opt_target == "CPU":
            flavname = data_dict_filled.keys()
            nn_parameter_dict = dict()
            deg = 46
            for name in flavname:
                data = data_dict_filled[name]
                series = [x[1] for x in data]
                X, Y = generate_samples(series, deg)
                X = X.transpose()
                Y = Y.transpose()
                parameters = nn_model(X,
                                      Y,
                                      4,
                                      learning_rate=0.01,
                                      num_iterations=405)
                nn_parameter_dict[name] = parameters

            # Read the number of days to forecast
            start_time = datetime.datetime.strptime(
                mission.time_limit[0].split(' ')[0], '%Y-%m-%d')
            end_time = datetime.datetime.strptime(
                mission.time_limit[1].split(' ')[0], '%Y-%m-%d')
            days = (end_time - start_time).days

            pred_result_dict = dict()
            pred_list = []
            for name in flavname:
                # Take the last deg sample points of the data
                n_sum = 0  # total count of this flavor over the forecast period
                flavor_data = data_dict_filled[name]  # data points
                parameters = nn_parameter_dict[name]  # network parameters

                samples = flavor_data[-deg:]
                samples = [x[1] for x in samples]
                samples = matrix([samples])
                samples = samples.transpose()
                temp = []
                for day in range(days):
                    A2, cache = forward_propagation(samples, parameters)
                    x_pred = A2.value[0][0]
                    x_pred = math.ceil(abs(x_pred))
                    n_sum = n_sum + x_pred
                    samples.value[0][0] = x_pred
                    temp.append(x_pred)
                pred_result_dict[name] = n_sum
            print("Prediction finished")

            # Place the predicted VMs onto physical servers
            phy_server = annealingoptimize(pred_result_dict,
                                           mission,
                                           T=10000.0,
                                           cool=0.98)

            print("Placement finished")
            print("Generating the result output file")
            n_sum = 0
            for name in pred_result_dict.keys():
                n_sum = n_sum + pred_result_dict[name]
            result.append(int(n_sum))
            for name in pred_result_dict.keys():
                result.append("%s %d" % (name, pred_result_dict[name]))
            result.append("")
            result.append(len(phy_server))
            for n in phy_server.keys():
                s = str(n)
                u_name = set(phy_server[n])
                for u_n in u_name:
                    cnt = phy_server[n].count(u_n)
                    s = s + ' ' + str(u_n) + ' ' + str(cnt)
                result.append(s)
        elif mission.opt_target == "MEM":
            flavname = data_dict_filled.keys()
            nn_parameter_dict = dict()
            deg = 60
            for name in flavname:
                data = data_dict_filled[name]
                series = [x[1] for x in data]
                X, Y = generate_samples(series, deg)
                X = X.transpose()
                Y = Y.transpose()
                parameters = nn_model(X,
                                      Y,
                                      4,
                                      learning_rate=0.01,
                                      num_iterations=40)
                nn_parameter_dict[name] = parameters

            # Read the number of days to forecast
            start_time = datetime.datetime.strptime(
                mission.time_limit[0].split(' ')[0], '%Y-%m-%d')
            end_time = datetime.datetime.strptime(
                mission.time_limit[1].split(' ')[0], '%Y-%m-%d')
            days = (end_time - start_time).days

            pred_result_dict = dict()
            pred_list = []
            for name in flavname:
                # Take the last deg sample points of the data
                n_sum = 0  # total count of this flavor over the forecast period
                flavor_data = data_dict_filled[name]  # data points
                parameters = nn_parameter_dict[name]  # network parameters

                samples = flavor_data[-deg:]
                samples = [x[1] for x in samples]
                samples = matrix([samples])
                samples = samples.transpose()
                temp = []
                for day in range(days):
                    A2, cache = forward_propagation(samples, parameters)
                    x_pred = A2.value[0][0]
                    x_pred = math.ceil(abs(x_pred))
                    n_sum = n_sum + x_pred
                    samples.value[0][0] = x_pred
                    temp.append(x_pred)
                pred_result_dict[name] = n_sum
            print("Prediction finished")

            # Place the predicted VMs onto physical servers
            phy_server = annealingoptimize(pred_result_dict,
                                           mission,
                                           T=10000.0,
                                           cool=0.98)

            print("Placement finished")
            print("Generating the result output file")
            n_sum = 0
            for name in pred_result_dict.keys():
                n_sum = n_sum + pred_result_dict[name]
            result.append(int(n_sum))
            for name in pred_result_dict.keys():
                result.append("%s %d" % (name, pred_result_dict[name]))
            result.append("")
            result.append(len(phy_server))
            for n in phy_server.keys():
                s = str(n)
                u_name = set(phy_server[n])
                for u_n in u_name:
                    cnt = phy_server[n].count(u_n)
                    s = s + ' ' + str(u_n) + ' ' + str(cnt)
                result.append(s)
    else:
        if mission.opt_target == "CPU":
            flavname = data_dict_filled.keys()
            nn_parameter_dict = dict()
            deg = 42
            for name in flavname:
                data = data_dict_filled[name]
                series = [x[1] for x in data]
                X, Y = generate_samples(series, deg)
                X = X.transpose()
                Y = Y.transpose()
                parameters = nn_model(X,
                                      Y,
                                      4,
                                      learning_rate=0.01,
                                      num_iterations=405)
                nn_parameter_dict[name] = parameters

            # Read the number of days to forecast
            start_time = datetime.datetime.strptime(
                mission.time_limit[0].split(' ')[0], '%Y-%m-%d')
            end_time = datetime.datetime.strptime(
                mission.time_limit[1].split(' ')[0], '%Y-%m-%d')
            days = (end_time - start_time).days

            pred_result_dict = dict()
            pred_list = []
            for name in flavname:
                # Take the last deg sample points of the data
                n_sum = 0  # total count of this flavor over the forecast period
                flavor_data = data_dict_filled[name]  # data points
                parameters = nn_parameter_dict[name]  # network parameters

                samples = flavor_data[-deg:]
                samples = [x[1] for x in samples]
                samples = matrix([samples])
                samples = samples.transpose()
                temp = []
                for day in range(days):
                    A2, cache = forward_propagation(samples, parameters)
                    x_pred = A2.value[0][0]
                    x_pred = math.ceil(abs(x_pred))
                    n_sum = n_sum + x_pred
                    samples.value[0][0] = x_pred
                    temp.append(x_pred)
                pred_result_dict[name] = n_sum
            print("Prediction finished")

            # Place the predicted VMs onto physical servers
            phy_server = annealingoptimize(pred_result_dict,
                                           mission,
                                           T=10000.0,
                                           cool=0.98)

            print("Placement finished")
            print("Generating the result output file")
            n_sum = 0
            for name in pred_result_dict.keys():
                n_sum = n_sum + pred_result_dict[name]
            result.append(int(n_sum))
            for name in pred_result_dict.keys():
                result.append("%s %d" % (name, pred_result_dict[name]))
            result.append("")
            result.append(len(phy_server))
            for n in phy_server.keys():
                s = str(n)
                u_name = set(phy_server[n])
                for u_n in u_name:
                    cnt = phy_server[n].count(u_n)
                    s = s + ' ' + str(u_n) + ' ' + str(cnt)
                result.append(s)
        elif mission.opt_target == "MEM":
            flavname = data_dict_filled.keys()
            nn_parameter_dict = dict()
            deg = 48
            for name in flavname:
                data = data_dict_filled[name]
                series = [x[1] for x in data]
                X, Y = generate_samples(series, deg)
                X = X.transpose()
                Y = Y.transpose()
                parameters = nn_model(X,
                                      Y,
                                      4,
                                      learning_rate=0.01,
                                      num_iterations=40)
                nn_parameter_dict[name] = parameters

            # Read the number of days to forecast
            start_time = datetime.datetime.strptime(
                mission.time_limit[0].split(' ')[0], '%Y-%m-%d')
            end_time = datetime.datetime.strptime(
                mission.time_limit[1].split(' ')[0], '%Y-%m-%d')
            days = (end_time - start_time).days

            pred_result_dict = dict()
            pred_list = []
            for name in flavname:
                # Take the last deg sample points of the data
                n_sum = 0  # total count of this flavor over the forecast period
                flavor_data = data_dict_filled[name]  # data points
                parameters = nn_parameter_dict[name]  # network parameters

                samples = flavor_data[-deg:]
                samples = [x[1] for x in samples]
                samples = matrix([samples])
                samples = samples.transpose()
                temp = []
                for day in range(days):
                    A2, cache = forward_propagation(samples, parameters)
                    x_pred = A2.value[0][0]
                    x_pred = math.ceil(abs(x_pred))
                    n_sum = n_sum + x_pred
                    samples.value[0][0] = x_pred
                    temp.append(x_pred)
                pred_result_dict[name] = n_sum
            print("Prediction finished")

            # Place the predicted VMs onto physical servers
            phy_server = annealingoptimize(pred_result_dict,
                                           mission,
                                           T=10000.0,
                                           cool=0.98)
            print("Placement finished")
            print("Generating the result output file")
            n_sum = 0
            for name in pred_result_dict.keys():
                n_sum = n_sum + pred_result_dict[name]
            result.append(int(n_sum))
            for name in pred_result_dict.keys():
                result.append("%s %d" % (name, pred_result_dict[name]))
            result.append("")
            result.append(len(phy_server))
            for n in phy_server.keys():
                s = str(n)
                u_name = set(phy_server[n])
                for u_n in u_name:
                    cnt = phy_server[n].count(u_n)
                    s = s + ' ' + str(u_n) + ' ' + str(cnt)
                result.append(s)
    return result
Example #12
def main(_):
    if not os.path.exists("./outputs/steering_model"):
        os.makedirs("./outputs/steering_model")

    batch_size = 1024

    X_image = []
    X_flip = []
    y_steering = []

    with open(FLAGS.drive_log_file, 'rt') as csvfile:
        reader = csv.reader(csvfile, skipinitialspace=True)

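        # Use the centre, left and right camera images; offset steering for the side cameras and add flipped copies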
        for row in reader:
            if row[0] == 'center':
                continue
            X_image.append(row[0])
            X_flip.append(0)
            y_steering.append(float(row[3]))
            X_image.append(row[1])
            X_flip.append(0)
            y_steering.append(float(row[3]) + 0.15)
            X_image.append(row[2])
            X_flip.append(0)
            y_steering.append(float(row[3]) - 0.15)
            # X_image.append(row[0])
            # X_flip.append(0)
            # y_steering.append(float(row[3]))
            X_image.append(row[1])
            X_flip.append(1)
            y_steering.append(float(row[3]) - 0.15)
            X_image.append(row[2])
            X_flip.append(1)
            y_steering.append(float(row[3]) + 0.15)

    X_image = np.array(X_image)
    X_flip = np.array(X_flip)
    y_steering = np.array(y_steering)

    X_train_image, X_test_image, X_train_flip, X_test_flip, y_train_steering, y_test_steering = train_test_split(
        X_image, X_flip, y_steering, test_size=0.2)

    if len(X_train_image) == 0:
        print('No data found')
        return

    sample_image = preprocessing.preprocess_input(
        np.array([mpimg.imread(X_train_image[0])]))[0]
    print('Image Shape: ', sample_image.shape)

    sample_per_epoch = len(X_train_image)
    nb_val_samples = len(X_test_image)

    print('Training Data Count:', sample_per_epoch)
    print('Validation Data Count:', nb_val_samples)

    X_train_image, X_train_flip, y_train_steering = shuffle(
        X_train_image, X_train_flip, y_train_steering)

    comma_ai_model = get_comma_ai_model(sample_image.shape)

    # If a model file is given, continue training it; otherwise start from a new model
    if FLAGS.model_file != '':
        model = load_model(FLAGS.model_file)
    else:
        model = comma_ai_model

    model.compile(optimizer="adam", loss="mse")

    model.fit_generator(image_generator(X_train_image, X_train_flip,
                                        y_train_steering, batch_size),
                        samples_per_epoch=sample_per_epoch,
                        nb_epoch=1,
                        validation_data=image_generator(
                            X_test_image, X_test_flip, y_test_steering,
                            batch_size),
                        nb_val_samples=nb_val_samples)

    print("Saving model weights and configuration file.")

    model.save("./outputs/steering_model/steering_angle.h5", True)