Example #1
# Assumed Keras 1.x imports; helpers such as create_image_model_resnet50,
# create_sentence_model, addPredictionLayers and create_optimizer are
# defined elsewhere in the source project.
from keras.models import Sequential
from keras.layers import Merge, GRU

def create_GRU_stack_model(images_shape, dict_size, sentence_len, settings, pretrained_emb):
    # input (None, 224, 224, 3), outputs (None, sentence_len, 512)
    image_model = create_image_model_resnet50(images_shape, sentence_len)

    # outputs (None, sentence_len, 160)
    sentence_model = create_sentence_model(dict_size, sentence_len, pretrained_emb, 160)

    combined_model = Sequential()
    combined_model.add(Merge([image_model, sentence_model], mode='concat', concat_axis=-1))
    combined_model.add(GRU(160, return_sequences=True, dropout_U=0.25, dropout_W=0.25))

    combined_model2 = Sequential()
    combined_model2.add(Merge([image_model, combined_model], mode='concat', concat_axis=-1))
    combined_model2.add(GRU(256, return_sequences=False, dropout_U=0.25, dropout_W=0.25))

    addPredictionLayers(combined_model2, dict_size, settings)

    # input words are 1-indexed, with index 0 reserved for masking,
    # but result words are 0-indexed and fall in [0, ..., dict_size-1]

    combined_model2.compile(loss='sparse_categorical_crossentropy', optimizer=create_optimizer(settings))
    return combined_model2
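# The indexing comments above matter when preparing data; a small illustrative
# sketch (the arrays and sizes are assumptions, not from the source):
import numpy as np

# Input tokens are shifted up by one so that index 0 can mask padding...
input_words = np.array([[3, 5, 1, 0, 0]])  # 1-indexed, 0 = padding
# ...while the sparse_categorical_crossentropy targets stay 0-indexed.
next_word = np.array([[2]])                # in [0, ..., dict_size-1]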
Example #2
# Assumed Keras 1.x imports; the helper functions come from the same project as above.
from keras.models import Sequential
from keras.layers import Merge, GRU

def create_GRU_xception_model(images_shape, dict_size, sentence_len, settings, pretrained_emb):
    image_model = create_image_model_xception(images_shape, sentence_len)

    sentence_model = create_sentence_model(dict_size, sentence_len, pretrained_emb)

    combined_model = Sequential()
    combined_model.add(Merge([image_model, sentence_model], mode='concat', concat_axis=-1))
    combined_model.add(GRU(256, return_sequences=False, dropout_U=0.2, dropout_W=0.2))

    addPredictionLayers(combined_model, dict_size, settings)

    # input words are 1-indexed, with index 0 reserved for masking,
    # but result words are 0-indexed and fall in [0, ..., dict_size-1]

    combined_model.compile(loss='sparse_categorical_crossentropy', optimizer=create_optimizer(settings))
    return combined_model
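# A minimal, hypothetical training call for this variant; every variable here
# is a placeholder, and Xception's usual 299x299 input size is an assumption:
model = create_GRU_xception_model((299, 299, 3), dict_size, sentence_len,
                                  settings, pretrained_emb)
# One input array per merged branch, in the order the branches were merged.
model.fit([images, input_words], next_words, nb_epoch=10, batch_size=32)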
Example #3
# Assumed Keras 1.x imports.
from keras.models import Sequential
from keras.layers import Merge, Lambda, Flatten, Dense
from keras.layers import Convolution2D, Cropping2D

#Center Image (referenced by the Merge below; reconstructed to mirror the left/right branches)
model_center = Sequential()
model_center.add(Cropping2D(cropping=((70,25),(0,0)), input_shape=(160,320,3)))
model_center.add(Lambda(lambda x: x / 255.0 - 0.5))
model_center.add(Convolution2D(24,5,5, subsample=(1,1), activation="relu"))

#Left Image
model_left = Sequential()
model_left.add(Cropping2D(cropping=((70,25),(0,0)), input_shape=(160,320,3)))
model_left.add(Lambda(lambda x: x / 255.0 - 0.5))
model_left.add(Convolution2D(24,5,5, subsample=(1,1), activation="relu"))

#Right Image
model_right = Sequential()
model_right.add(Cropping2D(cropping=((70,25),(0,0)), input_shape=(160,320,3)))
model_right.add(Lambda(lambda x: x / 255.0 - 0.5))
model_right.add(Convolution2D(24,5,5, subsample=(1,1), activation="relu"))


#Nvidia
model = Sequential()
model.add(Merge([model_center, model_left, model_right], mode='concat'))
#model.add(Convolution2D(24,5,5, subsample=(2,2), activation="relu"))
model.add(Convolution2D(36,5,5, subsample=(2,2), activation="relu"))
model.add(Convolution2D(48,5,5, subsample=(2,2), activation="relu"))
model.add(Convolution2D(64,5,5, subsample=(2,2), activation="relu"))
#model.add(Convolution2D(64,5,5, subsample=(2,2), activation="relu"))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')
print("Fitting")
model.fit([X_train, X_train, X_train], y_train, validation_split=0.2, shuffle=True, nb_epoch=5, verbose=1)

model.save('model.h5')
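# A hedged inference sketch: the merged model expects one image per branch,
# and since fit() above fed the same frames to all three branches, the same
# frame is repeated here as well (`image` is an assumed (160, 320, 3) array).
from keras.models import load_model

model = load_model('model.h5')
steering_angle = model.predict([image[None], image[None], image[None]])[0][0]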
Example #4
# Assumed imports; load_data is defined elsewhere in the source project.
import numpy as np
from keras.models import Sequential
from keras.layers import Merge, Dense, Dropout

def main():
    index_name = [
        'end', 'ignore', 'approach', 'move', 'grasp_left', 'grasp_right',
        'ungrasp_left', 'ungrasp_right', 'twist', 'push', 'neutral', 'pull',
        'pinch', 'unpinch'
    ]

    training_data, training_current_action, training_next_action, testing_data, testing_current_action, testing_next_action = load_data(
        index_name)

    # model = Sequential()
    # model.add(Dense(64, input_dim=training_data.shape[1]+len(index_name), activation='relu'))
    # model.add(Dropout(0.3))
    # model.add(Dense(64, activation='relu'))
    # model.add(Dropout(0.3))
    # model.add(Dense(len(index_name), activation='softmax'))
    #
    # model.compile(loss='categorical_crossentropy',
    #               optimizer='adadelta',
    #               metrics=['accuracy'])
    #
    # model.fit(np.hstack((training_data, training_current_action)),
    #           training_next_action,
    #           nb_epoch=500,
    #           validation_split=0.2,
    #           batch_size=16)  # starts training
    #
    # model.save('model1.h5')
    # score = model.evaluate(np.hstack((testing_data, testing_current_action)), testing_next_action, batch_size=16)
    # print(score)

    left_branch = Sequential()
    left_branch.add(
        Dense(64, input_dim=training_data.shape[1], activation='relu'))
    left_branch.add(Dropout(0.3))
    left_branch.add(Dense(64, activation='relu'))
    left_branch.add(Dropout(0.3))
    left_branch.add(Dense(64, activation='relu'))
    left_branch.add(Dropout(0.3))
    left_branch.add(Dense(64, activation='relu'))

    right_branch = Sequential()
    right_branch.add(Dense(8, input_dim=len(index_name), activation='relu'))

    merged = Merge([left_branch, right_branch], mode='concat')

    model = Sequential()
    model.add(merged)
    model.add(Dense(32, activation='relu'))
    # model.add(Dense(32, activation='relu'))
    model.add(Dense(14, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    model.fit([training_data, training_current_action],
              training_next_action,
              nb_epoch=20000,
              validation_split=0.2,
              batch_size=16)  # starts training
    model.save('model2.h5')
    score = model.evaluate([testing_data, testing_current_action],
                           testing_next_action,
                           batch_size=16)
    print(score)
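    # A hypothetical prediction sketch: map the softmax output back to the
    # action names (`sample_data` and `sample_current_action` are placeholder
    # arrays shaped like the training inputs):
    probs = model.predict([sample_data, sample_current_action])  # shape (N, 14)
    predicted_actions = [index_name[i] for i in np.argmax(probs, axis=1)]
    print(predicted_actions)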
Example #5
# Assumed imports; the feature-extraction helpers (extractBackSameday,
# extractBackDay, extractCount, getOneWeekdayFomExtractedData, ...) and the
# scoring utilities come from the surrounding project.
import datetime
import numpy as np
from keras.models import Sequential
from keras.layers import Merge, Dense, LSTM
from keras.optimizers import RMSprop
from sklearn.preprocessing import MinMaxScaler

def predictOneShop_ANN_LSTM(shopid, all_data, trainAsTest=False):
    """
    Predict a single shop with an ANN: two networks model the recent trend and
    the mid-term trend separately, and their hidden layers are merged into one
    hidden layer. The recent trend uses an LSTM, which is slow and does not
    always perform well either.
    :param shopid: id of the shop to predict
    :param trainAsTest: whether to use the last 14 days of the training set as the test set
    :return:
    """
    part_data = all_data[all_data.shopid == shopid]
    last_14_real_y = None
    # take out a portion to use as the training set
    if trainAsTest: # if the last 14 days serve as the test set, the training set is the earlier part
        last_14_real_y = part_data[len(part_data) - 14:]["count"].values
        part_data = part_data[0:len(part_data) - 14]
    # print last_14_real_y
    verbose = 2
    rnn_nb_epoch = 10
    skipNum = 28
    day_backNum = 7
    sameday_backNum = 3
    week_backnum = 3
    learnrate = 0.01
    sameday = extractBackSameday(part_data, sameday_backNum, skipNum, nan_method_sameday_mean)
    day = extractBackDay(part_data,day_backNum,skipNum,nan_method_sameday_mean)
    count = extractCount(part_data, skipNum)
    train_x = getOneWeekdayFomExtractedData(sameday)
    train_x2 = getOneWeekdayFomExtractedData(day)
    train_y = getOneWeekdayFomExtractedData(count)
    other_features = [statistic_functon_mean,statistic_functon_median]
    # other_features = []
    for feature in other_features:
        value = getOneWeekdayFomExtractedData(extractBackWeekValue(part_data, week_backnum, skipNum, nan_method_sameday_mean, feature))
        train_x = np.append(train_x, value, axis=1)

    # '''add day-of-week'''
    # extract_weekday = getOneWeekdayFomExtractedData(extractWeekday(part_data, skipNum))
    # train_x = np.append(train_x, extract_weekday, axis=1)
    # ''''''

    '''normalize the features'''
    x_scaler = MinMaxScaler().fit(train_x)
    x2_scaler = MinMaxScaler().fit(train_x2)
    y_scaler = MinMaxScaler().fit(train_y)
    train_x = x_scaler.transform(train_x)
    train_x2 = x2_scaler.transform(train_x2)
    train_x2 = train_x2.reshape((train_x2.shape[0],
                                 train_x2.shape[1], 1))
    train_y = y_scaler.transform(train_y)
    '''end of normalization'''
    # train_x = train_x.reshape((train_x.shape[0],
    #                            train_x.shape[1], 1))
    model1 = Sequential()
    model2 = Sequential()
    final_model = Sequential()
    # print getrefcount(model1)
    model1.add(Dense(32, input_dim=train_x.shape[1], activation="sigmoid")) #sigmoid
    # model1.add(Dense(1, activation='linear'))


    '''recent trend'''
    model2.add(LSTM(32, input_shape=(train_x2.shape[1],train_x2.shape[2]), activation="sigmoid"))


    final_model.add(Merge([model1, model2],mode="concat",concat_axis=1))
    final_model.add(Dense(1, activation='linear'))

    #, W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)
    # print getrefcount(model1)
    # set the optimizer (apart from the learning rate, it is best to leave the other parameters unchanged)
    rms = RMSprop(lr=0.05)
    # sgd=SGD(lr=0.1, momentum=0.9, nesterov=True)
    final_model.compile(loss="mse", optimizer=rms)
    final_model.summary()  # summary() prints directly; its return value is None
    # print model1.summary()
    # print getrefcount(model1)
    # print model1.summary()
    final_model.fit([train_x, train_x2], train_y, nb_epoch=rnn_nb_epoch, batch_size=1, verbose=verbose)
    # print model1.get_weights()
    # part_counts = []
    # for i in range(7):
    #     weekday = i + 1
    #     part_count = getOneWeekdayFomExtractedData(count, weekday)
    #     part_counts.append(part_count)

    # print getrefcount(model1)
    format = "%Y-%m-%d"
    if trainAsTest:
        startTime = datetime.datetime.strptime("2016-10-18", format)
    else:
        startTime = datetime.datetime.strptime("2016-11-1", format)
    timedelta = datetime.timedelta(1)
    predicts = []
    for i in range(14):
        currentTime = startTime + timedelta * i
        strftime = currentTime.strftime(date_format)
        # index = getWeekday(strftime) - 1
        # part_count = part_counts[index]
        # use the values of the same weekday in the previous {sameday_backNum} weeks as features for the prediction
        part_data = part_data.append({"count":0, "shopid":shopid, "time":strftime, "weekday":getWeekday(strftime)}, ignore_index=True)
        x = getOneWeekdayFomExtractedData(extractBackSameday(part_data,sameday_backNum,part_data.shape[0] - 1, nan_method_sameday_mean))
        x2 = getOneWeekdayFomExtractedData(extractBackDay(part_data,day_backNum,part_data.shape[0]-1,nan_method_sameday_mean))
        for feature in other_features:
            x_value = getOneWeekdayFomExtractedData(extractBackWeekValue(part_data, week_backnum, part_data.shape[0]-1, nan_method_sameday_mean, feature))
            x = np.append(x, x_value, axis=1)
        # '''add day-of-week'''
        # x = np.append(x, getOneWeekdayFomExtractedData(extractWeekday(part_data, part_data.shape[0]-1)), axis=1)
        # ''''''

        x = x_scaler.transform(x)
        x2 = x2_scaler.transform(x2)
        x2 = x2.reshape((x2.shape[0],x2.shape[1],1))
        # for j in range(sameday_backNum):
        #     x.append(train_y[len(train_y) - (j+1)*7][0])
        # x = np.array(x).reshape((1, sameday_backNum))

        # print x
        # x = x.reshape(1, sameday_backNum, 1)
        predict = final_model.predict([x,x2])
        predict = y_scaler.inverse_transform(predict)[0][0]
        if predict <= 0:
            predict = 1
        predicts.append(predict)
        part_data.set_value(part_data.shape[0]-1, "count", predict)
        # predicts.append(predict)
        # part_counts[index] = np.append(part_count, predict).reshape((part_count.shape[0] + 1, 1))
    predicts = (removeNegetive(toInt(np.array(predicts)))).astype(int)
    # predicts = np.array(predicts)
    if trainAsTest:
        last_14_real_y = (removeNegetive(toInt(np.array(last_14_real_y)))).astype(int)
        # print(predicts, last_14_real_y)
        print(str(shopid) + ',score:', scoreoneshop(predicts, last_14_real_y))
    return [predicts, last_14_real_y]
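# Typical invocation, assuming `all_data` is the pandas DataFrame with the
# shopid / time / count / weekday columns used above:
predictions, real = predictOneShop_ANN_LSTM(1, all_data, trainAsTest=True)   # score against the held-out 14 days
predictions, _ = predictOneShop_ANN_LSTM(1, all_data, trainAsTest=False)     # forecast the next 14 unseen days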