Example #1
0
from load import model
import torch
import numpy as np

# New user's rating vector: one slot per food id; non-zero entries are the
# ratings this user has already given (rating scale assumed — TODO confirm).
new_user_input = torch.FloatTensor([0, 0, 0, 3, 1, 2])
output = model(new_user_input)

# Shift the model output (Tanh range [-1, 1]) into [0, 2].
output = (output + 1)
print(output)

# Given table (placeholder — populated elsewhere)
food_table = []

print("모델 Output")
print(output)

print("최고 별점 음식 추천")
# BUG FIX: np.sort is ascending, so the original printed the *lowest*
# predicted ratings first; negate-sort to list the highest ratings first.
print(-np.sort(-output.detach().numpy()))

print("최고 별점의 food id 리스트")
# BUG FIX: np.argmax returns a single index, not the promised id list;
# argsort on the negated scores yields all food ids, best first
# (consistent with the ranking used elsewhere in this project).
sort_food_id = np.argsort(-output.detach().numpy())
print(sort_food_id)
Example #2
0
def AIModel(include, exclude):
    """Recommend foods for a user.

    include: food ids the user picked (first 4 are weighted as rating 5,
             the next 4 as rating 3, the next 4 as rating 1).
    exclude: label-column names in food_label.xlsx whose flagged foods
             must be filtered out of the recommendation.

    Returns a list of 6 food ids: 2 sampled from `include` plus 4 sampled
    from the model's top-10 unseen recommendations.
    """
    batch_size = 50

    t1_data = pd.read_csv('./11/08.csv')

    nb_users = int(max(t1_data.iloc[:, 0])) + 1  # number of users (+1: ids are 0-based)
    nb_foods = int(max(t1_data.iloc[:, 1])) + 1  # number of food kinds (+1: ids are 0-based)

    t1_data = t1_data.values

    def convert(data):
        """Turn (user, food, rating) rows into a dense per-user rating matrix."""
        new_data = []
        for id_users in range(0, nb_users):
            id_foods = data[:, 1][data[:, 0] == id_users]  # foods this user rated
            id_foods = id_foods.astype(int)
            id_ratings = data[:, 2][data[:, 0] == id_users]
            ratings = np.zeros(nb_foods)
            ratings[id_foods] = id_ratings  # scatter the ratings into the dense row
            ratings = ratings.astype(float)
            new_data.append(list(ratings))
        # BUG FIX: removed a stray training-progress print that referenced
        # undefined `epoch`/`loss` and raised NameError on every call.
        return new_data

    # NOTE(review): the converted matrix, the train/test split and the
    # dataloader below are never consumed by the recommendation logic —
    # kept only to preserve the original side effects (file read, RNG use).
    t2_data = convert(t1_data)
    t2_data = np.asarray(t2_data)

    tensor = torch.FloatTensor(t2_data)

    num_train_dataset = int(len(tensor) * 0.8)
    num_test_dataset = len(tensor) - num_train_dataset

    train_dataset, test_dataset = torch.utils.data.random_split(tensor, [num_train_dataset, num_test_dataset])

    dataloader = DataLoader(tensor, batch_size=batch_size, shuffle=True)

    aaa = []
    lists = include
    print(lists)
    arr = [0 for i in range(1, 310)]  # 309 zero slots, one per food id
    count = 0
    for i in lists:
        # First 4 included foods get rating 5, next 4 get 3, next 4 get 1.
        if count < 4:
            arr[i] = 5
        elif count < 8:
            arr[i] = 3
        elif count < 12:
            arr[i] = 1
        count += 1

    aaa.append(arr)
    bb = torch.FloatTensor(aaa)

    new_user_input = bb
    output = model(new_user_input)

    output = (output + 1)  # shift Tanh output from [-1, 1] into [0, 2]

    sort_food_id = np.argsort(-output.detach().numpy())  # food ids, best first
    sort_food_id_list = sort_food_id.tolist()
    food_real_list = np.ravel(sort_food_id_list, order='C').tolist()

    file = pd.read_excel('food_label.xlsx')
    rm_list = set()
    list_remove = exclude

    # Collect every food id flagged (== 1) under any excluded label column.
    for j in list_remove:
        for i in range(309):
            if file[j][i] == 1:
                rm_list.add(file['f_num'][i])

    # BUG FIX: the original removed items from food_real_list while
    # iterating over it, which silently skips the element following each
    # removal; build a filtered copy instead.
    food_real_list = [i for i in food_real_list
                      if i not in rm_list and i not in include]

    top_10 = food_real_list[:10]

    # 2 familiar foods from the user's own list + 4 new recommendations.
    random_list1 = random.sample(include, 2)
    random_list2 = random.sample(top_10, 4)

    return random_list1 + random_list2
Example #3
0
                         action='store',
                         default=0.001,
                         type=float)
# CLI hyper-parameter options; `hyper_param` is an argparse argument group
# created earlier in the file.
hyper_param.add_argument('-u',
                         '--hidden_units',
                         default=3136,
                         dest='u',
                         type=int)
hyper_param.add_argument('-e', '--epochs', dest='e', default=8, type=int)

args = parser.parse_args()

# Unpack the parsed CLI arguments into local names.
data_d = args.inputDirectory
architec = args.arch
lr = args.l
hidden_units = args.u
epochs = args.e
gpu = args.gpu
path = args.save_dir

# Build the image datasets and loaders from the input directory
# (train / test / validation splits).
train_image_datasets, test_image_data_sets, valid_image_datasets, trainloaders, testloaders, validloaders = load.arg_inputdir(
    args.inputDirectory)
# Select the pretrained architecture and attach a new classifier head,
# then train, evaluate on the test set, and save a checkpoint.
pretrained_model, output_size, i = model.arch(architec)
pretrained_model, criterion, optimizer = model.classifier(
    pretrained_model, output_size, i, lr, hidden_units)
load.training_model(pretrained_model, criterion, optimizer, epochs, gpu)
load.validation_test(pretrained_model, testloaders)
complete_path = load.set_checkpoint_path(path)
load.save_model(pretrained_model, complete_path, architec)
# NOTE(review): unclear why the model is re-loaded here and the result is
# discarded — presumably a smoke test of the saved checkpoint; confirm.
load.model(architec)
# pretrained_model = load.load_model(pretrained_model,'checkpoint.pth')
Example #4
0
            nn.ReLU(True),
            nn.Linear(200, 309), nn.Tanh())

    def forward(self, x):
        """Encode the input, then decode it back (autoencoder pass)."""
        return self.decoder(self.encoder(x))


model = autoencoder()
criterion = nn.MSELoss()  # reconstruction loss
# Adam with L2 regularization via weight_decay; `learning_rate`,
# `num_epochs` and `dataloader` are defined earlier in the file.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-3)

# Train the autoencoder to reconstruct its own input.
for epoch in range(num_epochs):
    for data in dataloader:
        output = model(data)
        loss = criterion(output, data)  # reconstruction error vs the input itself

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


def AIModel(include, exclude):
    num_epochs = 150
    batch_size = 50

    learning_rate = 1e-3

    t1_data = pd.read_csv('./11/08.csv')
Example #5
0
def AiModel():
    """Recommendation endpoint: run the model on the posted rating vector.

    Expects a JSON body with a 'foods' list (the user's food ratings).
    NOTE(review): the model output is computed but never used — the
    returned recommendation is a hard-coded placeholder.
    """
    body = request.get_json(force=True)
    output = model(FloatTensor(body['foods'])).detach().numpy()
    # processing step that should turn `output` into the result (placeholder)
    result = [1, 2, 3, 4]
    return jsonify({'recommendation': result})