import math
import os
import time

from data.readfile import *

cwd = os.getcwd()
grid = readfile(os.path.join(cwd, 'data'))  # portable replacement for f'{cwd}\\data'
print(*grid)

from processing_py import *  # pip install processing-py
from data.block_class import *
from data.update import *


def delay(seconds: float):
    """Block for the given number of seconds."""
    time.sleep(seconds)


delta = 0
rez = 150


def showDir(pos, i, j):
    """Draw a small circle marking the direction stored in pos.dir.

    Expects a module-level processing_py App named `app`, created elsewhere.
    """
    a = pos.dir
    app.pushMatrix()
    app.translate(i, j)
    if a[1] == 1:
        app.rotate(math.pi)            # flip 180 degrees
    app.rotate(math.pi / 2 * a[0])     # one quarter-turn per direction step
    app.translate(0, -65)
    app.ellipse(0, 0, 20, 20)
    app.popMatrix()
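# Minimal demo (an assumption about intended use, not part of the original
# file): `showDir` relies on a module-level `app`, which this repo presumably
# creates in one of the star-imported modules. `_DemoPos` is a hypothetical
# stand-in for the real block objects from data.block_class.
if __name__ == '__main__':
    app = App(600, 600)               # processing_py sketch window
    app.background(30, 30, 30)

    class _DemoPos:
        dir = [1, 0]                  # [quarter-turns, flip flag]

    showDir(_DemoPos(), 300, 300)     # draw a direction marker at the center
    app.redraw()                      # push the frame to the window
    delay(2)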
    # Tail of best_case(): keep only the menus that fit the budget, then
    # return the highest-utility one together with its price.
    prices = prices[inbudget]
    cases = cases[inbudget]
    indexmaxutil = np.argmax(utils)
    return cases[indexmaxutil], prices[indexmaxutil]

    # Tail of brute_force(): report the winning menu and its nutrients.
    bestcase, price = best_case(cases, utils, prices, budget)
    print('Ayt Miss. Here is your ${} meal:'.format(price))
    for x in range(len(bestcase)):
        if bestcase[x] > 0:
            print(' - {}: {} units'.format(products[x].name, bestcase[x]))
    nutrients = np.matmul(bestcase, v)[0:3]
    msg = 'It gives you'
    for i in range(len(nutrients)):
        msg = '%s %.0f%% %s' % (msg, nutrients[i], labels[i])
    msg = msg + ' of your daily intake'
    print(msg)


if __name__ == '__main__':
    # Read the product data.
    products, labels = data.readfile('datasets/starmarket.csv')
    budget = float(input('What is your max daily budget in dollars?\n>>'))
    calories = float(input('What is your desired daily calorie intake?\n>>'))
    # Express the calorie target as a percentage of a 2000 kcal daily intake.
    desiredpercent = (calories / 2000) * 100
    # BRUTE FORCE POWER
    brute_force(products, labels, budget, desiredpercent)
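# A sketch of how the search space above could be built (an assumption, not
# the original helper): enumerate every combination of 0..max_units units per
# product, price each combination, and score it by distance from the desired
# calorie percentage. Assumes each product exposes a .price attribute and that
# column 0 of the nutrient matrix v is calories; only .name is visible above.
def _enumerate_cases_sketch(products, v, desiredpercent, max_units=3):
    import itertools
    cases = np.array(list(itertools.product(range(max_units + 1),
                                            repeat=len(products))))
    prices = cases @ np.array([p.price for p in products])
    nutrients = cases @ v                                # % of daily intake
    utils = -np.abs(nutrients[:, 0] - desiredpercent)    # toy utility score
    return cases, utils, prices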
# Assumed imports (the excerpt starts below the original import block):
import torch
import torch.nn as nn

from data.readfile import readfile

# N_NEGS = 10
N_EPOCHS = 1
REPORT_EVERY = 1
LEARNING_RATE = 0.01
N_GRAM = 5  # Length of the n-gram window around the center word

toy_corpus = [
    'You may work either independently or in a group',
    'We have five suggestions for topics and practical projects',
    'We will provide datasets for each practical project',
    'You can also choose your own topic and suggest a project, or choose an existing topic and suggest your own project based on the topic'
]

# CORP = toy_corpus
CORP = readfile('MeBo-123.2015_stamd.txt')


#-- model --#
class SGNS(nn.Module):
    """Skip-gram (without negative sampling for now)."""

    def __init__(self, embedding_dim, vocab_size):
        super(SGNS, self).__init__()
        self.embedding_dim = embedding_dim
        self.vocab_size = vocab_size
        self.embed = nn.Embedding(vocab_size, embedding_dim)
        # embedding_dim -> vocab_size: the layer scores every vocabulary word
        # from a single center-word embedding. (The original input size of
        # vocab_size * embedding_dim was a bug.)
        self.linear = nn.Linear(embedding_dim, vocab_size)

    def forward(self, x):
        out = self.embed(x)       # (batch,) -> (batch, embedding_dim)
        return self.linear(out)   # (batch, embedding_dim) -> (batch, vocab_size)
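# A minimal training-loop sketch (an assumption, not the original code):
# trains SGNS as a plain softmax skip-gram, predicting each context word from
# its center word with cross-entropy. `pairs` (a list of
# (center_index, context_index) tuples) is hypothetical; it would come from
# tokenising CORP with an N_GRAM-sized window.
def train_sketch(pairs, vocab_size, embedding_dim=64):
    model = SGNS(embedding_dim, vocab_size)
    optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
    loss_fn = nn.CrossEntropyLoss()
    for epoch in range(N_EPOCHS):
        total = 0.0
        for center, context in pairs:
            optimizer.zero_grad()
            scores = model(torch.tensor([center]))        # (1, vocab_size)
            loss = loss_fn(scores, torch.tensor([context]))
            loss.backward()
            optimizer.step()
            total += loss.item()
        if (epoch + 1) % REPORT_EVERY == 0:
            print('epoch {}: mean loss {:.4f}'.format(
                epoch + 1, total / max(len(pairs), 1)))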
        cntr (np.array): centers of the fuzzy clustering, dimension [cluster_num, feature_num]
        X (np.array): data of the patients, dimension [data_num, feature_num]
        label (pd.DataFrame): ground truths of the data (multilabel), dimension [data_num, label_num]
        labelname (string): name of the label being evaluated

    Return:
        score of the fuzzy clustering
    """
    if score_type == 'FU':
        return normFuzzPartScore(u)
    elif score_type == 'partition index':
        return _partitionIndex(X, cntr, u)
    elif score_type == 'separation index':
        return _separationIndex(X, cntr, u)
    else:
        # Fail loudly instead of silently returning None.
        raise ValueError('No such score type: {}'.format(score_type))


if __name__ == '__main__':
    from data import readfile

    data, label = readfile()
    print('data shape', data.shape)
    print('label shape', label.shape)
    print(data.iloc[:10, :2])
    X = data.drop(['Name'], axis=1).values
    print(X.shape)
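    # A possible continuation (an assumption, not the original code): cluster
    # X with scikit-fuzzy's c-means to obtain the membership matrix `u` and
    # the centers `cntr` that the score dispatch above expects. skfuzzy wants
    # the data as [feature_num, data_num], hence the transpose; c=3 and m=2.0
    # are illustrative choices.
    import skfuzzy as fuzz

    cntr, u, _, _, _, _, fpc = fuzz.cluster.cmeans(
        X.T, c=3, m=2.0, error=1e-5, maxiter=1000, seed=0)
    print('fuzzy partition coefficient:', fpc)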
import argparse

import numpy as np

import data
import newton
import solve

parser = argparse.ArgumentParser(description='A program for ML HW#1.')
parser.add_argument('file_path', help='file path of the input data', type=str)
parser.add_argument('base', help='number of polynomial bases', type=int)
parser.add_argument('rate', help='regularization rate (lambda)', type=float)
args = parser.parse_args()

print('file_path: {}'.format(args.file_path))
print('base: {}'.format(args.base))
print('rate: {}'.format(args.rate))

(x, y) = data.readfile(args.file_path)
assert len(x) == len(y)

print('-' * 30)
print('x: {}'.format(x))
# print(y)

# Closed-form regularized least squares.
weight_LSE, error_LSE = solve.LSE(x, y, args.base, args.rate)
print('-' * 30)
print('weight of LSE: \n{}'.format(weight_LSE))
print('error of LSE: \n{}'.format(error_LSE))

# Newton's method (no regularization term).
weight_NT, error_NT = newton.optimize(x, y, args.base)
print('-' * 30)
print('weight of NT: \n{}'.format(weight_NT))
print('error of NT: \n{}'.format(error_NT))
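# What solve.LSE presumably computes (a sketch of the standard closed form,
# not necessarily the repo's implementation): build a polynomial design
# matrix A with `base` columns, then solve the regularized normal equations
# w = (A^T A + rate * I)^(-1) A^T y.
def _lse_sketch(x, y, base, rate):
    A = np.vander(x, N=base, increasing=True)   # columns x^0 .. x^(base-1)
    w = np.linalg.solve(A.T @ A + rate * np.eye(base), A.T @ np.asarray(y))
    error = float(np.sum((A @ w - np.asarray(y)) ** 2))  # sum of squared residuals
    return w, error

# Example invocation (hypothetical input file name):
#   python main.py data.txt 3 0.0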