def main():
    """Entry point for the MLAI Streamlit app.

    Renders the title, a sidebar activity selector, and the page that
    corresponds to the selected activity.
    """
    st.title("MLAI: An Integrated Software platform for AI Automation")

    # Sidebar: one entry per page.
    activities = [
        "Home", "Dataset Explorer", "ML Classifiers", "ML Regression",
        "Text Summarizer"
    ]
    choice = st.sidebar.selectbox("Choose Activity", activities)

    # `choice` holds exactly one value, so an elif chain is equivalent
    # to the independent if-checks.
    if choice == "Home":
        st.header(
            'Empowering companies to jumpstart AI and generate real-world value'
        )
        st.subheader(
            'Use exponential technologies to your advantage and lead your industry with confidence through innovation.'
        )
        image = Image.open('images/img0.jpg')
        st.image(image, use_column_width=True, caption='Data Mining')
    elif choice == "Dataset Explorer":
        st.subheader("Dataset Explorer")
        dataset_analysis.main()
    elif choice == "ML Classifiers":
        classification.main()
    elif choice == "ML Regression":
        # NOTE(review): module is spelled `regresion` — presumably the
        # actual module name in this project; confirm before "fixing".
        regresion.main()
    elif choice == "Text Summarizer":
        text_summ.main()
def main():
    """Grid-search training driver for the classification network.

    Trains one network for every combination of dataset mode, sample
    quantity, hidden-layer layout and activation function, saving each
    trained model under a name derived from the combination.
    """
    import classification
    from util import reLu, reLu_derivative

    prefix = 'projekt1-oddanie/clasification/data'
    modes = ['circles']
    quantities = [500, 1000]
    create_nn = [[1000]]  # candidate hidden-layer layouts
    # (sigmoid, sigmoid_derivative, 'sigmoid'),
    activation = [(reLu, reLu_derivative, 'reLu')]
    n_epochs = 10000

    def get_filename(mode, quantity, t):
        # Builds '<prefix>.<mode>.<t>.<quantity>.csv'. Hoisted out of the
        # loops (the original redefined a closure on every iteration).
        return prefix + '.' + mode + '.' + t + '.' + str(quantity) + '.csv'

    for mode in modes:
        for quantity in quantities:
            train_filename = get_filename(mode, quantity, 'train')
            test_filename = get_filename(mode, quantity, 'test')
            for nn in create_nn:
                for activation_f, activation_f_derivative, name_f in activation:
                    save_nn = (name_f + '1-oddanie/classification.' + mode +
                               '.' + str(quantity) + str(nn))
                    print(save_nn)
                    # n_epochs // 10: visualize every 10% of training.
                    # Integer division — the CLI companion declares the
                    # visualize_every argument as int; `n_epochs / 10`
                    # passed a float under Python 3.
                    classification.main(train_filename, test_filename, nn,
                                        save_nn, None, n_epochs,
                                        n_epochs // 10, 0.001, True,
                                        activation_f,
                                        activation_f_derivative)
def test_classification(model_name: str, batch_size: int):
    """Invoke classification.main() under a patched sys.argv.

    Simulates `classification.py <model_name> --batch_size <batch_size>`
    on the command line.
    """
    fake_argv = f"classification.py {model_name} --batch_size {batch_size}".split()
    with patch.object(sys, "argv", fake_argv):
        classification.main()
def predict_classify():
    """Flask endpoint: run the classification pipeline on a JSON request.

    Expects a JSON body with 'collection', 'features', 'target' and
    'inputType' keys; returns per-classifier predictions plus the row
    index as JSON.
    """
    # Parse the JSON body once instead of re-parsing it for every field.
    payload = request.get_json()
    collection = payload['collection']
    features = payload['features']
    target = payload['target']
    inputType = payload['inputType']

    ml_result = classification.main(collection, features, target, inputType)

    # ml_result appears to be a DataFrame-like object (has an .index and
    # columns with .tolist()) — TODO confirm against classification.main.
    return jsonify({
        "Linear_SVM": ml_result['Linear_SVM'].tolist(),
        "RandomForest": ml_result['RandomForest'].tolist(),
        "DecisionTree": ml_result['DecisionTree'].tolist(),
        "Adaptive_GB": ml_result['Adaptive_GB'].tolist(),
        "files": ml_result.index.tolist()
    })
def main():
    """Entry point for the AlphaAI Streamlit app.

    Shows the title and a sidebar selector, then renders whichever page
    the user picked.
    """
    st.title("AlphaAI")

    activities = [
        "Home", "Dataset Explorer", "ML Classifiers", "ML Regression",
        "News Classification", "Text Summarizer",
        "Real World Data Distribution", "Vision API"
    ]
    choice = st.sidebar.selectbox("Choose Activity", activities)

    # Home is the landing page; everything else delegates to a module.
    if choice == "Home":
        st.header(
            'Empowering companies to jumpstart AI and generate real-world value'
        )
        st.subheader(
            'Use exponential technologies to your advantage and lead your industry with confidence through innovation.'
        )
        image = Image.open('images/img0.jpg')
        st.image(image, use_column_width=True, caption='Data Mining')
    elif choice == "Dataset Explorer":
        st.subheader("Dataset Explorer")
        dataset_analysis.main()
    elif choice == "Real World Data Distribution":
        geo_climate.main()
    elif choice == "ML Regression":
        regression.main()
    elif choice == "ML Classifiers":
        classification.main()
    elif choice == "Vision API":
        vision_api.main()
    elif choice == "Text Summarizer":
        text_summ.main()
    elif choice == "News Classification":
        newsclass.main()
# Missing-value handling and encoding for the mushrooms dataset.
# NOTE(review): these statements appear at module level here, but may be
# the tail of a function body whose `def` is outside this view — confirm.
# Relies on `read_input` (a pandas DataFrame, given .isnull/.dropna/.shape),
# `predict_missing`, `LabelEncoder`, `main`, `feature_selection` and
# `classification`, all defined elsewhere in the project.
print('[-] Zero values detected!')
print('Number of missing values in original dataset: ' +
      str(read_input.isnull().sum().sum()))
print('[+] Creating dataset with predicted missing values:')
predict_missing(read_input)
print('[+] Missing values predicted')
read_input.dropna(inplace=True)

# Count the number of records and features
print('Checking instances and dimensionality:')
instances = read_input.shape[0]
dimensions = read_input.shape[1]
print('[+] number of instances: ' + str(instances))
print('[+] number of dimensions: ' + str(dimensions))

# Encode categorical data
# A single LabelEncoder is reused; each fit_transform call refits it on
# the current column.
le = LabelEncoder()
for elem in read_input.columns:
    read_input[elem] = le.fit_transform(read_input[elem])

# Write the processed data to CSV
# Second positional argument ',' is the separator (pandas to_csv `sep`).
read_input.to_csv('mushrooms_encoded.csv', ',', encoding='utf-8')
print('[+] Removed rows with missing values from original dataset')

# Compute the class distribution
print('Checking distribution of classes')
# value_counts(True) returns relative frequencies (normalize=True).
print(read_input['class'].value_counts(True))
print('[+] Data transformation completed succesfully\n')

if __name__ == '__main__':
    # Full pipeline: preprocessing, then feature selection, then
    # classification.
    main()
    feature_selection.main()
    classification.main()
#!/usr/bin/env python
if __name__ == '__main__':
    # Make the project's src/ directory importable, then run the
    # classification pipeline.
    import os.path
    import sys
    src_dir = os.path.dirname(os.path.abspath(__file__)) + '/src/'
    sys.path.append(src_dir)
    from classification import main
    main()
def main():
    """Command-line entry point for the neural-network framework.

    Parses CLI arguments, validates argument combinations, selects the
    activation function pair, then dispatches to the regression or
    classification pipeline. Exits with status 1 on invalid argument
    combinations.
    """
    import argparse
    import random

    parser = argparse.ArgumentParser(description='Neural Network framework.')
    parser.add_argument(
        'action',
        choices=['regression', 'classification'],
        help='Choose mode either \'regression\' or \'classification\'.')
    parser.add_argument(
        'activation',
        choices=['sigmoid', 'relu', 'tanh'],
        help='Choose mode either \'sigmoid\' or \'relu\' or \'tanh\'.')
    parser.add_argument('--train_filename',
                        type=str,
                        help='Name of a file containing training data',
                        required=False)
    parser.add_argument('--test_filename',
                        type=str,
                        help='Name of a file containing testing data')
    parser.add_argument(
        '--create_nn',
        nargs='*',
        type=int,
        help=
        'When creating a nn from scratch; number of neurons for each layer',
        required=False)
    parser.add_argument('--save_nn',
                        type=str,
                        help='Name of a file to save trained model to.')
    parser.add_argument('--savefig_filename',
                        type=str,
                        help='Name of a file to save plot to.')
    parser.add_argument('-e',
                        '--number_of_epochs',
                        type=int,
                        help='Number of epochs (iterations) for the NN to run',
                        required=False,
                        default=10000)
    parser.add_argument('--read_nn',
                        type=str,
                        help='When reading existing nn from a file; filename')
    parser.add_argument(
        '-v',
        '--visualize_every',
        type=int,
        help='How ofter (every n iterations) print neuron\'s weights.',
        required=False)
    parser.add_argument('--l_rate',
                        type=float,
                        help='Learning rate',
                        required=False,
                        default=0.001)
    parser.add_argument('--seed',
                        type=int,
                        help='Random seed int',
                        required=False,
                        default=1)
    parser.add_argument('--biases', dest='biases', action='store_true')
    parser.add_argument('--no_biases', dest='biases', action='store_false')
    parser.set_defaults(biases=True)
    args = parser.parse_args()

    # Seed the random number generator for reproducible runs.
    random.seed(args.seed)

    # A network must come from somewhere: either built fresh or loaded.
    if args.create_nn is None and args.read_nn is None:
        print('Either \'--create_nn\' or \'--read_nn\' has to be provided.')
        exit(1)

    # Saving or building a network is meaningless without training data.
    # (This message was broken across a raw line break in the original
    # source; reassembled into one string.)
    if args.train_filename is None and args.save_nn is not None:
        print(
            '\'--save_nn\' cannot be provided when \'--train_filename\' is not provided.'
        )
        exit(1)
    if args.train_filename is None and args.create_nn is not None:
        print(
            '\'--create_nn\' cannot be provided when \'--train_filename\' is not provided.'
        )
        exit(1)

    # Map the activation choice to its (f, f') pair. argparse's `choices`
    # already guarantees one of these three values, so the original's
    # unreachable error branch is dropped.
    if args.activation == 'sigmoid':
        from util import sigmoid, sigmoid_derivative
        activation_f, activation_f_derivative = sigmoid, sigmoid_derivative
    elif args.activation == 'relu':
        from util import reLu, reLu_derivative
        activation_f, activation_f_derivative = reLu, reLu_derivative
    else:  # 'tanh'
        from util import tanh, tanh_derivative
        activation_f, activation_f_derivative = tanh, tanh_derivative

    # Dispatch. `choices` likewise guarantees action is one of these two.
    if args.action == 'regression':
        import regression
        regression.main(args.train_filename, args.test_filename,
                        args.create_nn, args.save_nn, args.read_nn,
                        args.number_of_epochs, args.visualize_every,
                        args.l_rate, args.savefig_filename, activation_f,
                        activation_f_derivative)
    else:  # 'classification'
        import classification
        classification.main(args.train_filename, args.test_filename,
                            args.create_nn, args.save_nn, args.read_nn,
                            args.number_of_epochs, args.visualize_every,
                            args.l_rate, args.biases, activation_f,
                            activation_f_derivative)
def main():
    """Run every homework problem in sequence."""
    # Problem 1 and Problem 3.1
    part1.main()
    # Problem 2
    classification.main()
    # Problem 3.2
    part3.main()