def train(self_, X, y, solution=None, iterations=20, batchSize=64, kerasArgs={}, **kwArgs):
    shaper = solution.shaper() if solution else ma.DataShaper(X, **kwArgs)
    sampleSize = int(sqrt(shaper.feature_count()))  # samples are reshaped into square images
    model = kwArgs.get("model", KerasSolver.__prepare_model(shaper, y, solution, **kwArgs))
    y = shaper.map_labels(y)
    y = keras.utils.to_categorical(y, num_classes=shaper.class_count())
    X = shaper.conform(X, addOnes=False)
    X = X.reshape(X.shape[0], sampleSize, sampleSize, 1)
    X = X.astype('float32')
    with NoWarnings():
        model.fit(X, y, batch_size=batchSize, epochs=iterations, **kerasArgs)
    return ma.Solution(model=model, shaper=shaper)
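# A self-contained sketch of the pattern the Keras train() above follows: reshape flat
# feature vectors into square sampleSize x sampleSize images, one-hot encode the labels,
# then fit. The tiny Sequential model and the synthetic data below are illustrative
# stand-ins only; the real architecture built by KerasSolver.__prepare_model is not shown here.
import numpy as np
import keras
from math import sqrt

X_demo = np.random.rand(200, 64).astype('float32')  # 200 samples, 8x8 = 64 features
y_demo = np.random.randint(0, 3, 200)                # three classes
side = int(sqrt(X_demo.shape[1]))                    # feature count must be a perfect square
X_img = X_demo.reshape(X_demo.shape[0], side, side, 1)
y_cat = keras.utils.to_categorical(y_demo, num_classes=3)

toy_model = keras.Sequential([
    keras.Input(shape=(side, side, 1)),
    keras.layers.Flatten(),
    keras.layers.Dense(32, activation='relu'),
    keras.layers.Dense(3, activation='softmax'),
])
toy_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
toy_model.fit(X_img, y_cat, batch_size=32, epochs=2, verbose=0)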
def train(self_, X, y, solution=None, Lambda=0, iterations=50, **kwArgs):
    shaper = solution.shaper() if solution else ma.DataShaper(X, **kwArgs)
    debug = kwArgs.get("debug", False)
    topology, theta = NeuralNetworkSolver.__initial_theta(shaper, y, solution, **kwArgs)
    y = shaper.map_labels(y)
    X = shaper.conform(X, addOnes=False)
    # Minimize the network cost over the flattened weight vector with conjugate gradient.
    theta = optimize.fmin_cg(
        compute_cost, theta, fprime=compute_grad,
        args=(X, y, topology, shaper.class_count(), Lambda, debug),
        maxiter=iterations, disp=False
    )
    return ma.Solution(model=(topology, theta), shaper=shaper)
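# The optimizer above works on a single flattened theta vector paired with a topology.
# Below is a sketch of how such a vector maps back to per-layer weight matrices, using one
# common packing convention (one bias column per layer); mlak's exact packing inside
# compute_cost/compute_grad is not shown here, so treat this as illustrative only.
import numpy as np

def unroll_theta(theta, topology):
    # topology e.g. [400, 25, 10]: each layer gets an (out, in + 1) matrix.
    weights, offset = [], 0
    for inp, out in zip(topology[:-1], topology[1:]):
        size = out * (inp + 1)
        weights.append(theta[offset:offset + size].reshape(out, inp + 1))
        offset += size
    return weights

demo_topology = [400, 25, 10]
theta_len = sum(o * (i + 1) for i, o in zip(demo_topology[:-1], demo_topology[1:]))
layers = unroll_theta(np.zeros(theta_len), demo_topology)  # shapes: (25, 401), (10, 26)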
def train(self_, X, y, solution=None, Lambda=0, iterations=50, **kwArgs):
    shaper = solution.shaper() if solution else ma.DataShaper(X, **kwArgs)
    theta = LogisticRegressionSolver.__initial_theta(shaper, y, solution=solution, **kwArgs)
    y = shaper.map_labels(y)
    X = shaper.conform(X)
    # One-vs-all: fit a separate regularized classifier for every class.
    for c in range(shaper.class_count()):
        theta[c] = optimize.fmin_cg(
            compute_cost, theta[c], fprime=compute_grad,
            args=(X, (y == c), Lambda),
            maxiter=iterations, disp=False
        )
    return ma.Solution(model=theta, shaper=shaper)
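# A self-contained sketch of the regularized one-vs-all loop used above, calling
# scipy.optimize.fmin_cg directly. The textbook logistic-regression cost and gradient
# below stand in for mlak's compute_cost/compute_grad (not shown); the data is synthetic.
import numpy as np
from scipy import optimize

def lr_cost(theta, X, y, lam):
    m = len(y)
    h = np.clip(1.0 / (1.0 + np.exp(-X.dot(theta))), 1e-10, 1 - 1e-10)
    reg = (lam / (2 * m)) * np.sum(theta[1:] ** 2)
    return (-y.dot(np.log(h)) - (1 - y).dot(np.log(1 - h))) / m + reg

def lr_grad(theta, X, y, lam):
    m = len(y)
    h = 1.0 / (1.0 + np.exp(-X.dot(theta)))
    grad = X.T.dot(h - y) / m
    grad[1:] += (lam / m) * theta[1:]
    return grad

X_demo = np.hstack([np.ones((150, 1)), np.random.randn(150, 4)])  # bias column prepended
y_demo = np.random.randint(0, 3, 150)
thetas = np.zeros((3, X_demo.shape[1]))
for c in range(3):
    thetas[c] = optimize.fmin_cg(lr_cost, thetas[c], fprime=lr_grad,
                                 args=(X_demo, (y_demo == c).astype(float), 1.0),
                                 maxiter=50, disp=False)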
def train(args):
    solver = preferredEngine()
    Lambda = list(map(float, args.Lambda.split(",")))
    rawData = dio.load(args.data_set)
    solution = None
    try:
        solution = dio.load(args.solution)  # resume from an existing solution if one is present
    except Exception:
        pass
    X_orig = rawData["X"]
    y_orig = rawData["y"]
    args.engine = solver.__class__.__name__
    optimizationResults = ma.find_solution(
        solver, X_orig, y_orig, solutionInit=solution,
        showFailureRateTrain=True,
        optimizationParams={"nnTopology": args.topology, "Lambda": Lambda},
        files=[args.data_set],
        log={"log_file_name": "mlak.log"},
        **vars(args)
    )
    solution = optimizationResults.solution
    dio.save(args.solution, solution)
    if args.debug:  # pragma: no cover
        print("solution = {}".format(solution))
    if args.verbose and solution.shaper().is_classifier():  # pragma: no cover
        for i in range(len(y_orig)):
            if not predict(solver, solution, X_orig, y_orig, i):
                return
def train(self_, X, y, solution=None, Lambda=0, iterations=50, **kwArgs):
    shaper = solution.shaper() if solution else ma.DataShaper(X, **kwArgs)
    theta = LinearRegressionSolver.__initial_theta(shaper, solution, **kwArgs)
    X = shaper.conform(X)
    theta = oa.gradient_descent_fminCG(
        oa.Algorithm(compute_cost, compute_grad),
        X, y, theta, iterations, Lambda, disp=False
    )
    return ma.Solution(model=theta, shaper=shaper)
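# A self-contained sketch of the regularized linear-regression fit performed above, using
# scipy.optimize.fmin_cg directly in place of oa.gradient_descent_fminCG / oa.Algorithm,
# whose wrappers are not shown here; the cost and gradient are the textbook forms and the
# data is synthetic.
import numpy as np
from scipy import optimize

def lin_cost(theta, X, y, lam):
    m = len(y)
    err = X.dot(theta) - y
    return (err.dot(err) + lam * np.sum(theta[1:] ** 2)) / (2 * m)

def lin_grad(theta, X, y, lam):
    m = len(y)
    grad = X.T.dot(X.dot(theta) - y) / m
    grad[1:] += (lam / m) * theta[1:]
    return grad

X_demo = np.hstack([np.ones((100, 1)), np.random.randn(100, 3)])  # bias column prepended
y_demo = X_demo.dot(np.array([1.0, 2.0, -1.0, 0.5])) + 0.1 * np.random.randn(100)
theta_fit = optimize.fmin_cg(lin_cost, np.zeros(4), fprime=lin_grad,
                             args=(X_demo, y_demo, 1.0), maxiter=50, disp=False)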
def analyze(args):  # pragma: no cover
    solver = preferredEngine()
    rawData = dio.load(args.data_set)
    X_orig = rawData["X"]
    y_orig = rawData["y"]
    args.engine = solver.__class__.__name__
    analyzerResults = ma.analyze(
        solver, X_orig, y_orig,
        optimizationParams={"nnTopology": args.topology, "Lambda": args.Lambda, "functions": None},
        files=[args.data_set],
        **vars(args)
    )
    fig = plt.figure(1)
    plt.rc('grid', linestyle=":", color='gray')
    # Learning curve: error rate versus training-set size.
    plt.subplot(211)
    plt.title("data: {}\nlambda: {}\n\n".format(args.data_set, args.Lambda), loc="left")
    plt.title("Sample count test\n")
    plt.xlabel("Sample count")
    plt.ylabel("Error rate")
    plt.plot(analyzerResults.sampleCountAnalyzis.sampleCount, analyzerResults.sampleCountAnalyzis.errorTrain, 'b-', label="train")
    plt.plot(analyzerResults.sampleCountAnalyzis.sampleCount, analyzerResults.sampleCountAnalyzis.errorCV, 'g-', label="CV")
    plt.grid()
    plt.legend()
    # Convergence curve: error rate versus optimizer iteration count.
    plt.subplot(212)
    plt.title("Iteration count test")
    plt.xlabel("Iteration count")
    plt.ylabel("Error rate")
    plt.plot(analyzerResults.iterationCountAnalyzis.iterationCount, analyzerResults.iterationCountAnalyzis.errorTrain, 'b-', label="train")
    plt.plot(analyzerResults.iterationCountAnalyzis.iterationCount, analyzerResults.iterationCountAnalyzis.errorCV, 'g-', label="CV")
    plt.grid()
    plt.legend()
    def on_resize(event):
        plt.tight_layout()
        plt.subplots_adjust(right=0.95)
    cid = fig.canvas.mpl_connect("resize_event", on_resize)
    plt.show()
def examine(self_, solution, X):
    yp = self_.__examine(solution, X)
    return ma.label_confidence(solution.shaper(), yp)
def examine(self_, solution, X):
    X = solution.shaper().conform(X)
    yp = predict_one_vs_all(X, solution.model())
    return ma.label_confidence(solution.shaper(), yp)
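# A minimal standalone sketch of the one-vs-all prediction step behind examine(): score each
# class with its weight vector and pick the most confident one. This is the textbook
# technique, not necessarily mlak's exact predict_one_vs_all / label_confidence implementation.
import numpy as np

def predict_one_vs_all_sketch(X, thetas):
    # thetas: (num_classes, n_features) -- one weight vector per class.
    scores = 1.0 / (1.0 + np.exp(-X.dot(np.asarray(thetas).T)))  # sigmoid score per class
    labels = np.argmax(scores, axis=1)                           # most confident class per sample
    confidence = scores[np.arange(len(labels)), labels]
    return labels, confidence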