def test_novak():
    """Penalty-method test: minimize ||x - 4||^2 with one inequality and
    one equality constraint.

    Constraints: x[0] <= 5 (inequality) and x[0] + x[1] == 3 (equality).
    Expected minimizer: (1.5, 1.5).
    """
    objective = Function(
        lambda x: (x - 4).dot(x - 4),
        lambda x: 2 * (x - 4),
    )
    ineq = Function(lambda x: x[0] - 5, lambda x: np.array([1, 0]))
    eq = Function(lambda x: x.sum() - 3, lambda x: np.array([1, 1]))
    start = np.zeros(2)
    return penalty_optim(objective, [ineq], [eq], solver, start,
                         alpha_gen(), disp=True)
def test_medium():
    """Penalty-method test: quadratic objective with nonnegativity
    constraints on both coordinates.

    Expected minimizer: (0, 0).
    """
    objective = Function(
        lambda x: x[0]**2 + x[0] * 6 + x[1]**2 + x[1] * 9,
        lambda x: 2 * x + np.array([6, 9]),
    )
    nonneg_x = Function(lambda x: -x[0], lambda x: np.array([-1, 0]))
    nonneg_y = Function(lambda x: -x[1], lambda x: np.array([0, -1]))
    start = np.array([-1, 0.5])
    return penalty_optim(objective, [nonneg_x, nonneg_y], [], solver, start,
                         alpha_gen(), disp=True)
def test_impossible():
    """Penalty-method test with an equality constraint that cannot be met
    exactly as the objective pushes x[0] toward infinity.

    Expected behavior: the iteration diverges.
    """
    objective = Function(lambda x: np.exp(-x[0]), lambda x: -np.exp(-x[0]))
    equality = Function(
        lambda x: x[0] * np.exp(-x[0]),
        lambda x: (1 - x[0]) * np.exp(-x[0]),
    )
    start = np.array([1])
    return penalty_optim(objective, [], [equality], solver, start,
                         alpha_gen(), eps=1e-20, disp=True)
def test():
    """Print the generated Ada specification and body for a small,
    hand-built LLVM.Core package description."""

    def qualified(dotted):
        # Package names are represented as lists of dot-separated components.
        return dotted.split('.')

    pkg = Package(qualified('LLVM.Core'), [
        Function('Module_Create_With_Name', 'Module',
                 [Argument('ModuleID', 'Interfaces.C.Strings.chars_ptr')],
                 []),
        Function('Get_Data_Layout', 'Interfaces.C.Strings.chars_ptr',
                 [Argument('M', 'Module')],
                 []),
        Procedure('Set_Data_Layout',
                  [Argument('M', 'Module'),
                   Argument('Triple', 'Interfaces.C.Strings.chars_ptr')],
                  []),
        Procedure('Set_Some_Flag',
                  [Argument('M', 'Module'),
                   Argument('Flag', 'Bool_T')],
                  []),
        Function('Get_Some_Flag', 'Bool_T',
                 [Argument('M', 'Module')],
                 []),
    ])

    print('-- Specification')
    print('with Interfaces.C.Strings;')
    print('')
    print('package {} is'.format(fmt_name(pkg.name)))
    print('')
    for decl in pkg.elements:
        for text in generate_decl(decl):
            print('    {}'.format(text))
        # Blank line between consecutive declarations.
        print('')
    print('end {};'.format(fmt_name(pkg.name)))
    print('')
    print('-- Body')
    for text in generate_body(pkg):
        print(text)
def subp_tuple(subp_decl):
    """Summarize a subprogram declaration node.

    Returns a triple ``(subp_decl, spec_tuple, loc)`` where ``spec_tuple``
    is a ``Procedure`` or ``Function`` record built from the node's
    subprogram spec, and ``loc`` is
    ``((start_line, start_col), (end_line, end_col))`` of the declaration.
    """
    sloc = subp_decl.sloc_range
    loc = ((sloc.start.line, sloc.start.column),
           (sloc.end.line, sloc.end.column))

    subp_spec = subp_decl.f_subp_spec
    aspect_decl = get_aspects(subp_decl)
    # A node with no aspect specification yields an empty aspect list.
    aspects = (aspect_decl.f_aspects.f_aspect_assocs
               if aspect_decl.f_aspects else [])

    # FIX: the original bound this value to a local named ``subp_tuple``,
    # shadowing the enclosing function itself — renamed for clarity.
    if subp_spec.f_subp_kind.is_a(lal.SubpKindProcedure):
        spec_tuple = Procedure(subp_spec.f_subp_name.text,
                               arguments_array(subp_spec),
                               aspects)
    else:
        spec_tuple = Function(subp_spec.f_subp_name.text,
                              get_return_str(subp_spec),
                              arguments_array(subp_spec),
                              aspects)

    return subp_decl, spec_tuple, loc
def __init__(self, tree: ast.Program, stdlib: Optional[Dict[str, Function]] = None):
    """Split *tree* into function definitions and top-level instructions.

    Function nodes are indexed by name into ``self.function_nodes`` (the AST
    node) and ``self.functions`` (arity + return type); all other
    instructions are copied into ``self.program``.

    Raises:
        CodeGenError: if two functions share the same name.

    NOTE(review): when *stdlib* is given, it is stored and mutated in place
    rather than copied — callers share the dict. Confirm this is intended.
    """
    self.current_funcs = []
    self.functions = {} if stdlib is None else stdlib
    self.function_nodes = {}
    self.program = ast.Program(tree.name, [])

    for node in tree.instructions:
        if not isinstance(node, ast.Func):
            # Non-function instructions stay in the program body.
            self.program.instructions.append(node)
            continue
        name = node.name
        if name in self.function_nodes:
            raise CodeGenError(
                f'line {node.line}: function \'{name}\' defined twice')
        self.function_nodes[name] = node
        self.functions[name] = Function(len(node.args), node.return_type)

    # Scope stack for variable lookup and the current stack offset.
    self.var_map = [{}]
    self.stack_ptr = 0
from random import seed, uniform, random
import matplotlib.pyplot as plt
from perceptron import Perceptron
from utils import Function, generatePoints, generateY

iterations = []

for trial in range(1000):
    # Build a random target line from two random points in [-1, 1]^2.
    func = Function()
    # func.set(1, 0)
    x0, y0 = uniform(-1, 1), uniform(-1, 1)
    x1, y1 = uniform(-1, 1), uniform(-1, 1)
    func.buildFromPoints(x0, y0, x1, y1)
    # func._print()

    # Sample 100 random points and label them with the target function.
    X = generatePoints(100)
    y = generateY(func, X)

    # Fit a perceptron to the labeled sample.
    perc = Perceptron()
    perc.train(X, y)
from random import seed, uniform, random
from utils import Function, generatePoints, generateY
import numpy as np
import matplotlib.pyplot as plt
from perceptron import Perceptron


def linear_regression(X, y):
    """Return least-squares weights via the Moore-Penrose pseudo-inverse."""
    pseudo_inverse = np.linalg.pinv(X)
    return pseudo_inverse.dot(y)


# Build a random target line from two random points in [-1, 1]^2.
func = Function()
x0, y0 = uniform(-1, 1), uniform(-1, 1)
x1, y1 = uniform(-1, 1), uniform(-1, 1)
func.buildFromPoints(x0, y0, x1, y1)
# func._print()

# Sample 100 random points, prepend the constant bias coordinate,
# and label each point with the target function.
X = generatePoints(100)
X_with_x0 = [[1] + point for point in X]
y = generateY(func, X)

w = linear_regression(X_with_x0, y)
print(w)

perc = Perceptron()
def test_what():
    """Penalty-method test with a steep degree-9 objective and a far-away
    constraint (x[0] >= 100000).

    Expected behavior: the solver reaches its function-call limit.
    """
    objective = Function(lambda x: x[0]**9, lambda x: 9 * x[0]**8)
    constraint = Function(lambda x: 100000 - x[0], lambda x: np.array([-1]))
    start = np.array([0])
    return penalty_optim(objective, [constraint], [], solver, start,
                         alpha_gen(), disp=True)
def test_simple():
    """Penalty-method test: minimize x^2 subject to x >= 1.

    Expected minimizer: (1).
    """
    f = Function(lambda x: x[0]**2, lambda x: x[0] * 2)
    g = Function(lambda x: 1 - x[0], lambda x: np.array([-1]))
    # FIX: the starting point was the bare scalar 10, while both the
    # objective and the constraint index into x (x[0] raises TypeError on a
    # plain int) and every sibling test supplies a 1-D ndarray.
    x0 = np.array([10])
    return penalty_optim(f, [g], [], solver, x0, alpha_gen(), disp=True)