Example #1
def test_fixed_file_is_valid():
    """Tests a fixed file is valid comparing the total width with expected
    """
    myParser = MyParser(config.SPEC_FILE)
    myParser.generate_fixed_file()
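    # the expected line width is the sum of all column offsets declared in the spec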
    total_offset = sum(int(off) for off in myParser.specs["Offsets"])
    assert os.path.exists(config.OUTPUT_PATH + config.FIXED_FILENAME)

    with open(config.OUTPUT_PATH + config.FIXED_FILENAME, mode="r",
              encoding=myParser.specs["FixedWidthEncoding"]) as inp:
        for line in inp:
            assert len(line.replace(config.BREAKLINE, "")) == total_offset
Example #2
def test_delimited_file_is_valid():
    """Tests a delimited file is valid comparing number of columns in every line with expeced
    """
    myParser = MyParser(config.SPEC_FILE)
    myParser.generate_delimited_file(source_filename=config.OUTPUT_PATH+config.FIXED_FILENAME)
    total_columns = len(myParser.specs["ColumnNames"])
    assert os.path.exists(config.OUTPUT_PATH + config.DELIMITED_FILENAME)

    with open(config.OUTPUT_PATH + config.DELIMITED_FILENAME, mode="r",
              encoding=myParser.specs["FixedWidthEncoding"]) as inp:
        delimited_reader = csv.reader(inp, delimiter=config.DEFAULT_DELIMITER, skipinitialspace=True)
        for line in delimited_reader:
            assert len(line) == total_columns
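
# Sketch for the encoding TODO below (an assumption, not part of the original
# suite; the test name is hypothetical): re-reading the generated file with
# the encoding declared in the spec should not raise a UnicodeDecodeError.
def test_fixed_file_encoding():
    """Tests that the generated fixed file is readable with the declared encoding
    """
    myParser = MyParser(config.SPEC_FILE)
    myParser.generate_fixed_file()
    with open(config.OUTPUT_PATH + config.FIXED_FILENAME, mode="r",
              encoding=myParser.specs["FixedWidthEncoding"]) as inp:
        # decoding errors would raise here; also check the file is non-empty
        assert inp.read()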

# TODO: Test encodings more thoroughly (a minimal sketch appears above)
Example #3
def main():
    import tb
    tb.color()

    parser = MyParser(model_names).parser

    global args, best_prec1
    args = parser.parse_args()
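    best_prec1 = 0  # ensure the global best accuracy starts defined; resuming overwrites it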

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size)

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
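        # swap the final fully connected layer for one sized to args.classes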
        fc_features = model.fc.in_features
        model.fc = nn.Linear(fc_features, args.classes)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    if args.gpu is not None:
        model = model.cuda(args.gpu)
    elif args.distributed:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)
    else:
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

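    # let cudnn autotune convolution algorithms (fastest when input sizes are fixed)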
    cudnn.benchmark = True

    # Data loading code
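    # --reverse swaps the roles of the train and test splits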
    train_s = 'train'
    test_s = 'test'
    if args.reverse:
        train_s = 'test'
        test_s = 'train'
    traindir = os.path.join(args.data, train_s)
    valdir = os.path.join(args.data, test_s)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = MyFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(
        MyFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
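        # with --reverse, validation runs only on every second epoch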
        prec1 = 0
        if args.reverse:
            if (epoch+1)%2 == 0:
                prec1 = validate(val_loader, model, criterion)
        else:
            prec1 = validate(val_loader, model, criterion)
        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
        }, is_best)

    print('\n', '*' * 50)
    print('\tBest: {:.3f}'.format(best_prec1))
Example #4
from lexer import MyLexer
from parser import MyParser

if __name__ == "__main__":

    lexer = MyLexer()
    parser = MyParser()

    while True:

        text = input('our language > ')

        if text:

            lex = lexer.tokenize(text)
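            # note: tokenize() typically returns a generator, so printing the
            # tokens below would exhaust it before parser.parse(lex) runs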
            #for token in lex:
            #   print(token)
            tree = parser.parse(lex)
            print(tree)
Example #5
import sys
from parser import MyParser
from check_and_eval import typecheck_expression, evaluate_expression
from term_defs import Seq


if __name__ == "__main__":
    parser = MyParser()
    print("Parser setup finished")

    for arg in sys.argv[1:]:
        print("Interpreting file: ", str(arg))
        with open(arg, 'r') as myfile:
            data = myfile.read()
            print(data)
            parsed_data = parser.parse(data)
            print(parsed_data)
            for expression in parsed_data:
                print("")
                _type = typecheck_expression(expression, dict())
                print("Expression:")
                print(" ", expression)
                print("")
                print("Typecheck:")
                print(" ", _type)
                print("")
                _eval = evaluate_expression(expression)
                print("Evaluation:")
                print(" ", _eval)
                print("")
Example #6
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    # Get the desired options from the user via a command-line argument parser;
    # only the specs file is required, delimiter and numlines can fall back to defaults
    arg_parser = argparse.ArgumentParser()

    # require the user to provide a specs file
    arg_parser.add_argument("-s", "--spec", required=True, help="Specs")

    # the delimiter is not in the specs, so let the user pass it as an argument
    arg_parser.add_argument("-d", "--delimiter", required=False, help="Delimiter")

    # the number of lines is not in the specs either, so let the user pass it too
    arg_parser.add_argument("-n", "--numlines", required=False, type=int, help="Number of lines in file")

    args = vars(arg_parser.parse_args())

    spec_filename = args["spec"]
    delimiter = args["delimiter"]
    
    if args["numlines"]:
        numlines = args["numlines"]
    
    if os.path.exists(spec_filename):
        myParser = MyParser(spec_filename)
        fixed_filename = myParser.generate_fixed_file()
        delimited_filename = myParser.generate_delimited_file(fixed_filename)
    else:
        logger.info("No valid specs file found (%s). Nothing to do! :(", spec_filename)
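
# Example invocation (the script name and file paths are hypothetical):
#   python run_parser.py --spec specs.json --delimiter "," --numlines 100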
Example #7
from code_generation import MyCodeGenerator
from lexer import MyLexer
from parser import MyParser
from semantic import MySemanticAnalyzer
import sys

if __name__ == "__main__":

    _mylexer = MyLexer()
    _myparser = MyParser()

    if len(sys.argv) > 1:
        _file = sys.argv[1]
        with open(_file, encoding="utf-8") as _source:
            _cool_program = _source.read()
        try:
            _mylexer_result = _mylexer.tokenize(_cool_program)
        except Exception:
            # lexing errors are collected in _mylexer.errors and checked below
            pass

        if _mylexer.errors:
            print(_mylexer.errors[0])
            exit(1)
        try:
            myAst = _myparser.parse(_cool_program)
        except Exception:
            # parse errors are collected in _myparser.errors and checked below
            pass

        if _myparser.errors:
            print(_myparser.errors[0])
            exit(1)
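    # Added fallback (an assumption, not in the original): print a hint when no
    # input file is given
    else:
        print("Usage: provide a source file as the first argument")
        exit(1)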
Example #8
def test_delimited_file_exists():
    """Tests a delimited file was generated
    """
    myParser = MyParser(config.SPEC_FILE)
    myParser.generate_delimited_file(source_filename=config.OUTPUT_PATH+config.FIXED_FILENAME)
    assert os.path.exists(config.OUTPUT_PATH+config.DELIMITED_FILENAME)
Example #9
def test_fixed_file_exists():
    """Tests a fixed file was generated
    """
    myParser = MyParser(config.SPEC_FILE)
    filename = myParser.generate_fixed_file()
    assert os.path.exists(filename)
Example #10
def test_invalid_spec():
    """Tests an invalid specs. E.g. non numeric offsets or different number of columns and offsets
    """
    filename = "invalid_spec.json"
    myParser = MyParser(filename)
    assert not myParser.specs
Example #11
def test_no_spec():
    """Tests no specs file avilable
    """
    filename = "nospec.json"
    myParser = MyParser(filename)
    assert not myParser.specs
Example #12
def run():
    bytecode = Bytecode()

    lexer = MyLexer()
    parser = MyParser(bytecode)

    code = '''
    var x = 1 - 2 - 3;
    var y = 1 + 2 + 3 + x;
    '''

    #result = parser.parse(lexer.tokenize(code))
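
    # The hand-assembled program below loops: starting from 10 it subtracts 2
    # on each pass and increments a counter in storage slot 0, so the counter
    # should end at 5 (assuming the BRZ/JMP operands are relative offsets)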

    # Initialize the counter
    bytecode.emit(Instruction.PUSH)
    bytecode.emit(0)
    bytecode.emit(Instruction.STORE)
    bytecode.emit(0)

    # Initialize the value
    bytecode.emit(Instruction.PUSH)
    bytecode.emit(10)

    bytecode.emit(Instruction.DUP)

    # Branch if the value is 0
    bytecode.emit(Instruction.BRZ)
    bytecode.emit(12)

    # Loads the counter
    bytecode.emit(Instruction.LOAD)
    bytecode.emit(0)

    # Adds one to the counter
    bytecode.emit(Instruction.PUSH)
    bytecode.emit(1)

    bytecode.emit(Instruction.ADD)

    # Stores the counter again
    bytecode.emit(Instruction.STORE)
    bytecode.emit(0)

    # Pushes 2 on the stack
    bytecode.emit(Instruction.PUSH)
    bytecode.emit(2)

    # Subtracts 2 from the value
    bytecode.emit(Instruction.SUB)

    # Jumps back to the top
    bytecode.emit(Instruction.JMP)
    bytecode.emit(-15)

    bytecode.emit(Instruction.HALT)

    bytecode.dumpinstructions()

    vm = VM(bytecode)
    vm.run()

    print("stack:", vm.stack)
    print("storage:", vm.storage)