Code Example #1
File: test_all.py  Project: kwang1971/miasm
#! /usr/bin/env python2

import argparse
import time
import os
import tempfile

from utils.test import Test
from utils.testset import TestSet
from utils import cosmetics, multithread
from multiprocessing import Queue

testset = TestSet("../")
TAGS = {"regression": "REGRESSION", # Regression tests
        "example": "EXAMPLE", # Examples
        "long": "LONG", # Very time consumming tests
        "llvm": "LLVM", # LLVM dependency is required
        "tcc": "TCC", # TCC dependency is required
        "gcc": "GCC", # GCC based tests
        "z3": "Z3", # Z3 dependecy is needed
        "qemu": "QEMU", # QEMU tests (several tests)
        "cparser": "CPARSER", # pycparser is needed
        }

# Regression tests
class RegressionTest(Test):
    """Regression tests specificities:
    - @base_dir: test/@base_dir
    - @tags: TAGS["regression"]"""

    sample_dir = os.path.join("..", "samples")
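
Each of these test_all.py excerpts imports argparse but is cut off before the parser appears. A minimal sketch (an assumption, not miasm's actual command line) of how the TAGS mapping above could drive tag-based filtering:

import argparse

# Hypothetical tag filter built on the TAGS dict above; every flag name
# here is illustrative, not taken from the project.
parser = argparse.ArgumentParser(description="Run the test set")
for name in TAGS:
    parser.add_argument("--skip-%s" % name, action="store_true",
                        help="Skip tests tagged %s" % TAGS[name])
args = parser.parse_args()
# argparse maps "--skip-regression" to the attribute "skip_regression".
excluded_tags = [TAGS[name] for name in TAGS
                 if getattr(args, "skip_%s" % name)]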
Code Example #2
File: test_all.py  Project: Lukas-Dresel/miasm
import argparse
import time
import os
import tempfile

from utils.test import Test
from utils.testset import TestSet
from utils import cosmetics, multithread
from multiprocessing import Queue

testset = TestSet("../")
TAGS = {"regression": "REGRESSION", # Regression tests
        "example": "EXAMPLE", # Examples
        "long": "LONG", # Very time consumming tests
        "llvm": "LLVM", # LLVM dependency is required
        "tcc": "TCC", # TCC dependency is required
        "z3": "Z3", # Z3 dependecy is needed
        "qemu": "QEMU", # QEMU tests (several tests)
        }

# Regression tests
class RegressionTest(Test):
    """Regression tests specificities:
    - @base_dir: test/@base_dir
    - @tags: TAGS["regression"]"""

    sample_dir = os.path.join("..", "samples")

    def __init__(self, *args, **kwargs):
        super(RegressionTest, self).__init__(*args, **kwargs)
        self.base_dir = os.path.join("test", self.base_dir)
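
The constructor's effect in one line (assuming the RegressionTest class above and the base_dir keyword visible in examples #3 and #4):

# Hypothetical instantiation: base_dir="arch" is rewritten to "test/arch".
t = RegressionTest(["x86/arch.py"], base_dir="arch")
assert t.base_dir == os.path.join("test", "arch")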
Code Example #3
import argparse
import time
import os

from utils.test import Test
from utils.testset import TestSet
from utils import cosmetics, monothread, screendisplay

testset = TestSet("../")
TAGS = {
    "regression": "REGRESSION",  # Regression tests
    "example": "EXAMPLE",  # Examples
    "long": "LONG",  # Very time consumming tests
    "llvm": "LLVM",  # LLVM dependency is required
    "z3": "Z3",  # Z3 dependecy is needed
}


# Regression tests
class RegressionTest(Test):
    """Regression tests specificities:
    - @base_dir: test/@base_dir
    - @tags: TAGS["regression"]"""
    def __init__(self, *args, **kwargs):
        super(RegressionTest, self).__init__(*args, **kwargs)
        self.base_dir = os.path.join("test", self.base_dir)
        self.tags.append(TAGS["regression"])


## Architecture
testset += RegressionTest(["x86/arch.py"],
Code Example #4
File: test_all.py  Project: blaquee/miasm
import argparse
import time
import os
import tempfile

from utils.test import Test
from utils.testset import TestSet
from utils import cosmetics, monothread, screendisplay

testset = TestSet("../")
TAGS = {"regression": "REGRESSION", # Regression tests
        "example": "EXAMPLE", # Examples
        "long": "LONG", # Very time consumming tests
        "llvm": "LLVM", # LLVM dependency is required
        "z3": "Z3", # Z3 dependecy is needed
        "qemu": "QEMU", # QEMU tests (several tests)
        }

# Regression tests
class RegressionTest(Test):
    """Regression tests specificities:
    - @base_dir: test/@base_dir
    - @tags: TAGS["regression"]"""

    def __init__(self, *args, **kwargs):
        super(RegressionTest, self).__init__(*args, **kwargs)
        self.base_dir = os.path.join("test", self.base_dir)
        self.tags.append(TAGS["regression"])

## Architecture
testset += RegressionTest(["x86/arch.py"], base_dir="arch",
Code Example #5
def main():
    parser = OptionParser()
    parser.add_option("-c", "--conf", dest="configure", help="configure filename")
    options, _ = parser.parse_args() 
    if options.configure:
        conf_file = str(options.configure)
    else:
        print('please specify --conf configure filename')
        exit(-1)
  
    trainset_params, testset_params, net_params, solver_params = process_config(conf_file)
    
    trainset = TrainSet(trainset_params['data_path'], trainset_params['sample'])
    
    net_params['entity_num'] = trainset.entity_num
    net_params['relation_num'] = trainset.relation_num
    net_params['batch_size'] = trainset.record_num / int(net_params['nbatches'])
    model = ConvModel(net_params)
    model.build_graph()
    
    os.environ['CUDA_VISIBLE_DEVICES'] = solver_params['gpu_id']
    if solver_params['phase'] == 'train':
        batch_gen = trainset.batch_gen(net_params['batch_size'])
    
        if not solver_params.has_key('pretrain_model') or solver_params['pretrain_model'] == '':
            solver_params['pretrain_model'] = None
        
        if not solver_params.has_key('save_fld'):
            solver_params['save_fld'] = 'models/Conv_v1_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                                        trainset_params['data_path'].split('/')[-1], 
                                        trainset_params['sample'],
                                        net_params['embed_size'],
                                        net_params['activation'],
                                        net_params['channel'],
                                        net_params['learning_rate'],
                                        net_params['nbatches'],
                                        net_params['normed'],
                                        net_params['opt'])
        elif solver_params['save_fld'] == '':                               
            solver_params['save_fld'] = None
        print solver_params['save_fld']

        if not solver_params.has_key('summary_fld'):
            solver_params['summary_fld']='graphs/Conv_v1_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                                        trainset_params['data_path'].split('/')[-1], 
                                        trainset_params['sample'],
                                        net_params['embed_size'],
                                        net_params['activation'],
                                        net_params['channel'],
                                        net_params['learning_rate'],
                                        net_params['nbatches'],
                                        net_params['normed'],
                                        net_params['opt'])
        elif solver_params['summary_fld'] == '':
            solver_params['summary_fld'] = None

        train_model(model, batch_gen, solver_params)
        
        if solver_params['save_fld']:
            testset_params['save_fld'] = solver_params['save_fld']
            testset_params['start'] = 1
            testset_params['end'] = 1
            testset_params['interval'] = solver_params['max_iter']
            testset_params['dataset'] = trainset_params['data_path'].split('/')[-1]
            testset = TestSet(trainset_params['data_path'], 'test')
            testset_params['batch_size'] = net_params['batch_size']
            if testset_params['testtype'] == 'link':
                test_model_link(model, testset, testset_params)
            elif testset_params['testtype'] == 'trip':
                raise ValueError('Wait to finish.')
            else:
                raise ValueError('Undefined testtype.')
    elif solver_params['phase'] == 'val':
        raise ValueError('Wait to finish.')
    elif solver_params['phase'] == 'test':
        if not testset_params.has_key('save_fld') or testset_params['save_fld'] == '':
            testset_params['save_fld'] = 'models/Conv_v1_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                                        trainset_params['data_path'].split('/')[-1], 
                                        trainset_params['sample'],
                                        net_params['embed_size'],
                                        net_params['activation'],
                                        net_params['channel'],
                                        net_params['learning_rate'],
                                        net_params['nbatches'],
                                        net_params['normed'],
                                        net_params['opt'])
        
        print testset_params['save_fld']
        testset_params['dataset'] = trainset_params['data_path'].split('/')[-1]
        testset = TestSet(trainset_params['data_path'], 'test')
        #testset = TestSet(trainset_params['data_path'], 'train')
        testset_params['batch_size'] = net_params['batch_size']
        if testset_params['testtype'] == 'link':
            test_model_link(model, testset, testset_params)
        elif testset_params['testtype'] == 'trip':
            raise ValueError('Wait to finish.')
        else:
            raise ValueError('Undefined testtype.')
    else:
        raise ValueError('Undefined phase.')
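
This snippet is Python 2 throughout: print statements, dict.has_key() (removed in Python 3), and integer division in record_num / int(net_params['nbatches']) (Python 3 would need //). Its recurring convention is that a missing key means "fall back to a generated default" while an empty string means "explicitly disabled". A minimal Python 3 sketch of that defaulting logic (the function and parameter names are illustrative, not from the project):

def apply_solver_defaults(solver_params, default_save_fld):
    # dict.has_key(k) becomes `k in d` / d.get(k) in Python 3.
    if solver_params.get('pretrain_model', '') == '':
        solver_params['pretrain_model'] = None
    if 'save_fld' not in solver_params:
        solver_params['save_fld'] = default_save_fld  # missing -> default
    elif solver_params['save_fld'] == '':
        solver_params['save_fld'] = None              # '' -> disabled
    return solver_params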
Code Example #6
File: run_TransE.py  Project: sui6662012/TransAt
def main():
    parser = OptionParser()
    parser.add_option("-c",
                      "--conf",
                      dest="configure",
                      help="configure filename")
    options, _ = parser.parse_args()
    if options.configure:
        conf_file = str(options.configure)
    else:
        print('please specify --conf configure filename')
        exit(-1)

    trainset_params, testset_params, net_params, solver_params = process_config(
        conf_file)

    #trainset = TrainSet(trainset_params['data_path'], trainset_params['sample'], asym=True)
    trainset = TrainSet(trainset_params['data_path'],
                        trainset_params['sample'])

    net_params['entity_num'] = trainset.entity_num
    net_params['relation_num'] = trainset.relation_num
    net_params['batch_size'] = trainset.record_num / int(
        net_params['nbatches'])

    if solver_params['phase'] == 'train':
        model = TransEModel(net_params)
        model.build_graph()
        os.environ['CUDA_VISIBLE_DEVICES'] = solver_params['gpu_id']
        batch_gen = trainset.batch_gen(net_params['batch_size'])

        if not solver_params.has_key(
                'pretrain_model') or solver_params['pretrain_model'] == '':
            solver_params['pretrain_model'] = None

        if not solver_params.has_key('save_fld'):
            solver_params[
                'save_fld'] = 'models/TransE_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                    trainset_params['data_path'].split('/')[-1],
                    trainset_params['sample'], net_params['embed_size'],
                    net_params['margin'], net_params['learning_rate'],
                    net_params['nbatches'], net_params['normed'],
                    net_params['dorc'], net_params['opt'])
        elif solver_params['save_fld'] == '':
            solver_params['save_fld'] = None
        print solver_params['save_fld']

        if not solver_params.has_key('summary_fld'):
            solver_params[
                'summary_fld'] = 'graphs/TransE_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                    trainset_params['data_path'].split('/')[-1],
                    trainset_params['sample'], net_params['embed_size'],
                    net_params['margin'], net_params['learning_rate'],
                    net_params['nbatches'], net_params['normed'],
                    net_params['dorc'], net_params['opt'])
        elif solver_params['summary_fld'] == '':
            solver_params['summary_fld'] = None

        solver_params['dorc'] = net_params['dorc']
        train_model(model, batch_gen, solver_params)

        if solver_params['save_fld']:
            testset_params['save_fld'] = solver_params['save_fld']
            testset_params['start'] = 1
            testset_params['end'] = 1
            testset_params['interval'] = solver_params['max_iter']
            testset_params['dataset'] = trainset_params['data_path'].split(
                '/')[-1]
            testset = TestSet(trainset_params['data_path'], 'test')
            testset_params['batch_size'] = net_params['batch_size']
            if testset_params['testtype'] == 'link':
                test_model_link(model, testset, testset_params)
            elif testset_params['testtype'] == 'trip':
                raise ValueError('Wait to finish.')
            else:
                raise ValueError('Undefined testtype.')
    elif solver_params['phase'] == 'val':
        raise ValueError('Wait to finish.')
    elif solver_params['phase'] == 'test':
        #models = TransEModel(net_params)
        #models.build_graph()
        #os.environ['CUDA_VISIBLE_DEVICES'] = '3'
        models = []
        for i in xrange(4):
            with tf.device('/gpu:%d' % i):
                models.append(TransEModel(net_params))
                models[i].build_graph()
                tf.get_variable_scope().reuse_variables()
        if not testset_params.has_key(
                'save_fld') or testset_params['save_fld'] == '':
            testset_params[
                'save_fld'] = 'models/TransE_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                    trainset_params['data_path'].split('/')[-1],
                    trainset_params['sample'], net_params['embed_size'],
                    net_params['margin'], net_params['learning_rate'],
                    net_params['nbatches'], net_params['normed'],
                    net_params['dorc'], net_params['opt'])

        print testset_params['save_fld']
        testset_params['dataset'] = trainset_params['data_path'].split('/')[-1]
        testset = TestSet(trainset_params['data_path'], 'test')
        #testset = TestSet(trainset_params['data_path'], 'train')
        testset_params['batch_size'] = net_params['batch_size']
        if testset_params['testtype'] == 'link':
            test_model_link(models, testset, testset_params)
        elif testset_params['testtype'] == 'trip':
            raise ValueError('Wait to finish.')
        else:
            raise ValueError('Undefined testtype.')
    else:
        raise ValueError('Undefined phase.')
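
In its test phase this variant builds one TransEModel replica per GPU and shares their weights through tf.get_variable_scope().reuse_variables(). A minimal TensorFlow 1.x sketch of just that sharing pattern (build_replicas and build_fn are illustrative names; assumes a TF1 environment):

import tensorflow as tf

def build_replicas(build_fn, num_gpus=4):
    replicas = []
    for i in range(num_gpus):
        with tf.device('/gpu:%d' % i):
            # The first pass creates the variables; reuse_variables()
            # makes every later replica share them instead of re-creating.
            replicas.append(build_fn())
            tf.get_variable_scope().reuse_variables()
    return replicas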
Code Example #7
import argparse
import time
from utils.test import Test
from utils.testset import TestSet
from utils import cosmetics, monothread, screendisplay

testset = TestSet("../")

# Regression tests
## Architecture
testset += Test(["x86/arch.py"], base_dir="test/arch",
                products=["x86_speed_reg_test.bin",
                          "regression_test16_ia32.bin",
                          "regression_test32_ia32.bin",
                          "regression_test64_ia32.bin"])
for script in ["x86/sem.py",
               "x86/unit/mn_strings.py",
               "x86/unit/mn_float.py",
               "arm/arch.py",
               "arm/sem.py",
               "msp430/arch.py",
               "msp430/sem.py",
               "sh4/arch.py",
               "mips32/arch.py",
               ]:
    testset += Test([script], base_dir="test/arch")
## Core
for script in ["interval.py",
               "graph.py",
               "parse_asm.py",
               ]: