Code example #1
def main():
    "Several runs with different seeds but same random initial conditions (Chebyshev polinomial)."

    coef_ord_combo = random_coeff_order_combinations(2)

    # pretrain policy means based on a random Chebyshev polynomial with a fixed standard deviation
    identifiers, desired_controls = chebys_tracer(coef_ord_combo,
                                                  CONFIG.time_points,
                                                  zipped=True)
    desired_deviation = 2.0

    config = deepcopy(CONFIG)

    # add initial controls identifiers to config
    labels = multilabel_cheby_identifiers(identifiers)
    config.initial_controls_labels = labels
    print(f"Initial controls {labels}")

    # repeat simulation with different seeds
    for _ in range(config.distinct_seeds):
        training_pipeline(config, desired_controls, desired_deviation)

    # plot results
    Plotter("SimpleModel", config).plot_everything()
    Plotter("ComplexModel", config).plot_everything()
Code example #2
File: import_data.py Project: kirchma/perm
    def show_plot(self, df):
        plot = Plotter(
            df, **{
                'start': self.start,
                'stop': self.stop,
                'name': self.file_name
            })
        plot.raw_data_chart()
        # Prompt (German): "Is the measurement time OK? (y) If not, please give
        # new values for the start and end of the measurement (start, end)."
        user_input = input(
            'Ist die Messzeit in Ordnung? (y) falls nicht bitte neue Werte für Beginn und '
            'Ende der Messung angeben (start, ende)')
        return user_input
Code example #3
    def calculate_permeability(self, guess, parameter='k'):
        if self.find_file():
            # Prompt (German): "Use already adjusted measurement data? (y/n)"
            user_input = input('Bereits angepasste Messdaten nutzen? (y/n)')
            if user_input == 'y':
                self.set_adjusted_data()
            elif user_input == 'n':
                self.set_data()
        else:
            self.set_data()

        optimizer = Optimizer(self.df_100, self.sample_data, guess)
        result, opt_steps = optimizer.nelder_mead(parameter)

        plot = Plotter(self.df_100, **{'name': self.file_name})
        plot.result_chart()

        self.add_results(result, guess)
        # Prompt (German): "Save result? (y)"
        user_input = input('Ergebnis abspeichern? (y)')
        if user_input == 'y':
            self.save_adjusted_measurement_file()
            self.save_results()
Code example #4
def main():
  """ main functions """
  global __logger

  load_ini_config()
  agent_config = __game_config['agents']
  game_config = __game_config['game']

  with open('logging.yaml', 'r') as f:
    logging.config.dictConfig(yaml.safe_load(f.read()))
  __logger = logging.getLogger(__name__)
  game_config['logger'] = logging.getLogger('game')
  agent_config['logger'] = logging.getLogger('agent')

  agent_config['episodes'] = game_config['episodes']

  # Optionally get F, M, and population-size overrides from the CLI
  parser = argparse.ArgumentParser(description='F,M,Population Overrides')
  parser.add_argument('--F',
                      type=int,
                      default=game_config['F'])
  parser.add_argument('--M',
                      type=int,
                      default=game_config['M'])
  parser.add_argument('--P',
                      type=int,
                      default=game_config['population_size'])
  args = parser.parse_args()
  game_config['F'] = args.F
  game_config['M'] = args.M
  game_config['population_size'] = args.P
  __logger.info(args)

  game_config['timestamp'] = datetime.datetime.utcnow().isoformat()
  game_config['output_directory'] = setup_directory(game_config)
  game_config['plotter'] = Plotter(agent_config, game_config)

  game = Game(agent_config, game_config)
  game.play_game(game_config['episodes'])
Code example #5
File: data.py Project: ds-ga-1007/final_project
    def plots_for_boroughs(self):
        '''Return Plots by Borough'''
        plotter = Plotter(self.query())
        return plotter.borough_plots()
Code example #6
File: data.py Project: ds-ga-1007/final_project
    def plots_for_zip_code(self, zip_code):
        ''' Return Plots by Zipcode'''
        zipdata = self.query({ 'zip_code' : zip_code})

        plotter = Plotter(zipdata, "ZIP: %s" % zip_code)
        return plotter.all_plots()
Code example #7
# M: frame orientation 45 degrees to 40 intervals.
# M: 20 to 25 frames.

stimuli = {'rods': rods, 'frames': frames}

# initialize generative agent
genAgent = GenerativeAgent(params_gen, stimuli)

# initialize psi object
psi = PSI_RiF(params, stimuli)

# number of iterations of the experiment
iterations_num = 500

# initialize plotter and plot the generative distribution, variances, weights, PSE and the negative log likelihood
plotter = Plotter(params, params_gen, stimuli, genAgent, psi, iterations_num)
plotter.plotGenProbTable()
plotter.plotGenVariances()
plotter.plotGenWeights()
plotter.plotGenPSE()
plotter.plotNegLogLikelihood(responses_num=500)
plotter.plot()

for stim_selection in ['adaptive', 'random']:
    # set stimulus selection mode and reset psi object to initial values
    psi.reset(stim_selection)

    # reset plotter to plot new figures
    plotter.reset()

    # run model for given number of iterations
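    # (snippet truncated here -- a minimal hedged sketch of the missing inner loop,
    # using hypothetical method/attribute names; the project's actual API may differ)
    for _ in range(iterations_num):
        rod, frame = psi.stim                         # hypothetical: next stimulus proposed by psi
        response = genAgent.getResponses(rod, frame)  # hypothetical: simulated observer response
        psi.addData(response)                         # hypothetical: update the posterior
        plotter.plot()                                # redraw the figures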
Code example #8
File: server.py Project: softaria/math-processor
import flask
from flask import request, abort, Response
import json
from executor import Executor
from plots import Plotter, PlotterError

app = flask.Flask(__name__)

executor = Executor()
plotter = Plotter()


@app.route('/', methods=['GET'])
def home():
    return '''<h1>Welcome to the REST service for SymPy</h1>
    '''


@app.route('/api/v1/custom', methods=['POST'])
def execute_custom():
    if not request.json:
        abort(400, "no json provided as the request body")
    if 'method' not in request.json:
        abort(400, "no 'method' field found in the JSON")
    if 'args' not in request.json:
        abort(400, "no 'args' field found in the JSON")

    # read the fields only after validating that they exist
    method = request.json["method"]
    args = request.json["args"]
    result = executor.run_custom(method, args)
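    # (snippet truncated here -- a hedged completion, assuming run_custom returns
    # a JSON-serializable value; the project's actual response handling may differ)
    return Response(json.dumps(result), mimetype='application/json')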
Code example #9
File: data.py Project: mx419/final_project
    def plots_for_boroughs(self):
        '''Return Plots by Borough'''
        plotter = Plotter(self.query())
        return plotter.borough_plots()
Code example #10
File: data.py Project: mx419/final_project
    def plots_for_zip_code(self, zip_code):
        ''' Return Plots by Zipcode'''
        zipdata = self.query({'zip_code': zip_code})

        plotter = Plotter(zipdata, "ZIP: %s" % zip_code)
        return plotter.all_plots()
Code example #11
File: main.py Project: bradford415/CALM
def main():
    # Maybe delete this ?
    group = 'lung'

    parser = argparse.ArgumentParser(description='classifier')
    parser.add_argument('--sample_file', type=str, default='lung.emx.txt', help="the name of the GEM organized by samples (columns) by genes (rows)")
    parser.add_argument('--label_file', type=str, default='sample_condition.txt', help="name of the label file: two columns that maps the sample to the label")
    parser.add_argument('--output_name', type=str, default='tissue-run-1', help="name of the output directory to store the output files")
    #parser.add_argument('--overwrite_output', type=bool, default=False, help="overwrite the output directory file if it already exists")
    parser.add_argument('--batch_size', type=int, default=16, help="size of batches to split data")
    parser.add_argument('--max_epoch', type=int, default=100, help="number of passes through a dataset")
    parser.add_argument('--learning_rate', type=float, default=0.001, help="controls the rate at which the weights of the model update")
    parser.add_argument('--test_split', type=float, default=0.3, help="percentage of test data, the train data will be the remaining data. 30% -> 0.3")
    parser.add_argument('--continuous_discrete', type=str, default='continuous', help="type of data in the sample file; typically RNA will be continuous and DNA will be discrete")
    # NOTE: argparse's type=bool treats any non-empty string as True, so the two
    # flags below only behave as expected when left at their defaults
    parser.add_argument('--plot_results', type=bool, default=True, help="plots the sample distribution, training/test accuracy/loss, and confusion matrix")
    parser.add_argument('--use_gpu', type=bool, default=False, help="true to use a gpu, false to use the cpu - if the node does not have a gpu then it will use the cpu")
    args = parser.parse_args()

    #If data is discrete, data should only range between 0-3
    #if args.continuous_discrete == "discrete":
        #args.input_num_classes = 4

    # Initialize file paths and create output folder
    LABEL_FILE = os.path.join(INPUT_DIR, args.label_file)
    SAMPLE_FILE = os.path.join(INPUT_DIR, args.sample_file)
    OUTPUT_DIR_FINAL = os.path.join(OUTPUT_DIR, args.output_name + "-" + str(datetime.today().strftime('%Y-%m-%d-%H:%M')))
    if not os.path.exists(OUTPUT_DIR_FINAL):
        os.makedirs(OUTPUT_DIR_FINAL)

    # Create log file to keep track of model parameters
    logging.basicConfig(filename=os.path.join(OUTPUT_DIR_FINAL,'classifier.log'),
                        filemode='w',
                        format='%(message)s',
                        level=logging.INFO)
    logger = logging.getLogger(__name__)
    logger.info('Classifier log file for ' + args.sample_file + ' - Started on ' + str(datetime.today().strftime('%Y-%m-%d-%H:%M')) + '\n')
    logger.info('Batch size: %d', args.batch_size)
    logger.info('Number of epochs: %d', args.max_epoch)
    logger.info('Learning Rate: %f', args.learning_rate)
    logger.info('Sample filename: ' + args.sample_file)
    logger.info('Output directory: ' + args.output_name)

    if args.continuous_discrete != 'continuous' and args.continuous_discrete != 'discrete':
        logger.error("ERROR: check that the continuous_discrete argument is spelled correctly.")
        logger.error("       only continuous or discrete data can be processed.")
        sys.exit("\nCommand line argument error. Please check the log file.\n")

    # Initialize gpu usage if desired
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda and args.use_gpu else "cpu")
    train_kwargs = {'batch_size': args.batch_size}
    test_kwargs = {'batch_size': args.batch_size}
    if use_cuda:
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)

    # Load matrix, labels/weights, and number of samples
    column_names = ("sample", "label")
    matrix_df = pd.read_csv(SAMPLE_FILE, sep='\t', index_col=[0])
    labels_df = pd.read_csv(LABEL_FILE, names=column_names, delim_whitespace=True, header=None)


    # Error checking: both files must contain the same number of samples, and sample names must be unique
    samples_unique = set(labels_df.iloc[:,0])
    assert len(labels_df) == len(matrix_df.columns)
    assert len(labels_df) == len(samples_unique)

    
    labels, class_weights = preprocessing.labels_and_weights(labels_df)
    args.output_num_classes = len(labels)
    is_binary = False
    if len(labels) == 2:
        is_binary = True
        args.output_num_classes = 1

    # Define model parameters
    batch_size = args.batch_size
    max_epoch = args.max_epoch
    learning_rate = args.learning_rate #5e-4
    num_features = len(matrix_df.index)

    # Setup model
    model = utils.Net(input_seq_length=num_features,
                  output_num_classes=args.output_num_classes).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)

    if is_binary:
        loss_fn = torch.nn.BCEWithLogitsLoss()
    else:
        loss_fn = torch.nn.CrossEntropyLoss()#(weight=class_weights)

    logger.info('Number of samples: %d\n', len(labels_df))
    logger.info('Labels: ')
    for i in range(len(labels)):
        logger.info('       %d - %s', i, labels[i])
    
    # Replace missing data with the global minimum of the dataset
    val_min, val_max = np.nanmin(matrix_df), np.nanmax(matrix_df)
    matrix_df.fillna(val_min, inplace=True)

    # Transposing matrix to align with label file
    matrix_transposed_df = matrix_df.T

    # Create density and tsne plot
    graphs = Plotter(OUTPUT_DIR_FINAL)
    graphs.density(matrix_df)
    graphs.tsne(matrix_transposed_df, labels_df, labels, title=args.sample_file)

    train_data, test_data = preprocessing.split_data(matrix_transposed_df, labels_df, args.test_split, args.output_num_classes)

    # Convert tuple of df's to tuple of np's
    # Allows the dataset class to access w/ data[][] instead of data[].iloc[]
    train_data_np = (train_data[0].values, train_data[1].values)
    test_data_np = (test_data[0].values, test_data[1].values)

    train_dataset = dataset.Dataset(train_data_np)
    test_dataset = dataset.Dataset(test_data_np)
    train_generator = data.DataLoader(train_dataset, **train_kwargs, drop_last=False)
    test_generator = data.DataLoader(test_dataset, **test_kwargs, drop_last=False)
    # drop_last=True would drop the last batch if the sample size is not divisible by the batch size

    logger.info('\nTraining size: %d \nTesting size: %d\n', len(train_dataset), len(test_dataset))

    # Create variables to store accuracy and loss
    loss_meter = utils.AverageMeter()
    loss_meter.reset()
    summary_file = pd.DataFrame([], columns=['Epoch', 'Training Loss', 'Accuracy', 'Accurate Count', 'Total Items'])
    train_stats = pd.DataFrame([], columns=['accuracy', 'loss'])
    test_stats = pd.DataFrame([], columns=['accuracy', 'loss'])

    # Train and test the model
    for epoch in range(args.max_epoch):
        train_stats = train(model, device, is_binary, train_generator, optimizer, loss_fn, batch_size, loss_meter, train_stats)
        test_stats = test(model, device, is_binary, test_generator, loss_fn, epoch, batch_size, loss_meter, test_stats, train_stats, logger)
        scheduler.step()

    # Training finished - Below is used for testing the network, plots and saving results
    if args.plot_results:
        y_predict_list = []
        y_target_list = []
        y_predict_list, y_target_list = forward(model, device, is_binary, test_generator, y_predict_list, y_target_list)

        graphs.accuracy(train_stats, test_stats, graphs_title=args.sample_file)
        graphs.confusion(y_predict_list, y_target_list, labels, cm_title=args.sample_file)
        logger.info("\n\nf1 score: %0.2f" % (f1_score(y_target_list, y_predict_list, average="weighted")))

    #summary_file.to_csv(RESULTS_FILE, sep='\t', index=False)
    logger.info('\nFinal Accuracy: %2.3f', test_stats.iloc[epoch]['accuracy'])
    logger.info('\nFinished at  ' + str(datetime.today().strftime('%Y-%m-%d-%H:%M')))
Code example #12
# initialize generative agent
genAgent = GenerativeAgent(params_gen, stimuli)

# initialize psi object
psi = PSI_RiF(params, stimuli)

# number of iterations of the experiment and (current) number of experiments
iterations_num = 500
experiments_num = 2
current_experiment_num = 0

# initialize plotter and plot the generative distribution, standard deviations, weights, PSE and the negative log likelihood
plotter = Plotter(params,
                  params_gen,
                  stimuli,
                  genAgent,
                  psi,
                  iterations_num,
                  plot_period=25)
plotter.plotGenProbTable()
plotter.plotGenStandardDeviations()
plotter.plotGenWeights()
plotter.plotGenPSE()
plotter.plotNegLogLikelihood(responses_num=500)
plotter.plot()

# initialize printer and print generative standard deviations, weights and bias
printer = Printer(params, stimuli, genAgent, psi, iterations_num,
                  experiments_num)
printer.printGenStandardDeviations()
printer.printGenWeights()
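
# (snippet truncated here -- a hedged sketch of the missing experiment loop, reusing
# the reset() calls shown in code example #7; the original's details may differ)
for _ in range(experiments_num):
    psi.reset('adaptive')        # assumed stimulus-selection mode
    plotter.reset()              # reset figures for the next experiment
    current_experiment_num += 1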