def update_invoice_status_for_pending_indents(date=None, force_run=False):
    """Fetch HPCL transactions for customers that still have un-invoiced
    indent items, then reconcile OMC transactions against pending indents.

    date: date to fetch transactions for; defaults to today().
    force_run: when True, bypass the skip_run() scheduling guard.
    """
    # Respect the scheduler guard unless the caller forces execution.
    if not force_run and skip_run():
        return

    # Distinct (customer, credit_account) pairs for HPCL indent items that
    # have no non-cancelled Indent Invoice and no invoice reference yet.
    customer_account_list = frappe.db.sql(
        """
        select distinct iitm.customer, iitm.credit_account
        from `tabIndent Item` iitm
        left join `tabIndent` ind on iitm.parent = ind.name
        where plant like 'hpcl%'
        and iitm.name not in (
            select indent_item from `tabIndent Invoice`
            where docstatus != 2 and ifnull(indent_item, '')!=''
        )
        and ifnull(iitm.invoice_reference, '') = ''
        """,
        as_dict=True,
    )

    fetch_and_record_hpcl_transactions(customer_account_list, date or today())
    reconcile_omc_txns_with_indents()
from utils import skip_run
from data.data_utils import *
from data_GMU import *

# Step 1: fetch/extract the GMU acc data.
with skip_run('run', 'Get GMU acc data') as check, check():
    extract_GMU()

# Step 2: preprocess the GMU acc data into mels and save as HDF.
with skip_run('run', 'Create GMU acc data mels, save as hdf') as check, check():
    preprocess_GMU()
from models.task_classification import task_type_classification
from models.density_estimation import estimate_density
from models.statistical_test import friedman_test
from visualization.visualize import (plot_classification_accuracy,
                                     plot_reaction_time,
                                     plot_box_reaction_time,
                                     plot_detection_false_alarm)
from utils import (skip_run, save_dataset)

# The configuration file
config_path = Path(__file__).parents[1] / 'src/config.yml'
config = yaml.load(open(str(config_path)), Loader=yaml.SafeLoader)

with skip_run('skip', 'create_dataset') as check, check():
    data, dataframe, secondary_dataframe = create_dataframe(
        config['subjects'], config)

    # Persist both dataframes under the repository root.
    root = Path(__file__).parents[1]
    save_dataset(str(root / config['processed_dataframe']),
                 dataframe,
                 save=True,
                 use_pandas=True)
    save_dataset(str(root / config['secondary_dataframe']),
                 secondary_dataframe,
                 save=True,
                 use_pandas=True)

    # Saving the full processed dataset is currently disabled:
    # save_path = Path(__file__).parents[1] / config['processed_dataset']
    # save_dataset(str(save_path), data, save=True)
from models.utils import (save_trained_pytorch_model,
                          load_trained_pytorch_model)
from visualization.visualise import (plot_average_model_accuracy, plot_bar,
                                     plot_accuracy_bar,
                                     plot_accuracy_bar_transfer,
                                     plot_predictions)
from visualization.utils import plot_settings
from utils import skip_run

# The configuration file
config_path = Path(__file__).parents[1] / 'src/config.yml'
config = yaml.load(open(str(config_path)), Loader=yaml.SafeLoader)

with skip_run('skip', 'create_emg_data') as check, check():
    data = create_emg_data(config['subjects'], config['trials'], config)
    # NOTE(review): save=False here — the raw EMG data is built but not
    # actually written to disk; confirm this is intentional.
    save_path = Path(__file__).parents[1] / config['raw_emg_data']
    save_data(str(save_path), data, save=False)

with skip_run('skip', 'create_epoch_emg_data') as check, check():
    data = create_emg_epoch(config['subjects'], config['trials'], config)
    # Save the epoched dataset.
    save_path = Path(__file__).parents[1] / config['epoch_emg_data']
    save_data(str(save_path), data, save=True)

with skip_run('skip', 'clean_epoch_emg_data') as check, check():
    data = clean_epoch_data(config['subjects'], config['trials'], config)
import yaml
from pathlib import Path
import time
import ray
from server.parameters import ParameterServer
from envs.environments import Benning
from utils import skip_run

# The configuration file
config_path = Path(__file__).parents[1] / 'offset-game/config.yml'
config = yaml.load(open(str(config_path)), Loader=yaml.SafeLoader)

with skip_run('run', 'learning tactic') as check, check():
    # Bring up ray once per process.
    if not ray.is_initialized():
        ray.init(num_cpus=4)

    # Remote parameter server and environment actors.
    ps = ParameterServer.remote()
    env = Benning.remote(config)

    # Each row: [n_robots, primitive, target_node_id, 0, 0, 0]
    net_output = [
        [20, 1, 38, 0, 0, 0],
        [10, 1, 39, 0, 0, 20],
        [20, 1, 40, 0, 0, 0],
        [12, 1, 15, 0, 0, 0],
        [9, 1, 12, 0, 0, 0],
        [4, 1, 11, 0, 0, 0],
    ]
from models import car_maneuver, hammering
from models import optimize
from models.utils import export_trajectory_data
from visualization.visualize import (plot_optimal_trajectories,
                                     plot_magnet_hammer_path,
                                     plot_magnet_hammer,
                                     plot_simulation_trajectories,
                                     plot_experiment_trajectories)
from visualization.utils import plot_settings
from utils import (skip_run, save_model_log)

# The configuration file
config_path = Path(__file__).parents[1] / 'src/config.yml'
config = yaml.load(open(str(config_path)), Loader=yaml.SafeLoader)

with skip_run('skip', 'car_maneuver_model') as check, check():
    # Build the car manoeuvre motion model with a 50.0 final time.
    tf = 50.0
    m = car_maneuver.motion_model(tf)

with skip_run('skip', 'dynamic_model_binary_search') as check, check():
    # Binary search over the final time tf; tolerance is 10e-3 (= 0.01).
    # NOTE(review): the literal 10e-3 may have been intended as 1e-3 — confirm.
    # NOTE(review): in the visible lines neither tf_min nor tf_max is updated
    # inside the loop — presumably the bound update happens further down in
    # the file (this chunk appears truncated); otherwise the loop never
    # terminates.
    tf_min = 0.7
    tf_max = 2.0
    v = [0]
    t = []
    while (tf_max - tf_min) >= 10e-3:
        print(tf_max)
        m = hammering.dynamic_motion_model(tf_max, 'variable_stiffness',
                                           config)
        m, optimal_values, solution = optimize.run_optimization(m, 200)
        # Record the last 'hv' (hammer velocity?) value of each run — TODO
        # confirm the meaning of 'hv' against the optimization model.
        temp = optimal_values['hv'].values
        v.append(temp[-1])
import yaml
from pathlib import Path
from utils import skip_run

# Load the experiment configuration.
config_path = Path(__file__).parents[1] / 'src/config.yml'
config = yaml.load(open(str(config_path)), Loader=yaml.SafeLoader)

# Placeholder pipeline stage; nothing implemented yet.
with skip_run('skip', 'Data') as check, check():
    pass
from features.haptic_features import create_haptic_features
from features.utils import (save_to_r_dataset, read_with_pickle,
                            read_with_deepdish)
from models.index_validation import validate_engagement_index
from visualization.visualize import (topo_map, force_error,
                                     plot_mixed_effect_model)
from utils import (skip_run, save_with_deepdish, save_with_pickle)

# The configuration file
config_path = Path(__file__).parents[1] / 'src/config.yml'
config = yaml.load(open(str(config_path)), Loader=yaml.SafeLoader)

with skip_run('skip', 'create_eeg_dataset') as check, check():
    # BUG FIX: the result was previously assigned to the name `eeg_dataset`,
    # shadowing the callable of the same name; use a distinct local so the
    # function remains callable afterwards.
    raw_eeg = eeg_dataset(config)
    save_path = Path(__file__).parents[1] / config['raw_eeg_dataset']
    save_with_deepdish(str(save_path), raw_eeg, save=True)

with skip_run('skip', 'clean_eeg_dataset') as check, check():
    # Same shadowing fix: `clean_dataset` used to be overwritten by its
    # own result.
    cleaned = clean_dataset(config)
    save_path = Path(__file__).parents[1] / config['clean_eeg_dataset']
    save_with_deepdish(str(save_path), cleaned, save=True)

with skip_run('skip', 'index_validation') as check, check():
    index_validation_dataset = validate_engagement_index(config)
    print(index_validation_dataset)
    save_path = Path(__file__).parents[1] / config['index_validation_dataset']
    save_with_deepdish(str(save_path), index_validation_dataset, save=True)
import yaml
from pathlib import Path
from envs.enhance_env import EnhanceEnv
from default_actions.default_actions import (blue_team_actions,
                                             red_team_actions)
from gui.gui_main import MainGUI
from utils import skip_run

# Simulation configuration.
config_path = Path(__file__).parents[1] / 'hsi/config/simulation_config.yml'
config = yaml.load(open(str(config_path)), Loader=yaml.SafeLoader)

with skip_run('skip', 'Test New Framework') as check, check():
    # Step the environment once with the default action sets of both teams.
    default_blue_actions = blue_team_actions(config)
    default_red_actions = red_team_actions(config)
    config['simulation']['map_to_use'] = 'buffalo-medium'
    env = EnhanceEnv(config)
    env.step(default_blue_actions, default_red_actions)

with skip_run('run', 'Test New GUI') as check, check():
    # Launch the GUI at 1200x800 on the medium buffalo map.
    config['simulation']['map_to_use'] = 'buffalo-medium'
    gui = MainGUI(1200, 800, config)
    gui.run()
def fetch_and_record_iocl_transactions(customer_list, for_date=None, force_run=False):
    """Log into the IOCL portal for each customer account, pull the
    transactions since yesterday (or for `for_date`) and record each new
    one as an `OMC Transactions` document.

    customer_list: objects carrying portal credentials (`.id`, `.passwd`).
    for_date: optional date passed as both bounds of the portal query.
    force_run: when True, bypass the skip_run() scheduling guard.
    """
    if not force_run and skip_run():
        return

    # IOCL material codes with non-obvious ERP item codes; anything else
    # passes through unchanged. (Replaces the previous if-chain.)
    item_code_map = {
        'M00002': 'FC19',
        'M00065': 'FC47.5',
        'M00069': 'FC47.5',
    }

    def get_item(item_code):
        # Translate an IOCL material code to the ERP item code.
        return item_code_map.get(item_code, item_code)

    def get_plant(plant_code):
        # Fall back to the raw portal code when there is no mapping.
        return IOCL_PLANT_CODE_MAP.get(plant_code, plant_code)

    for customer in customer_list:
        portal = IOCLPortal(customer.id, customer.passwd)
        portal.login()
        txns = portal.transactions_since_yesterday(
            for_date, for_date, mode=dict)

        for txn in txns['txns']:
            # Skip documents already recorded. Parameterized query instead
            # of the previous str.format() interpolation.
            if frappe.db.sql(
                    'SELECT name FROM `tabOMC Transactions` '
                    'WHERE document_no=%s',
                    (int(txn['Doc. No']),)):
                continue

            # Resolve the ship-to party to a registered customer, if any.
            registration = None
            if txn['Ship to Party'].strip():
                registration = frappe.db.get_value(
                    "OMC Customer Registration",
                    {'customer_code': int(txn['Ship to Party'].strip())},
                    ["customer"], as_dict=True)

            doc = frappe.get_doc({
                'customer': registration.customer if registration else '',
                # Portal dates are DD.MM.YYYY; store them as YYYY-MM-DD.
                'date': '-'.join(reversed(txn['Tran. Date'].split('.'))),
                'doctype': 'OMC Transactions',
                'document_no': int(txn['Doc. No']),
                'debit': txn['Bill Amt'] if txn['Db/Cr'] == 'D' else 0,
                'credit': txn['Bill Amt'] if txn['Db/Cr'] == 'C' else 0,
                'item': get_item(txn['Material']),
                'quantity': txn['Bill Qty'],
                'vehicle_no': strip_vehicle(txn['TTNO']),
                'plant': get_plant(txn['Plant']),
                'supplier': 'IOCL',
                # Keep the raw portal row for auditing.
                'dump': json.dumps(txn),
                'account_number': customer.id,
            })
            doc.ignore_permissions = True
            doc.save()
            # Commit after each record. NOTE(review): the original collapsed
            # source makes the commit's loop level ambiguous — confirm.
            frappe.db.commit()
from hydra.experimental import compose, initialize
from utils import skip_run

# Initialize the config directory
initialize(config_path="configs", job_name="learning")

with skip_run('skip', 'split_image_folder') as check, check():
    # Compose the hydra configuration; the actual image-folder split is a
    # placeholder and has not been implemented yet.
    hparams = compose(config_name="config")
    raise NotImplementedError
import yaml
from pathlib import Path
from data.create_data import create_eeg_data
from utils import skip_run

# Experiment configuration.
config_path = Path(__file__).parents[1] / 'src/config.yml'
config = yaml.load(open(str(config_path)), Loader=yaml.SafeLoader)

# BUG FIX: this step appeared twice verbatim (two identical
# skip_run('run', 'Create EEG data') blocks), so the EEG data was created
# twice per run. The step is kept once.
with skip_run('run', 'Create EEG data') as check, check():
    create_eeg_data(config)
from envs.environments import Benning
from envs.utils import get_xy_position
# from models.torch_network import Actor, Critic
# from models.torch_train import AdvantageCritic
from visualization.utils import plot_occupancy_map
from utils import skip_run

# The configuration file
config_path = Path(__file__).parents[1] / 'src/config.yml'
config = yaml.load(open(str(config_path)), Loader=yaml.SafeLoader)

with skip_run('skip', 'convert lat-long to cartesian') as check, check():
    get_xy_position(config)

with skip_run('skip', 'plot occupancy grid') as check, check():
    # NOTE(review): `time`, `plt`, `np` and `p` (pybullet?) are not imported
    # in the visible part of this file — presumably imported above; confirm.
    # NOTE(review): the loop/if nesting below is reconstructed from collapsed
    # source; verify the intended indentation against the original file.
    env = Benning(config)
    program_starts = time.time()
    fig, ax = plt.subplots()
    for i in range(10000):
        if i == 10:
            # Capture the camera once, at step 10, and plot the depth image
            # rotated by 180 degrees (np.rot90 with k=2).
            rgbImg, depthImg, segImg = env.get_camera_image()
            plot_occupancy_map(ax, np.rot90(depthImg, k=2), config,
                               save_array=True)
            plt.show()
        p.stepSimulation()