def get_vms(session_id, page_size):

    # Collect VmStat objects keyed by VM name.
    vms = {}

    vm_filter_spec = VirtualMachineFilterSpec()
    vm_filter_spec.live = "true"
    vm_filter_spec.limit = page_size

    # Fetch the VMs that match the live filter.
    live_vms = tintri.get_vms(filters=vm_filter_spec)
    if live_vms.filteredTotal == 0:
        raise TintriServerError(0, cause="No live VMs present")

    print_info("Live Total = " + str(live_vms.filteredTotal))
    print ""

    count = 1
    for vm in live_vms:
        vm_name = vm.vmware.name
        vm_uuid = vm.uuid.uuid
        if debug_mode:
            dump_object(vm.stat.sortedStats[0])

        vm_stats = VmStat(vm_name, vm_uuid, vm.stat.sortedStats[0])
        print_debug(str(count) + ": " + vm_name + ", " + vm_uuid)
        count += 1

        # Store the VM stats object keyed by VM name.
        vms[vm_name] = vm_stats

    return vms
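A minimal usage sketch for the helper above, assuming (as the body implies) that tintri is an already logged-in Tintri PySDK session held as a module global; the session id and page size below are placeholders:

# Hypothetical driver; `tintri` must already be a logged-in session global,
# and VmStat is the stats wrapper referenced above.
vms_by_name = get_vms(session_id="placeholder-session-id", page_size=25)
for vm_name, vm_stats in sorted(vms_by_name.items()):
    print(vm_name)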
Example #2
File: task.py Project: deaputri/kfp
def main():
    """
    Main function
    """

    args = get_args()

    output_bucket = args.pathoutput

    storage = args.storage

    full_table_path = args.bqtable

    # Fetch training data from BigQuery or GCS
    if storage in ['BQ', 'bq', 'bigquery', 'BigQuery', 'bigQuery', 'Bigquery', 'Bq']:
        dataset = utils.read_df_from_bigquery(full_table_path)
    else:
        dataset = utils.get_data_from_gcs(args.pathdata)

    x_train, y_train, x_val, y_val = utils.data_train_test_split(dataset)

    # Get pipeline and fit model
    pipeline = model.get_pipeline()

    pipeline.fit(x_train, y_train)

    scores = model_selection.cross_val_score(pipeline, x_val, y_val, cv=3)

    print('model score: %.3f' % pipeline.score(x_val, y_val))
    print('pipeline run done :)')

    # Output results and trained model
    model_output_path = os.path.join(output_bucket, 'model/', metadata.MODEL_FILE_NAME)

    metric_output_path = os.path.join(output_bucket, 'experiment', metadata.METRIC_FILE_NAME)

    utils.dump_object(pipeline, model_output_path)
    utils.dump_object(scores, metric_output_path)

    joblib_output_path = os.path.join(output_bucket)

    # Write the GCS output path to a local file
    # so it can be passed to the next pipeline component
    Path(args.pathoutputfile).parent.mkdir(parents=True, exist_ok=True)
    Path(args.pathoutputfile).write_text(joblib_output_path)
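The args.* accesses above imply a command-line interface roughly like the following. This is a hypothetical reconstruction of get_args(): the flag names are inferred from the attribute accesses, while the help text and default are assumptions.

import argparse

def get_args():
    # Hypothetical parser; flag names mirror the args.* attributes used in main().
    parser = argparse.ArgumentParser(description='Train a pipeline from BigQuery or GCS data')
    parser.add_argument('--pathdata', help='GCS path to the training data')
    parser.add_argument('--pathoutput', help='GCS bucket for model and metric artifacts')
    parser.add_argument('--pathoutputfile', help='local file that receives the output GCS path')
    parser.add_argument('--storage', default='gcs', help="data source: 'bq' or 'gcs'")
    parser.add_argument('--bqtable', help='fully qualified BigQuery table path')
    return parser.parse_args()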
Example #3
def main():

    args = get_args()

    path_data = args.pathdata

    output_bucket = args.pathoutput

    storage = args.storage

    full_table_path = args.bqtable

    if storage in [
            'BQ', 'bq', 'bigquery', 'BigQuery', 'bigQuery', 'Bigquery', 'Bq'
    ]:
        dataset = utils.read_df_from_bigquery(full_table_path)
    else:
        dataset = utils.get_data_from_gcs(path_data)

    x_train, y_train, x_val, y_val = utils.data_train_test_split(dataset)

    pipeline = model.get_pipeline()

    pipeline.fit(x_train, y_train)

    scores = model_selection.cross_val_score(pipeline, x_val, y_val, cv=3)

    print("model score: %.3f" % pipeline.score(x_val, y_val))
    print('pipeline run done :)')

    model_output_path = os.path.join(output_bucket, 'model/',
                                     metadata.MODEL_FILE_NAME)

    metric_output_path = os.path.join(output_bucket, 'experiment',
                                      metadata.METRIC_FILE_NAME)

    utils.dump_object(pipeline, model_output_path)
    utils.dump_object(scores, metric_output_path)

    joblib_output_path = os.path.join(output_bucket)

    Path(args.pathoutputfile).parent.mkdir(parents=True, exist_ok=True)
    Path(args.pathoutputfile).write_text(joblib_output_path)
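The list of accepted spellings can be collapsed by normalizing case before the comparison; a simpler alternative to the dispatch above (it additionally accepts capitalizations the list misses, such as 'BIGQUERY'):

# Equivalent dispatch: lower-case the flag once instead of enumerating spellings.
if storage.lower() in ('bq', 'bigquery'):
    dataset = utils.read_df_from_bigquery(full_table_path)
else:
    dataset = utils.get_data_from_gcs(path_data)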
Example #4
def clone_vm(uuid, clone_name, vcenter_name, datastore_name, num_clones):

    # Create and initialize the clone spec minus the UUID
    clone_spec = VirtualMachineCloneSpec()
    clone_spec.consistency = 'CRASH_CONSISTENT'
    clone_spec.count = num_clones

    vmware_clone_info = VMwareCloneInfo()
    clone_spec.vmware = vmware_clone_info
    clone_spec.vmware.cloneVmName = clone_name
    clone_spec.vmware.vCenterName = vcenter_name
    clone_spec.vmware.datastoreName = datastore_name

    # Determine whether the UUID is a Tintri VM or snapshot UUID and set the appropriate field.
    if "VIM" in uuid:
        clone_spec.vmId = uuid
    elif "SST" in uuid:
        clone_spec.snapshotId = uuid
    else:
        raise TintriServerError(0,
                                cause="UUID is not a VM or snapshot UUID: " +
                                uuid)

    print_info("Cloning " + uuid + " to " + clone_name + " for " +
               str(num_clones) + " times")

    if debug_mode:
        dump_object(clone_spec)

    # Clone the VM specified by the clone_payload
    task_result = tintri.clone_vm(clone_spec, True)

    # Get the desired task info
    task_uuid = task_result.uuid.uuid
    task_state = task_result.state
    task_progress = task_result.progressDescription
    task_type = task_result.type
    print_info(task_type + ": " + task_uuid + ": " + task_state + " - " +
               task_progress)
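A hypothetical invocation, again assuming a logged-in tintri session global; every argument below is a placeholder, and a real UUID would come from a VM or snapshot listing (Tintri VM UUIDs contain "VIM", snapshot UUIDs contain "SST", which is what the branch above keys on):

# Placeholders throughout; substitute a real Tintri VM or snapshot UUID.
clone_vm(uuid="0000-VIM-0000",
         clone_name="clone-base",
         vcenter_name="vcenter.example.com",
         datastore_name="datastore1",
         num_clones=3)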
Example #5
    def dump(self, filename):
        dump_object(filename, self.pack())
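Note that this project's dump_object takes (filename, object), the reverse of the utils.dump_object(object, path) order used in the kfp examples above; the same name covers unrelated helpers across projects.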
Example #6
from config_tools import load_config, build_graph, create_index_from_config
from utils import dump_object, print_object
from index import diff_trees
from graph import depth_first_search, find_roots
import json

# index1 = create_index_from_config(".apps/alcuin1.json")
index1 = create_index_from_config(".apps/MDC_ENSSUP_EVO.json")

# index2 = create_index_from_config(".apps/alcuin1_other.json")
index2 = create_index_from_config(".apps/MODULE_ENSSUP.json")

(id1, t1) = index1.write_tree()
(id2, t2) = index2.write_tree()

t = dict(t1, **t2)
dump_object("trees.json", t)
dump_object('i1.json', dict(index1.blobs, **index2.blobs))


def visitor(*args):
    print(args)


diff_trees(visitor, id1, id2, t)
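For context, dict(t1, **t2) builds a merged copy in which t2's entries win on duplicate keys, so the combined tree map above prefers index2's entries; a tiny illustration:

# Merge semantics used above: the keyword-expanded dict overrides the first.
a = {"x": 1, "y": 2}
b = {"y": 3}
assert dict(a, **b) == {"x": 1, "y": 3}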
Example #7
    def _check_params(self):
        app_log.debug("jdbe:check:start")
        utils.dump_object(self)
        # raise TypeError("Parameter error")
        app_log.debug("jdbe:check:complete")