def test_converter(testname, fail_expected, error_text=None):
    """Convert a v1 manifest to v3, apply the result, and read it back.

    :param testname: key into the module-level ``data`` dict selecting the
        v1 manifest under test; also used to rename this test for reporting.
    :param fail_expected: when True, the conversion itself is expected to
        fail and ``error_text`` is asserted against the tool's stderr.
    :param error_text: expected error substring (only used on failure path).
    """
    # Rename the function so parameterized test output shows the case name.
    test_converter.__name__ = testname
    # Every case starts against an empty datastore.
    wipe_etcd(get_ip())
    manifest = data[testname]

    # Run the conversion under test.
    convert_rc = calicoctl("convert", data=manifest)
    if fail_expected:
        convert_rc.assert_error(error_text)
        return

    logger.debug("Trying to convert manifest from V1 to V3")
    convert_rc.assert_no_error()
    # Cleaned copy is used to derive kind/name; cleaning drops fields
    # (e.g. timestamps) that we do not want to compare.
    converted = clean_calico_data(yaml.safe_load(convert_rc.output))

    # Apply the raw converted output.
    create_rc = calicoctl("create", data=convert_rc.output)
    logger.debug("Trying to create resource using converted manifest")
    create_rc.assert_no_error()

    # Read the resource back out of the datastore.
    get_rc = calicoctl("get %s %s -o yaml" % (converted['kind'], name(converted)))

    # Compare cleaned versions so Creation Timestamp and server-added
    # fields do not cause spurious mismatches.
    logger.debug("Comparing 'get'ted output with original converted yaml")
    fetched = clean_calico_data(
        yaml.safe_load(get_rc.output),
        extra_keys_to_remove=['projectcalico.org/orchestrator', 'namespace'])
    convert_rc.assert_data(yaml.safe_dump(fetched))
def test_datastore_migrate(self):
    """
    Test that migrating Calico resources works properly.

    Flow: create one of each resource type in the etcd datastore, lock
    the datastore, export it, delete everything, import the export into
    a Kubernetes (kdd) datastore, then verify each resource survived the
    migration (or was intentionally skipped), unlock, and clean up.
    """
    # Create the ipv6 pool using calicoctl, and read it out using an
    # exact get and a list query.
    rc = calicoctl("create", data=ippool_name2_rev1_v6)
    rc.assert_no_error()
    rc = calicoctl("get ippool %s -o yaml" % name(ippool_name2_rev1_v6))
    rc.assert_data(ippool_name2_rev1_v6)
    rc = calicoctl("get ippool -o yaml")
    rc.assert_list("IPPool", [ippool_name2_rev1_v6])

    # Create a BGP Config
    rc = calicoctl("create", data=bgpconfig_name1_rev1)
    rc.assert_no_error()
    rc = calicoctl("get bgpconfig %s -o yaml" % name(bgpconfig_name1_rev1))
    rc.assert_data(bgpconfig_name1_rev1)
    rc = calicoctl("get bgpconfig -o yaml")
    rc.assert_list("BGPConfiguration", [bgpconfig_name1_rev1])

    # Create a BGP Peer
    rc = calicoctl("create", data=bgppeer_name1_rev1_v4)
    rc.assert_no_error()
    rc = calicoctl("get bgppeer %s -o yaml" % name(bgppeer_name1_rev1_v4))
    rc.assert_data(bgppeer_name1_rev1_v4)
    rc = calicoctl("get bgppeer -o yaml")
    rc.assert_list("BGPPeer", [bgppeer_name1_rev1_v4])

    # Create a Felix config
    # NOTE(review): only assert_no_error here (no assert_data) — presumably
    # the server fills in defaults that make an exact compare fail; confirm.
    rc = calicoctl("create", data=felixconfig_name1_rev1)
    rc.assert_no_error()
    rc = calicoctl("get felixconfig %s -o yaml" % name(felixconfig_name1_rev1))
    rc.assert_no_error()

    # Create a Global Network policy
    rc = calicoctl("create", data=globalnetworkpolicy_name1_rev1)
    rc.assert_no_error()
    rc = calicoctl("get globalnetworkpolicy %s -o yaml" % name(globalnetworkpolicy_name1_rev1))
    rc.assert_data(globalnetworkpolicy_name1_rev1)
    rc = calicoctl("get globalnetworkpolicy -o yaml")
    rc.assert_list("GlobalNetworkPolicy", [globalnetworkpolicy_name1_rev1])

    # Create a Global Network set
    rc = calicoctl("create", data=globalnetworkset_name1_rev1)
    rc.assert_no_error()
    rc = calicoctl("get globalnetworkset %s -o yaml" % name(globalnetworkset_name1_rev1))
    rc.assert_data(globalnetworkset_name1_rev1)
    rc = calicoctl("get globalnetworkset -o yaml")
    rc.assert_list("GlobalNetworkSet", [globalnetworkset_name1_rev1])

    # Create a HostEndpoint
    rc = calicoctl("create", data=hostendpoint_name1_rev1)
    rc.assert_no_error()
    rc = calicoctl("get hostendpoint %s -o yaml" % name(hostendpoint_name1_rev1))
    rc.assert_data(hostendpoint_name1_rev1)
    rc = calicoctl("get hostendpoint -o yaml")
    rc.assert_list("HostEndpoint", [hostendpoint_name1_rev1])

    # Create Network policy
    rc = calicoctl("create", data=networkpolicy_name1_rev1)
    rc.assert_no_error()
    rc = calicoctl("get networkpolicy %s -o yaml" % name(networkpolicy_name1_rev1))
    rc.assert_data(networkpolicy_name1_rev1)
    rc.assert_no_error()

    # Create NetworkSets
    rc = calicoctl("create", data=networkset_name1_rev1)
    rc.assert_no_error()
    rc = calicoctl("get networkset %s -o yaml" % name(networkset_name1_rev1))
    rc.assert_no_error()

    # Create a Node, this should also trigger auto-creation of a cluster info
    rc = calicoctl("create", data=node_name4_rev1)
    rc.assert_no_error()
    rc = calicoctl("get node %s -o yaml" % name(node_name4_rev1))
    rc.assert_data(node_name4_rev1)
    rc = calicoctl("get clusterinfo %s -o yaml" % name(clusterinfo_name1_rev1))
    rc.assert_no_error()

    # Create another Node, this node will not be imported because it does not
    # reference a real k8s node.
    rc = calicoctl("create", data=node_name5_rev1)
    rc.assert_no_error()
    rc = calicoctl("get node %s -o yaml" % name(node_name5_rev1))
    rc.assert_data(node_name5_rev1)

    # TODO: Pull code or modify tests to create IPAM objects for this test
    # since they cannot be created via calicoctl.

    # Export the data before locking the datastore: must be rejected
    # with NOT_LOCKED since migration requires a locked datastore.
    rc = calicoctl("datastore migrate export > test-migration")
    rc.assert_error(text=NOT_LOCKED)

    # Lock the data
    rc = calicoctl("datastore migrate lock")
    rc.assert_no_error()

    # Export the data after locking the datastore
    rc = calicoctl("datastore migrate export > test-migration")
    rc.assert_no_error()

    # Delete the data
    rc = calicoctl("delete ippool %s" % name(ippool_name2_rev1_v6))
    rc.assert_no_error()
    rc = calicoctl("delete bgpconfig %s" % name(bgpconfig_name1_rev1))
    rc.assert_no_error()
    rc = calicoctl("delete bgppeer %s" % name(bgppeer_name1_rev1_v4))
    rc.assert_no_error()
    rc = calicoctl("delete felixconfig %s" % name(felixconfig_name1_rev1))
    rc.assert_no_error()
    rc = calicoctl("delete globalnetworkpolicy %s" % name(globalnetworkpolicy_name1_rev1))
    rc.assert_no_error()
    rc = calicoctl("delete globalnetworkset %s" % name(globalnetworkset_name1_rev1))
    rc.assert_no_error()
    rc = calicoctl("delete hostendpoint %s" % name(hostendpoint_name1_rev1))
    rc.assert_no_error()
    rc = calicoctl("delete networkpolicy %s" % name(networkpolicy_name1_rev1))
    rc.assert_no_error()
    rc = calicoctl("delete networkset %s" % name(networkset_name1_rev1))
    rc.assert_no_error()
    rc = calicoctl("delete node %s" % name(node_name4_rev1))
    rc.assert_no_error()
    rc = calicoctl("delete node %s" % name(node_name5_rev1))
    rc.assert_no_error()

    # Attempt and fail to import the data into an etcd datastore
    rc = calicoctl("datastore migrate import -f test-migration")
    rc.assert_error(text=NOT_KUBERNETES)

    # Import the data into the kdd datastore. NOTE(review): a NO_IPAM
    # error is expected here — presumably because no IPAM objects were
    # exported (see TODO above); confirm this is the intended contract.
    rc = calicoctl("datastore migrate import -f test-migration", kdd=True)
    rc.assert_error(text=NO_IPAM)

    # Check that all the resources were imported properly
    rc = calicoctl("get ippool %s -o yaml" % name(ippool_name2_rev1_v6), kdd=True)
    rc.assert_data(ippool_name2_rev1_v6)
    rc = calicoctl("get bgpconfig %s -o yaml" % name(bgpconfig_name1_rev1), kdd=True)
    rc.assert_data(bgpconfig_name1_rev1)
    rc = calicoctl("get bgppeer %s -o yaml" % name(bgppeer_name1_rev1_v4),
                   kdd=True)
    rc.assert_data(bgppeer_name1_rev1_v4)
    rc = calicoctl("get felixconfig %s -o yaml" % name(felixconfig_name1_rev1), kdd=True)
    rc.assert_no_error()
    rc = calicoctl("get globalnetworkpolicy %s -o yaml" % name(globalnetworkpolicy_name1_rev1), kdd=True)
    rc.assert_data(globalnetworkpolicy_name1_rev1)
    rc = calicoctl("get globalnetworkset %s -o yaml" % name(globalnetworkset_name1_rev1), kdd=True)
    rc.assert_data(globalnetworkset_name1_rev1)
    rc = calicoctl("get hostendpoint %s -o yaml" % name(hostendpoint_name1_rev1), kdd=True)
    rc.assert_data(hostendpoint_name1_rev1)
    rc = calicoctl("get networkpolicy %s -o yaml" % name(networkpolicy_name1_rev1), kdd=True)
    rc.assert_data(networkpolicy_name1_rev1)
    rc = calicoctl("get networkset %s -o yaml" % name(networkset_name1_rev1), kdd=True)
    rc.assert_no_error()
    rc = calicoctl("get node %s -o yaml" % name(node_name4_rev1), kdd=True)
    rc.assert_no_error()
    # node_name5 does not map to a real k8s node, so it must NOT have
    # been migrated (see creation comment above).
    rc = calicoctl("get node %s -o yaml" % name(node_name5_rev1), kdd=True)
    rc.assert_error(text=NOT_FOUND)
    rc = calicoctl("get clusterinfo %s -o yaml" % name(clusterinfo_name1_rev1), kdd=True)
    rc.assert_no_error()

    # Unlock the datastore
    rc = calicoctl("datastore migrate unlock", kdd=True)
    rc.assert_no_error()

    # Clean up
    rc = calicoctl("delete ippool %s" % name(ippool_name2_rev1_v6), kdd=True)
    rc.assert_no_error()
    rc = calicoctl("delete bgpconfig %s" % name(bgpconfig_name1_rev1), kdd=True)
    rc.assert_no_error()
    rc = calicoctl("delete bgppeer %s" % name(bgppeer_name1_rev1_v4), kdd=True)
    rc.assert_no_error()
    rc = calicoctl("delete felixconfig %s" % name(felixconfig_name1_rev1), kdd=True)
    rc.assert_no_error()
    rc = calicoctl("delete globalnetworkpolicy %s" % name(globalnetworkpolicy_name1_rev1), kdd=True)
    rc.assert_no_error()
    rc = calicoctl("delete globalnetworkset %s" % name(globalnetworkset_name1_rev1), kdd=True)
    rc.assert_no_error()
    rc = calicoctl("delete hostendpoint %s" % name(hostendpoint_name1_rev1), kdd=True)
    rc.assert_no_error()
    rc = calicoctl("delete networkpolicy %s" % name(networkpolicy_name1_rev1), kdd=True)
    rc.assert_no_error()
    rc = calicoctl("delete networkset %s" % name(networkset_name1_rev1), kdd=True)
    rc.assert_no_error()
def test_conversion(self, testname, no_prompt):
    """
    Drive the full calico-upgrade flow for one resource and validate
    each stage: dry-run and start report files must match, the etcdv2
    Ready flag and etcdv3 datastoreReady flag must flip as asserted,
    and the migrated resource read via calicoctl (v3) must equal the
    output of 'calicoctl convert' for the same manifest.

    :param testname: key into ``data`` selecting the v1 manifest.
    :param no_prompt: when True the upgrade tool runs without a
        confirmation response; otherwise "yes" is supplied.
    """
    answer = None if no_prompt else "yes"
    manifest = data[testname]
    report_name = "convertednames"

    # Seed the v1 (etcdv2) datastore with the resource under test.
    calicoctlv2("create", data=manifest)
    logger.debug("INFO: dump of etcdv2:")
    dump_etcdv2()

    # Dry-run: must succeed and leave the v1 datastore Ready.
    upgrade_rc = calicoupgrade("dry-run")
    logger.debug("INFO: calico-upgrade dry-run should return 0.")
    upgrade_rc.assert_no_error()
    assert get_value_etcdv2("/calico/v1/Ready") == "true"
    dry_run_report = _get_readlines(report_name)
    logger.debug("INFO: calico-upgrade dry-run %s output:\n%s"
                 % (report_name, dry_run_report))

    # Start: locks v1 (Ready -> false); v3 is not marked ready yet.
    upgrade_rc = calicoupgrade("start", answer)
    logger.debug("INFO: calico-upgrade start should return 0.")
    upgrade_rc.assert_no_error()
    assert get_value_etcdv2("/calico/v1/Ready") == "false"
    assert _get_ready_etcdv3() is False
    start_report = _get_readlines(report_name)
    logger.debug("INFO: calico-upgrade start %s output:\n%s"
                 % (report_name, start_report))
    assert dry_run_report == start_report, \
        "INFO: calico-upgrade dry-run and start %s files are not equal" % report_name

    # Independently convert the manifest, then compare it against what
    # the migration actually wrote into the v3 datastore.
    convert_rc = calicoctl("convert", data=manifest)
    convert_rc.assert_no_error()
    converted = clean_calico_data(yaml.safe_load(convert_rc.output))
    logger.debug("INFO: converted data to v3\n%s" % converted)
    get_rc = calicoctl("get %s %s -o yaml"
                       % (converted['kind'], name(converted)))
    logger.debug(
        "INFO: calicoctl (v3) get - after calico-upgrade start: \n%s"
        % get_rc.output)
    # Compare cleaned copies so Creation Timestamp and server-added
    # fields do not cause spurious mismatches.
    logger.debug("Comparing 'get'ted output with original converted yaml")
    fetched = clean_calico_data(
        yaml.safe_load(get_rc.output),
        extra_keys_to_remove=['projectcalico.org/orchestrator', 'namespace'])
    convert_rc.assert_data(yaml.safe_dump(fetched))

    # Complete: v3 datastoreReady flips on; v1 stays locked.
    upgrade_rc = calicoupgrade("complete", answer)
    logger.debug("INFO: calico-upgrade complete should return 0.")
    upgrade_rc.assert_no_error()
    assert get_value_etcdv2("/calico/v1/Ready") == "false"
    assert _get_ready_etcdv3() is True