The NCP YAML file includes the ConfigMap nsx-node-agent-config. You can update the options to match your environment. Below is the nsx-node-agent-config ConfigMap from ncp-ubuntu-policy.yaml.

apiVersion: v1
kind: ConfigMap
metadata:
  name: nsx-node-agent-config
  namespace: nsx-system
  labels:
    version: v1
data:
  ncp.ini: |
    [DEFAULT]

    # If set to true, the logging level will be set to DEBUG instead of the
    # default INFO level.
    #debug = False

    # If set to true, use syslog for logging.
    #use_syslog = False

    # The base directory used for relative log_file paths.
    #log_dir = <None>

    # Name of log file to send logging output to.
    #log_file = <None>

    # max MB for each compressed file. Defaults to 100 MB.
    #log_rotation_file_max_mb = 100

    # max MB for each compressed file for API logs. Defaults to 10 MB.
    #api_log_rotation_file_max_mb = 10

    # Total number of compressed backup files to store. Defaults to 5.
    #log_rotation_backup_count = 5

    # Total number of compressed backup files to store for API logs. Defaults to 5.
    #api_log_rotation_backup_count = 5

    # Log level for the root logger. If debug=True, the default root logger
    # level will be DEBUG regardless of the value of this option. If this
    # option is unset, the default root logger level will be either DEBUG or
    # INFO according to the debug option value
    # Choices: NOTSET DEBUG INFO WARNING ERROR CRITICAL
    #loglevel = <None>

    [coe]

    # Container orchestrator adaptor to plug in.
    #adaptor = kubernetes

    # Specify cluster for adaptor.
    #cluster = k8scluster

    # Log level for NCP modules (controllers, services, etc.). Ignored if debug
    # is True
    # Choices: NOTSET DEBUG INFO WARNING ERROR CRITICAL
    #loglevel = <None>

    # Log level for NSX API client operations. Ignored if debug is True
    # Choices: NOTSET DEBUG INFO WARNING ERROR CRITICAL
    #nsxlib_loglevel = <None>

    # Enable SNAT for all projects in this cluster. Modification of topologies
    # for existing Namespaces is not supported if this option is reset.
    #enable_snat = True

    # The time in seconds for NCP/nsx_node_agent to recover the connection to
    # NSX manager/container orchestrator adaptor/Hyperbus before exiting. If
    # the value is 0, NCP/nsx_node_agent won't exit automatically when the
    # connection check fails
    #connect_retry_timeout = 0

    # Enable system health status report for SHA
    #enable_sha = True

    [k8s]

    # Kubernetes API server IP address.
    #apiserver_host_ip = <None>

    # Kubernetes API server port.
    #apiserver_host_port = <None>

    # Full path of the Token file to use for authenticating with the k8s API
    # server.
    client_token_file = /var/run/secrets/kubernetes.io/serviceaccount/token

    # Specify a CA bundle file to use in verifying the k8s API server
    # certificate.
    ca_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt

    # Specify whether ingress controllers are expected to be deployed in
    # hostnetwork mode or as regular pods externally accessed via NAT
    # Choices: hostnetwork nat
    #ingress_mode = hostnetwork

    # Log level for the kubernetes adaptor. Ignored if debug is True
    # Choices: NOTSET DEBUG INFO WARNING ERROR CRITICAL
    #loglevel = <None>

    # Specify whether the TCP connections between the LoadBalancer service and
    # backend can be reused by multiple client requests.
    #lb_connection_multiplexing_enabled = False

    # Specify the maximum number of multiplexing connections.
    #lb_connection_multiplexing_number = 6

    # User specified IP address for HTTP and HTTPS ingresses
    #http_and_https_ingress_ip = <None>

    # Set this option to configure the ability to allow a virtual IP that is
    # not in the range of external_ip_pools_lb specified in spec.loadBalancerIP
    # of K8s service of type LoadBalancer to be realized in NSX. When the value
    # is relaxed, any IP specified in spec.loadBalancerIP can be allowed. When
    # the value is strict, only IP within the range of external_ip_pools_lb
    # will be allowed.
    # Choices: relaxed strict
    #lb_ip_allocation = relaxed


    # Set this to True to enable NCP to create LoadBalancer on a Tier-1 for
    # LoadBalancer CRD. This option does not support LB autoscaling.
    #enable_lb_crd = False

    # Option to set the type of baseline cluster policy. ALLOW_CLUSTER creates
    # an explicit baseline policy to allow any pod to communicate with any
    # other pod within the cluster. ALLOW_NAMESPACE creates an explicit baseline
    # policy to allow pods within the same namespace to communicate with each
    # other.
    # ALLOW_NAMESPACE_STRICT inherits the behaviors of ALLOW_NAMESPACE, and
    # also restricts service talk to resources outside the cluster. By default,
    # no baseline rule will be created and the cluster will assume the default
    # behavior as specified by the backend.
    # Choices: <None> allow_cluster allow_namespace allow_namespace_strict
    #baseline_policy_type = <None>

    # Set this to True to enable NCP to report NSX backend errors to k8s
    # objects using k8s events
    #enable_ncp_event = False

    # Set this to True to enable multus to create multiple interfaces for one
    # pod. Requires policy_nsxapi set to True to take effect. If passthrough
    # interface is used as additional interface, user should deploy the network
    # device plugin to provide device allocation information for NCP. Pod
    # annotations with prefix "k8s.v1.cni.cncf.io" cannot be modified once pod
    # is realized. User defined IP will not be allocated from the Segment
    # IPPool. The "gateway" in NetworkAttachmentDefinition is not used to
    # configure secondary interfaces, as the default gateway of Pod is
    # configured by the primary CNI on the main network interface. User must
    # define IP and/or MAC if no "ipam" is configured. Only available if node
    # type is HOSTVM and not to be leveraged in conjunction with 3rd party CNI
    # plugin
    #enable_multus = False

    # Set this to True to enable NSX restore support (only effective in NSX
    # Policy API mode).
    #enable_restore = False

    # nsx-node-agent will add iptables rules for K8s pods that have hostPort;
    # client packets to the hostPort will be SNATed to the node IP. We leverage
    # the portmap plugin to add iptables DNAT rules for hostPort ingress
    # traffic. This hostPort feature is only supported on K8s Linux nodes.
    #enable_hostport_snat = False

    # If true, the pod IP of a statefulset will be allocated from the ip_range
    # in the annotation of the statefulset. It only works for policy mode.
    #statefulset_ip_range = False

    # If true, the user can set the ncp/subnets annotation on a namespace to
    # specify the subnets for a no-SNAT namespace. It only works for policy
    # mode.
    #enable_namespace_subnets = False

    [nsx_kube_proxy]

    [nsx_node_agent]

    # The log level of NSX RPC library. Ignored if debug is True
    # Choices: NOTSET DEBUG INFO WARNING ERROR CRITICAL
    #nsxrpc_loglevel = ERROR

    # The time in seconds for nsx_node_agent to backoff before re-using an
    # existing cached CIF to serve CNI request. Must be less than
    # config_retry_timeout.
    #config_reuse_backoff_time = 15

    # The OVS uplink OpenFlow port to apply the NAT rules to.
    #ovs_uplink_port = <None>

    # Set this to True if you want to install and use the NSX-OVS kernel
    # module. If the host OS is supported, it will be installed by nsx-ncp-
    # bootstrap and used by nsx-ovs container in nsx-node-agent pod. Note that
    # you would have to add (uncomment) the volumes and mounts in the nsx-ncp-
    # bootstrap DS and add SYS_MODULE capability in nsx-ovs container spec in
    # nsx-node-agent DS. Failing to do so will result in failure of
    # installation and/or kernel upgrade of the NSX-OVS kernel module.
    #use_nsx_ovs_kernel_module = False

    # The time in seconds for nsx_node_agent to call an OVS command. Please
    # increase the time if OVS is under heavy load creating/deleting ports
    #ovs_operation_timeout = 5

    # Set to true to allow the CNI plugin to enable IPv6 container interfaces
    #enable_ipv6 = False

    # Set to True if DHCP is configured on the "ovs_uplink_port". "auto" will
    # try to automatically infer it, but this only works on CoreOS. On other
    # host OS types, it defaults to False
    # Choices: True False auto
    #is_dhcp_configured_on_ovs_uplink_port = auto

    # The MTU value for nsx-cni
    #mtu = 1500

    # The waiting time before nsx-node-agent returns a response to the CNI
    # plugin; there is a potential timing issue between port creation and the
    # related firewall config update on the hypervisor host
    #waiting_before_cni_response = 0

    # If this option is True, the nsx-ncp-bootstrap pod will install the
    # portmap plugin from the nsx-ncp image, and the nsx-ncp-cleanup pod will
    # remove it.
    #use_ncp_portmap = False
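
For example, to raise the nsx-node-agent logging level to DEBUG, you could uncomment the debug option in the [DEFAULT] section of the ncp.ini data shown above. A minimal sketch of the edited section, assuming the rest of the ConfigMap is left unchanged:

  ncp.ini: |
    [DEFAULT]

    # If set to true, the logging level will be set to DEBUG instead of the
    # default INFO level.
    debug = True

After editing, reapply the file with kubectl apply -f ncp-ubuntu-policy.yaml and restart the nsx-node-agent pods, for example with kubectl rollout restart daemonset nsx-node-agent -n nsx-system (assuming the DaemonSet keeps its default name), so that the agents pick up the updated configuration.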