[DEFAULT] # # From oslo.log # # If set to true, the logging level will be set to DEBUG instead of the default # INFO level. (boolean value) # Note: This option can be changed without restarting. #debug = false # The name of a logging configuration file. This file is appended to any # existing logging configuration files. For details about logging configuration # files, see the Python logging module documentation. Note that when logging # configuration files are used then all logging configuration is set in the # configuration file and other logging configuration options are ignored (for # example, log-date-format). (string value) # Note: This option can be changed without restarting. # Deprecated group/name - [DEFAULT]/log_config #log_config_append = # Defines the format string for %%(asctime)s in log records. Default: # %(default)s . This option is ignored if log_config_append is set. (string # value) #log_date_format = %Y-%m-%d %H:%M:%S # (Optional) Name of log file to send logging output to. If no default is set, # logging will go to stderr as defined by use_stderr. This option is ignored if # log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile #log_file = # (Optional) The base directory used for relative log_file paths. This option # is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logdir #log_dir = # Use syslog for logging. Existing syslog format is DEPRECATED and will be # changed later to honor RFC5424. This option is ignored if log_config_append # is set. (boolean value) #use_syslog = false # Enable journald for logging. If running in a systemd environment you may wish # to enable journal support. Doing so will use the journal native protocol # which includes structured metadata in addition to log messages.This option is # ignored if log_config_append is set. (boolean value) #use_journal = false # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. (string value) #syslog_log_facility = LOG_USER # Use JSON formatting for logging. This option is ignored if log_config_append # is set. (boolean value) #use_json = false # Log output to standard error. This option is ignored if log_config_append is # set. (boolean value) #use_stderr = false # (Optional) Set the 'color' key according to log levels. This option takes # effect only when logging to stderr or stdout is used. This option is ignored # if log_config_append is set. (boolean value) #log_color = false # The amount of time before the log files are rotated. This option is ignored # unless log_rotation_type is set to "interval". (integer value) #log_rotate_interval = 1 # Rotation interval type. The time of the last file change (or the time when # the service was started) is used when scheduling the next rotation. (string # value) # Possible values: # Seconds - # Minutes - # Hours - # Days - # Weekday - # Midnight - #log_rotate_interval_type = days # Maximum number of rotated log files. (integer value) #max_logfile_count = 30 # Log file maximum size in MB. This option is ignored if "log_rotation_type" is # not set to "size". (integer value) #max_logfile_size_mb = 200 # Log rotation type. (string value) # Possible values: # interval - Rotate logs at predefined time intervals. # size - Rotate logs once they reach a predefined size. # none - Do not rotate log files. #log_rotation_type = none # Format string to use for log messages with context. 
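#
# Example (illustrative only): to send output to a log file and rotate it by
# size, keeping at most ten files of 100 MB each, the oslo.log options above
# could be set along these lines. The path and sizes are assumptions, not
# defaults.
#
# log_dir = /var/log/tacker
# log_file = tacker.log
# log_rotation_type = size
# max_logfile_size_mb = 100
# max_logfile_count = 10
#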
Used by # oslo_log.formatters.ContextFormatter (string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s # Format string to use for log messages when context is undefined. Used by # oslo_log.formatters.ContextFormatter (string value) #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # Additional data to append to log message when logging level for the message # is DEBUG. Used by oslo_log.formatters.ContextFormatter (string value) #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d # Prefix each line of exception output with this format. Used by # oslo_log.formatters.ContextFormatter (string value) #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s # Defines the format string for %(user_identity)s that is used in # logging_context_format_string. Used by oslo_log.formatters.ContextFormatter # (string value) #logging_user_identity_format = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s # List of package logging levels in logger=LEVEL pairs. This option is ignored # if log_config_append is set. (list value) #default_log_levels = amqp=WARN,boto=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false # The format for an instance that is passed with the log message. (string # value) #instance_format = "[instance: %(uuid)s] " # The format for an instance UUID that is passed with the log message. (string # value) #instance_uuid_format = "[instance: %(uuid)s] " # Interval, number of seconds, of log rate limiting. (integer value) #rate_limit_interval = 0 # Maximum number of logged messages per rate_limit_interval. (integer value) #rate_limit_burst = 0 # Log level name used by rate limiting. Logs with level greater or equal to # rate_limit_except_level are not filtered. An empty string means that all # levels are filtered. (string value) # Possible values: # CRITICAL - # ERROR - # INFO - # WARNING - # DEBUG - # '' - #rate_limit_except_level = CRITICAL # Enables or disables fatal status of deprecations. (boolean value) #fatal_deprecations = false # # From oslo.messaging # # Size of executor thread pool when executor is threading or eventlet. (integer # value) # Deprecated group/name - [DEFAULT]/rpc_thread_pool_size #executor_thread_pool_size = 64 # Seconds to wait for a response from a call. (integer value) #rpc_response_timeout = 60 # The network address and optional user credentials for connecting to the # messaging backend, in URL format. The expected format is: # # driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query # # Example: rabbit://rabbitmq:password@127.0.0.1:5672// # # For full details on the fields in the URL see the documentation of # oslo_messaging.TransportURL at # https://docs.openstack.org/oslo.messaging/latest/reference/transport.html # (string value) #transport_url = rabbit:// # The default exchange under which topics are scoped. 
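#
# Example (illustrative only): pointing the messaging layer at a two-node
# RabbitMQ cluster using the URL format documented above. Host names and
# credentials are placeholders.
#
# transport_url = rabbit://tacker:secret@rabbit1:5672,tacker:secret@rabbit2:5672/
# rpc_response_timeout = 120
#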
May be overridden by an
# exchange name specified in the transport_url option. (string value)
#control_exchange = tacker

# Add an endpoint to answer to ping calls. Endpoint is named
# oslo_rpc_server_ping (boolean value)
#rpc_ping_enabled = false

#
# From oslo.service.service
#

# DEPRECATED: Enable eventlet backdoor. Acceptable values are 0, <port>, and
# <start>:<end>, where 0 results in listening on a random tcp port number;
# <port> results in listening on the specified port number (and not enabling
# backdoor if that port is in use); and <start>:<end> results in listening on
# the smallest unused port number within the specified range of port numbers.
# The chosen port is displayed in the service's log file. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: The 'backdoor_port' option is deprecated and will be removed in a
# future release.
#backdoor_port =

# DEPRECATED: Enable eventlet backdoor, using the provided path as a unix
# socket that can receive connections. This option is mutually exclusive with
# 'backdoor_port' in that only one should be provided. If both are provided
# then the existence of this option overrides the usage of that option. Inside
# the path {pid} will be replaced with the PID of the current process. (string
# value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: The 'backdoor_socket' option is deprecated and will be removed in a
# future release.
#backdoor_socket =

# Enables or disables logging values of all registered options when starting a
# service (at DEBUG level). (boolean value)
#log_options = true

# Specify a timeout after which a gracefully shut down server will exit. Zero
# value means endless wait. (integer value)
#graceful_shutdown_timeout = 60

#
# From tacker.common.config
#

# The host IP to bind to (host address value)
#bind_host = 0.0.0.0

# The port to bind to (integer value)
#bind_port = 9890

# The API paste config file to use (string value)
#api_paste_config = api-paste.ini

# The path for API extensions (string value)
#api_extensions_path =

# The service plugins Tacker will use (list value)
#service_plugins = nfvo,vnfm

# The type of authentication to use (string value)
#auth_strategy = keystone

# Allow the usage of the bulk API (boolean value)
#allow_bulk = true

# Allow the usage of the pagination (boolean value)
#allow_pagination = false

# Allow the usage of the sorting (boolean value)
#allow_sorting = false

# The maximum number of items returned in a single response. A value of
# 'infinite' or a negative integer means no limit (string value)
#pagination_max_limit = -1

# The hostname Tacker is running on (host address value)
#host = np05d54c4fd30e4

# Enable to encrypt the credential (boolean value)
#use_credential_encryption = false

# The type of keymanager to use when the 'use_credential_encryption' option is
# True (string value)
#keymanager_type = barbican

# Dir.path to store fernet_keys (string value)
#crypt_key_dir = /etc/tacker/crypt/fernet_keys

# Where to store Tacker state files. This directory must be writable by the
# agent.
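#
# Example (illustrative only): binding the Tacker API to a specific address
# and enabling credential encryption via Barbican, using the
# tacker.common.config options above. The IP address is a placeholder.
#
# bind_host = 192.0.2.10
# bind_port = 9890
# service_plugins = nfvo,vnfm
# use_credential_encryption = true
# keymanager_type = barbican
# state_path = /var/lib/tacker
#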
(string value)
#state_path = /var/lib/tacker

#
# From tacker.conf
#

# Seconds between running periodic tasks to clean up residues of deleted vnf
# packages (integer value)
#vnf_package_delete_interval = 1800

# Interval time in sec for DB sync between Tacker and Kubernetes VIMs (integer
# value)
#db_synchronization_interval = 300

#
# From tacker.service
#

# Seconds between running components report states (integer value)
#report_interval = 10

# Seconds between running periodic tasks (integer value)
#periodic_interval = 40

# Number of separate worker processes for service (integer value)
#api_workers = 0

# Range of seconds to randomly delay when starting the periodic task scheduler
# to reduce stampeding. (Disable by setting to 0) (integer value)
#periodic_fuzzy_delay = 5

#
# From tacker.wsgi
#

# Number of backlog requests to configure the socket with (integer value)
#backlog = 4096

# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not
# supported on OS X. (integer value)
#tcp_keepidle = 600

# Number of seconds to keep retrying to listen (integer value)
#retry_until_window = 30

# Max header line to accommodate large tokens (integer value)
#max_header_line = 16384

# Enable SSL on the API server (boolean value)
#use_ssl = false

# CA certificate file to use to verify connecting clients (string value)
#ssl_ca_file =

# Certificate file to use when starting the server securely (string value)
#ssl_cert_file =

# Private key file to use when starting the server securely (string value)
#ssl_key_file =


[authentication]

#
# From tacker.auth
#

# auth_type used for external connection (string value)
# Possible values:
# BASIC -
# OAUTH2_CLIENT_CREDENTIALS -
#auth_type =

# timeout used for external connection (integer value)
#timeout = 20

# token_endpoint used to get the oauth2 token (string value)
#token_endpoint =

# client_id used to get the oauth2 token (string value)
#client_id =

# client_password used to get the oauth2 token (string value)
#client_password =

# user_name used in basic authentication (string value)
#user_name =

# password used in basic authentication (string value)
#password =

# verify the certification to get the oauth2 access token by ssl (boolean
# value)
#verify_oauth2_ssl = true

# authentication type (string value)
# Possible values:
# Bearer -
# Basic -
#token_type =

# URL of the authorization server (string value)
#auth_url =


[connect_grant]

#
# From tacker.vnfm.nfvo_client
#

# grant of base_url (string value)
#base_url =

# Number of grant retry count (integer value)
#retry_num = 2

# Number of grant retry wait (integer value)
#retry_wait = 30

# Number of grant connect timeout (integer value)
#timeout = 20


[connect_vnf_packages]

#
# From tacker.vnfm.nfvo_client
#

# vnf_packages base_url (string value)
#base_url =

# Get vnf_packages api pipeline (list value)
#pipeline =

# Number of vnf_packages retry count (integer value)
#retry_num = 2

# Number of vnf_packages retry wait (integer value)
#retry_wait = 30

# Number of vnf_packages connect timeout (integer value)
#timeout = 20


[coordination]

#
# From tacker.conf
#

# The backend URL to use for distributed coordination. (string value)
#backend_url = file://$state_path


[cors]

#
# From oslo.middleware
#

# Indicate whether this resource may be shared with the domain received in the
# request's "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing
# slash.
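#
# Example (illustrative only) for the [authentication] section above: using
# OAuth 2.0 client credentials for external connections. The endpoint and
# credentials are placeholders.
#
# auth_type = OAUTH2_CLIENT_CREDENTIALS
# token_endpoint = https://auth.example.com/token
# client_id = tacker-client
# client_password = secret
# verify_oauth2_ssl = true
#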
Example: https://horizon.example.com (list value) #allowed_origin = # Indicate that the actual request can include user credentials (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to HTTP Simple # Headers. (list value) #expose_headers = # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. (list value) #allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH # Indicate which header field names may be used during the actual request. # (list value) #allow_headers = [database] # # From oslo.db # # If True, SQLite uses synchronous mode. (boolean value) #sqlite_synchronous = true # The back end to use for the database. (string value) #backend = sqlalchemy # The SQLAlchemy connection string to use to connect to the database. (string # value) #connection = # The SQLAlchemy connection string to use to connect to the slave database. # (string value) #slave_connection = # The SQLAlchemy asyncio connection string to use to connect to the database. # (string value) #asyncio_connection = # The SQLAlchemy asyncio connection string to use to connect to the slave # database. (string value) #asyncio_slave_connection = # Whether or not to assume a reader context needs to guarantee it can read data # committed by a writer assuming replication lag is present; defaults to True. # When False, a reader context works the same as async_reader and will select # the slave database if present. When using a galera cluster, this can be set # to False only if you set mysql_wsrep_sync_wait to 1 (this will guarantee that # the reader will wait until writesets are committed).Note that this may incur # a performance degradation within the galera cluster. Note also that this # parameter has no effect if you do not set any slave_connection. (boolean # value) #synchronous_reader = true # The SQL mode to be used for MySQL sessions. This option, including the # default, overrides any server-set SQL mode. To use whatever SQL mode is set # by the server configuration, set this to no value. Example: mysql_sql_mode= # (string value) #mysql_sql_mode = TRADITIONAL # For Galera only, configure wsrep_sync_wait causality checks on new # connections. Default is None, meaning don't configure any setting. (integer # value) #mysql_wsrep_sync_wait = # Connections which have been present in the connection pool longer than this # number of seconds will be replaced with a new one the next time they are # checked out from the pool. (integer value) #connection_recycle_time = 3600 # Maximum number of SQL connections to keep open in a pool. Setting a value of # 0 indicates no limit. (integer value) #max_pool_size = 5 # Maximum number of database connection retries during startup. Set to -1 to # specify an infinite retry count. (integer value) #max_retries = 10 # Interval between retries of opening a SQL connection. (integer value) #retry_interval = 10 # If set, use this value for max_overflow with SQLAlchemy. (integer value) #max_overflow = 50 # Verbosity of SQL debugging information: 0=None, 100=Everything. (integer # value) # Minimum value: 0 # Maximum value: 100 #connection_debug = 0 # Add Python stack traces to SQL as comment strings. (boolean value) #connection_trace = false # If set, use this value for pool_timeout with SQLAlchemy. (integer value) #pool_timeout = # Enable the experimental use of database reconnect on connection lost. 
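#
# Example (illustrative only) for the [database] section above: a typical
# MySQL connection string with a modest connection pool. Host, credentials
# and database name are placeholders.
#
# connection = mysql+pymysql://tacker:secret@db.example.com/tacker?charset=utf8
# max_pool_size = 10
# max_overflow = 50
# connection_recycle_time = 3600
#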
# (boolean value)
#use_db_reconnect = false

# Seconds between retries of a database transaction. (integer value)
#db_retry_interval = 1

# If True, increases the interval between retries of a database operation up to
# db_max_retry_interval. (boolean value)
#db_inc_retry_interval = true

# If db_inc_retry_interval is set, the maximum seconds between retries of a
# database operation. (integer value)
#db_max_retry_interval = 10

# Maximum retries in case of connection error or deadlock error before error is
# raised. Set to -1 to specify an infinite retry count. (integer value)
#db_max_retries = 20

# Optional URL parameters to append onto the connection URL at connect time;
# specify as param1=value1&param2=value2&... (string value)
#connection_parameters =


[ext_oauth2_auth]

#
# From tacker.common.ext_oauth2_auth
#

# Set True to use external OAuth 2.0 auth server. (boolean value)
#use_ext_oauth2_auth = false

# The endpoint for access token API. (string value)
#token_endpoint =

# The scope that the access token can access. (string value)
#scope =

# Required if identity server requires client certificate. (string value)
#certfile =

# Required if identity server requires client private key. (string value)
#keyfile =

# A PEM encoded Certificate Authority to use when verifying HTTPS connections.
# Defaults to system CAs. (string value)
#cafile =

# Verify HTTPS connections. (boolean value)
#insecure = false

# Request timeout value for communicating with Identity API server. (integer
# value)
#http_connect_timeout =

# The Audience should be the URL of the Authorization Server's Token Endpoint.
# The Authorization Server will verify that it is an intended audience for the
# token. (string value)
#audience =

# The auth_method must use the authentication method specified by the
# Authorization Server. (string value)
# Possible values:
# client_secret_basic -
# client_secret_post -
# tls_client_auth -
# private_key_jwt -
# client_secret_jwt -
#auth_method = client_secret_basic

# The OAuth 2.0 Client Identifier valid at the Authorization Server. (string
# value)
#client_id =

# The OAuth 2.0 client secret. When the auth_method is client_secret_basic,
# client_secret_post, or client_secret_jwt, the value is used, and otherwise
# the value is ignored. (string value)
#client_secret =

# The jwt_key_file must use the certificate key file which has been registered
# with the Authorization Server. When the auth_method is private_key_jwt, the
# value is used, and otherwise the value is ignored. (string value)
#jwt_key_file =

# The jwt_algorithm must use the algorithm specified by the Authorization
# Server. When the auth_method is client_secret_jwt, this value is often set to
# HS256; when the auth_method is private_key_jwt, the value is often set to
# RS256, and otherwise the value is ignored. (string value)
#jwt_algorithm =

# This value is used to calculate the expiration time. After the expiration
# time, the access token cannot be accepted. When the auth_method is
# client_secret_jwt or private_key_jwt, the value is used, and otherwise the
# value is ignored. (integer value)
#jwt_bearer_time_out = 3600


[glance_store]

#
# From glance.store
#

# DEPRECATED:
# List of enabled Glance stores.
#
# Register the storage backends to use for storing disk images
# as a comma separated list. The default stores enabled for
# storing disk images with Glance are ``file`` and ``http``.
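#
# Example (illustrative only) for the [ext_oauth2_auth] section above:
# enabling an external OAuth 2.0 authorization server with the
# client_secret_basic method. URLs and credentials are placeholders.
#
# use_ext_oauth2_auth = true
# token_endpoint = https://keycloak.example.com/token
# audience = https://keycloak.example.com/token
# auth_method = client_secret_basic
# client_id = tacker
# client_secret = secret
# scope = tacker_api
#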
# # Possible values: # * A comma separated list that could include: # * file # * http # * swift # * rbd # * cinder # * vmware # * s3 # # Related Options: # * default_store # # (list value) # This option is deprecated for removal since Rocky. # Its value may be silently ignored in the future. # Reason: # This option is deprecated against new config option # ``enabled_backends`` which helps to configure multiple backend stores # of different schemes. # # This option is scheduled for removal in the U development # cycle. #stores = file,http # DEPRECATED: # The default scheme to use for storing images. # # Provide a string value representing the default scheme to use for # storing images. If not set, Glance uses ``file`` as the default # scheme to store images with the ``file`` store. # # NOTE: The value given for this configuration option must be a valid # scheme for a store registered with the ``stores`` configuration # option. # # Possible values: # * file # * filesystem # * http # * https # * swift # * swift+http # * swift+https # * swift+config # * rbd # * cinder # * vsphere # * s3 # # Related Options: # * stores # # (string value) # Possible values: # file - # filesystem - # http - # https - # swift - # swift+http - # swift+https - # swift+config - # rbd - # cinder - # vsphere - # s3 - # This option is deprecated for removal since Rocky. # Its value may be silently ignored in the future. # Reason: # This option is deprecated against new config option # ``default_backend`` which acts similar to ``default_store`` config # option. # # This option is scheduled for removal in the U development # cycle. #default_store = file # # Directory to which the filesystem backend store writes images. # # Upon start up, Glance creates the directory if it doesn't already # exist and verifies write access to the user under which # ``glance-api`` runs. If the write access isn't available, a # ``BadStoreConfiguration`` exception is raised and the filesystem # store may not be available for adding new images. # # NOTE: This directory is used only when filesystem store is used as a # storage backend. Either ``filesystem_store_datadir`` or # ``filesystem_store_datadirs`` option must be specified in # ``glance-api.conf``. If both options are specified, a # ``BadStoreConfiguration`` will be raised and the filesystem store # may not be available for adding new images. # # Possible values: # * A valid path to a directory # # Related options: # * ``filesystem_store_datadirs`` # * ``filesystem_store_file_perm`` # # (string value) #filesystem_store_datadir = /var/lib/glance/images # DEPRECATED: # List of directories and their priorities to which the filesystem # backend store writes images. # # The filesystem store can be configured to store images in multiple # directories as opposed to using a single directory specified by the # ``filesystem_store_datadir`` configuration option. When using # multiple directories, each directory can be given an optional # priority to specify the preference order in which they should # be used. Priority is an integer that is concatenated to the # directory path with a colon where a higher value indicates higher # priority. When two directories have the same priority, the directory # with most free space is used. When no priority is specified, it # defaults to zero. 
#
# More information on configuring filesystem store with multiple store
# directories can be found at
# https://docs.openstack.org/glance/latest/configuration/configuring.html
#
# NOTE: This directory is used only when filesystem store is used as a
# storage backend. Either ``filesystem_store_datadir`` or
# ``filesystem_store_datadirs`` option must be specified in
# ``glance-api.conf``. If both options are specified, a
# ``BadStoreConfiguration`` will be raised and the filesystem store
# may not be available for adding new images.
#
# Possible values:
# * List of strings of the following form:
# * ``<a_directory_path>:<its_priority>``
#
# Related options:
# * ``filesystem_store_datadir``
# * ``filesystem_store_file_perm``
#
# (multi valued)
# This option is deprecated for removal since Flamingo.
# Its value may be silently ignored in the future.
# Reason:
# Users willing to use multiple data directories should configure multiple
# filesystem stores instead of using filesystem_store_datadirs.
#
# This option is scheduled for removal in the H development cycle.
#filesystem_store_datadirs =

#
# Filesystem store metadata file.
#
# The path to a file which contains the metadata to be returned with any
# location
# associated with the filesystem store. Once this option is set, it is used for
# new images created afterward only - previously existing images are not
# affected.
#
# The file must contain a valid JSON object. The object should contain the keys
# ``id`` and ``mountpoint``. The value for both keys should be a string.
#
# Possible values:
# * A valid path to the store metadata file
#
# Related options:
# * None
#
# (string value)
#filesystem_store_metadata_file =

#
# File access permissions for the image files.
#
# Set the intended file access permissions for image data. This provides
# a way to enable other services, e.g. Nova, to consume images directly
# from the filesystem store. The users running the services that are
# intended to be given access could be made members of the group
# that owns the files created. Assigning a value less than or equal to
# zero for this configuration option signifies that no changes will be made
# to the default permissions. This value will be decoded as an octal
# digit.
#
# For more information, please refer to the documentation at
# https://docs.openstack.org/glance/latest/configuration/configuring.html
#
# Possible values:
# * A valid file access permission
# * Zero
# * Any negative integer
#
# Related options:
# * None
#
# (integer value)
#filesystem_store_file_perm = 0

#
# Chunk size, in bytes.
#
# The chunk size used when reading or writing image files. Raising this value
# may improve the throughput but it may also slightly increase the memory usage
# when handling a large number of requests.
#
# Possible Values:
# * Any positive integer value
#
# Related options:
# * None
#
# (integer value)
# Minimum value: 1
#filesystem_store_chunk_size = 65536

#
# Enable or disable thin provisioning in this backend.
#
# This configuration option enables the feature of not actually writing null
# byte sequences on the filesystem; the holes that can appear will
# automatically be interpreted by the filesystem as null bytes, and do not
# really consume your storage.
# Enabling this feature will also speed up image upload and save network
# traffic, in addition to saving space in the backend, as null byte sequences
# are not sent over the network.
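#
# Example (illustrative only): a filesystem store with a metadata file and
# thin provisioning enabled. As described above, the metadata file must
# contain a JSON object with the keys ``id`` and ``mountpoint``, for example
# {"id": "fs1", "mountpoint": "/var/lib/glance/images"}. Paths and the id
# value are placeholders.
#
# filesystem_store_datadir = /var/lib/glance/images
# filesystem_store_metadata_file = /etc/glance/filesystem_store_metadata.json
# filesystem_thin_provisioning = true
#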
# # Possible Values: # * True # * False # # Related options: # * None # # (boolean value) #filesystem_thin_provisioning = false # # Timeout for all filesystem operations (seconds). # # Set to 0 to disable timeout protection (blocking IO, normal behavior). # Set > 0 to enable timeout protection with thread pool. # Recommended: 30 seconds for network storage, higher for slow networks. # # When timeout protection is enabled, filesystem operations like delete(), # get_size(), and _get_capacity_info() will be wrapped with timeout # protection. If an operation exceeds the timeout, a TimeoutError will be # raised. # # Possible values: # * 0 (disabled, blocking IO) # * Any positive integer (timeout in seconds) # # Related options: # * filesystem_store_thread_pool_size # * filesystem_store_threadpool_threshold # # (integer value) # Minimum value: 0 #filesystem_store_timeout = 0 # # Thread pool size for timeout-protected operations. # # Only meaningful when filesystem_store_timeout > 0. # Ignored when filesystem_store_timeout = 0 (no thread pool is created). # Each store instance gets its own pool to avoid starvation. # Set based on expected concurrency and WSGI worker count. # # Possible values: # * Any positive integer # # Related options: # * filesystem_store_timeout # * filesystem_store_threadpool_threshold # # (integer value) # Minimum value: 1 #filesystem_store_thread_pool_size = 10 # # Thread pool usage threshold for warning logs (percentage). # # Only meaningful when filesystem_store_timeout > 0. # Ignored when filesystem_store_timeout = 0 (no thread pool is created). # When thread pool usage exceeds this threshold, a warning is logged # indicating that the pool is getting busy and may start blocking. # # Possible values: # * 0-100 (percentage) # # Related options: # * filesystem_store_timeout # * filesystem_store_thread_pool_size # # (integer value) # Minimum value: 0 # Maximum value: 100 #filesystem_store_threadpool_threshold = 75 # # Path to the CA bundle file. # # This configuration option enables the operator to use a custom # Certificate Authority file to verify the remote server certificate. If # this option is set, the ``https_insecure`` option will be ignored and # the CA file specified will be used to authenticate the server # certificate and establish a secure connection to the server. # # Possible values: # * A valid path to a CA file # # Related options: # * https_insecure # # (string value) #https_ca_certificates_file = # # Set verification of the remote server certificate. # # This configuration option takes in a boolean value to determine # whether or not to verify the remote server certificate. If set to # True, the remote server certificate is not verified. If the option is # set to False, then the default CA truststore is used for verification. # # This option is ignored if ``https_ca_certificates_file`` is set. # The remote server certificate will then be verified using the file # specified using the ``https_ca_certificates_file`` option. # # Possible values: # * True # * False # # Related options: # * https_ca_certificates_file # # (boolean value) #https_insecure = true # # The http/https proxy information to be used to connect to the remote # server. # # This configuration option specifies the http/https proxy information # that should be used to connect to the remote server. The proxy # information should be a key value pair of the scheme and proxy, for # example, http:10.0.0.1:3128. 
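#
# Example (illustrative only): enabling timeout protection for filesystem
# operations backed by network storage, combining the three related options
# above. The values are assumptions chosen to illustrate the relationship.
#
# filesystem_store_timeout = 30
# filesystem_store_thread_pool_size = 10
# filesystem_store_threadpool_threshold = 75
#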
You can also specify proxies for multiple # schemes by separating the key value pairs with a comma, for example, # http:10.0.0.1:3128, https:10.0.0.1:1080. # # Possible values: # * A comma separated list of scheme:proxy pairs as described above # # Related options: # * None # # (dict value) #http_proxy_information = # # Size, in megabytes, to chunk RADOS images into. # # Provide an integer value representing the size in megabytes to chunk # Glance images into. The default chunk size is 8 megabytes. For optimal # performance, the value should be a power of two. # # When Ceph's RBD object storage system is used as the storage backend # for storing Glance images, the images are chunked into objects of the # size set using this option. These chunked objects are then stored # across the distributed block data store to use for Glance. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #rbd_store_chunk_size = 8 # # RADOS pool in which images are stored. # # When RBD is used as the storage backend for storing Glance images, the # images are stored by means of logical grouping of the objects (chunks # of images) into a ``pool``. Each pool is defined with the number of # placement groups it can contain. The default pool that is used is # 'images'. # # More information on the RBD storage backend can be found here: # http://ceph.com/planet/how-data-is-stored-in-ceph-cluster/ # # Possible Values: # * A valid pool name # # Related options: # * None # # (string value) #rbd_store_pool = images # # RADOS user to authenticate as. # # This configuration option takes in the RADOS user to authenticate as. # This is only needed when RADOS authentication is enabled and is # applicable only if the user is using Cephx authentication. If the # value for this option is not set by the user or is set to None, a # default value will be chosen, which will be based on the client. # section in rbd_store_ceph_conf. # # Possible Values: # * A valid RADOS user # # Related options: # * rbd_store_ceph_conf # # (string value) #rbd_store_user = # # Ceph configuration file path. # # This configuration option specifies the path to the Ceph configuration # file to be used. If the value for this option is not set by the user # or is set to the empty string, librados will read the standard ceph.conf # file by searching the default Ceph configuration file locations in # sequential order. See the Ceph documentation for details. # # NOTE: If using Cephx authentication, this file should include a reference # to the right keyring in a client. section # # NOTE 2: If you leave this option empty (the default), the actual Ceph # configuration file used may change depending on what version of librados # is being used. If it is important for you to know exactly which # configuration # file is in effect, you may specify that file here using this option. # # Possible Values: # * A valid path to a configuration file # # Related options: # * rbd_store_user # # (string value) #rbd_store_ceph_conf = # # Timeout value for connecting to Ceph cluster. # # This configuration option takes in the timeout value in seconds used # when connecting to the Ceph cluster i.e. it sets the time to wait for # glance-api before closing the connection. This prevents glance-api # hangups during the connection to RBD. If the value for this option # is set to less than 0, no timeout is set and the default librados value # is used. 
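#
# Example (illustrative only): an RBD backend using Cephx authentication.
# The pool, user and Ceph configuration file path are placeholders.
#
# rbd_store_pool = images
# rbd_store_user = glance
# rbd_store_ceph_conf = /etc/ceph/ceph.conf
# rbd_store_chunk_size = 8
# rados_connect_timeout = -1
#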
# # Possible Values: # * Any integer value # # Related options: # * None # # (integer value) #rados_connect_timeout = -1 # # Enable or not thin provisioning in this backend. # # This configuration option enable the feature of not really write null byte # sequences on the RBD backend, the holes who can appear will automatically # be interpreted by Ceph as null bytes, and do not really consume your storage. # Enabling this feature will also speed up image upload and save network # traffic # in addition to save space in the backend, as null bytes sequences are not # sent over the network. # # Possible Values: # * True # * False # # Related options: # * None # # (boolean value) #rbd_thin_provisioning = false # # The host where the S3 server is listening. # # This configuration option sets the host of the S3 or S3 compatible storage # Server. This option is required when using the S3 storage backend. # The host can contain a DNS name (e.g. s3.amazonaws.com, my-object- # storage.com) # or an IP address (127.0.0.1). # # Possible values: # * A valid DNS name # * A valid IPv4 address # # Related Options: # * s3_store_access_key # * s3_store_secret_key # # (string value) #s3_store_host = # # The S3 region name. # # This parameter will set the region_name used by boto. # If this parameter is not set, we we will try to compute it from the # s3_store_host. # # Possible values: # * A valid region name # # Related Options: # * s3_store_host # # (string value) #s3_store_region_name = # # The S3 query token access key. # # This configuration option takes the access key for authenticating with the # Amazon S3 or S3 compatible storage server. This option is required when using # the S3 storage backend. # # Possible values: # * Any string value that is the access key for a user with appropriate # privileges # # Related Options: # * s3_store_host # * s3_store_secret_key # # (string value) #s3_store_access_key = # # The S3 query token secret key. # # This configuration option takes the secret key for authenticating with the # Amazon S3 or S3 compatible storage server. This option is required when using # the S3 storage backend. # # Possible values: # * Any string value that is a secret key corresponding to the access key # specified using the ``s3_store_host`` option # # Related Options: # * s3_store_host # * s3_store_access_key # # (string value) #s3_store_secret_key = # # The S3 bucket to be used to store the Glance data. # # This configuration option specifies where the glance images will be stored # in the S3. If ``s3_store_create_bucket_on_put`` is set to true, it will be # created automatically even if the bucket does not exist. # # Possible values: # * Any string value # # Related Options: # * s3_store_create_bucket_on_put # * s3_store_bucket_url_format # # (string value) #s3_store_bucket = # # Determine whether S3 should create a new bucket. # # This configuration option takes boolean value to indicate whether Glance # should # create a new bucket to S3 if it does not exist. # # Possible values: # * Any Boolean value # # Related Options: # * None # # (boolean value) #s3_store_create_bucket_on_put = false # # The S3 calling format used to determine the object. # # This configuration option takes access model that is used to specify the # address of an object in an S3 bucket. # # NOTE: # In ``path``-style, the endpoint for the object looks like # 'https://s3.amazonaws.com/bucket/example.img'. 
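#
# Example (illustrative only): an S3 compatible backend with automatic bucket
# creation. The endpoint, credentials and bucket name are placeholders.
#
# s3_store_host = s3.example.com
# s3_store_access_key = ACCESS_KEY
# s3_store_secret_key = SECRET_KEY
# s3_store_bucket = glance-images
# s3_store_create_bucket_on_put = true
#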
# And in ``virtual``-style, the endpoint for the object looks like # 'https://bucket.s3.amazonaws.com/example.img'. # If you do not follow the DNS naming convention in the bucket name, you can # get objects in the path style, but not in the virtual style. # # Possible values: # * Any string value of ``auto``, ``virtual``, or ``path`` # # Related Options: # * s3_store_bucket # # (string value) #s3_store_bucket_url_format = auto # # What size, in MB, should S3 start chunking image files and do a multipart # upload in S3. # # This configuration option takes a threshold in MB to determine whether to # upload the image to S3 as is or to split it (Multipart Upload). # # Note: You can only split up to 10,000 images. # # Possible values: # * Any positive integer value or zero # # Related Options: # * s3_store_large_object_chunk_size # * s3_store_thread_pools # # (integer value) # Minimum value: 0 #s3_store_large_object_size = 100 # # What multipart upload part size, in MB, should S3 use when uploading parts. # # This configuration option takes the image split size in MB for Multipart # Upload. # # Note: You can only split up to 10,000 images. # # Possible values: # * Any positive integer value (must be greater than or equal to 5M) # # Related Options: # * s3_store_large_object_size # * s3_store_thread_pools # # (integer value) #s3_store_large_object_chunk_size = 10 # # The number of thread pools to perform a multipart upload in S3. # # This configuration option takes the number of thread pools when performing a # Multipart Upload. # # Possible values: # * Any positive integer value # # Related Options: # * s3_store_large_object_size # * s3_store_large_object_chunk_size # # (integer value) #s3_store_thread_pools = 10 # # The path to the CA cert bundle to use. The default value (an empty string) # forces the use of the default CA cert bundle used by botocore. # # Possible values: # * A path to the CA cert bundle to use # * An empty string to use the default CA cert bundle used by botocore # # (string value) #s3_store_cacert = # # Turn on S3 data integrity protections by enabling request checksum # calculation # and response checksum validation. Setting this to False is the same as # setting # the AWS_REQUEST_CHECKSUM_CALCULATION and AWS_RESPONSE_CHECKSUM_VALIDATION # environment variables to "when_required". Setting this to True gives the user # fine-grained control over the data integrity protections by using the # s3_store_request_checksum_calculation and # s3_store_response_checksum_validation # options. # # This defaults to False as to not change the default behaviour in # glance_store, # which prevents upload failures when using S3-compatible storage backends that # do not implement S3 data integrity protection. # # Related Options: # * s3_store_request_checksum_calculation # * s3_store_response_checksum_validation # (boolean value) #s3_store_enable_data_integrity_protection = false # # Controls when checksums are calculated for S3 upload requests. # # This configuration option provides fine-grained control over request checksum # calculation behavior. It maps directly to the botocore Config parameter # request_checksum_calculation. # # Possible values: # * when_required - Only calculate checksums when required by the operation # * when_supported - Calculate checksums when supported by the operation # # This option is only used when s3_store_enable_data_integrity_protection is # set to True. 
When s3_store_enable_data_integrity_protection is False, both # request and response checksums use 'when_required' regardless of this # setting. # # Related Options: # * s3_store_response_checksum_validation # * s3_store_enable_data_integrity_protection # # (string value) # Possible values: # when_required - # when_supported - #s3_store_request_checksum_calculation = when_required # # Controls when checksums are validated for S3 download responses. # # This configuration option provides fine-grained control over response # checksum # validation behavior. It maps directly to the botocore Config parameter # response_checksum_validation. # # Possible values: # * when_required - Only validate checksums when required by the operation # * when_supported - Validate checksums when supported by the operation # # This option is only used when s3_store_enable_data_integrity_protection is # set to True. When s3_store_enable_data_integrity_protection is False, both # request and response checksums use 'when_required' regardless of this # setting. # # Related Options: # * s3_store_request_checksum_calculation # * s3_store_enable_data_integrity_protection # # (string value) # Possible values: # when_required - # when_supported - #s3_store_response_checksum_validation = when_required # # Set verification of the server certificate. # # This boolean determines whether or not to verify the server # certificate. If this option is set to True, swiftclient won't check # for a valid SSL certificate when authenticating. If the option is set # to False, then the default CA truststore is used for verification. # # Possible values: # * True # * False # # Related options: # * swift_store_cacert # # (boolean value) #swift_store_auth_insecure = false # # Path to the CA bundle file. # # This configuration option enables the operator to specify the path to # a custom Certificate Authority file for SSL verification when # connecting to Swift. # # Possible values: # * A valid path to a CA file # # Related options: # * swift_store_auth_insecure # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #swift_store_cacert = /etc/ssl/certs/ca-certificates.crt # # The region of Swift endpoint to use by Glance. # # Provide a string value representing a Swift region where Glance # can connect to for image storage. By default, there is no region # set. # # When Glance uses Swift as the storage backend to store images # for a specific tenant that has multiple endpoints, setting of a # Swift region with ``swift_store_region`` allows Glance to connect # to Swift in the specified region as opposed to a single region # connectivity. # # This option can be configured for both single-tenant and # multi-tenant storage. # # NOTE: Setting the region with ``swift_store_region`` is # tenant-specific and is necessary ``only if`` the tenant has # multiple endpoints across different regions. # # Possible values: # * A string value representing a valid Swift region. # # Related Options: # * None # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #swift_store_region = RegionTwo # # The URL endpoint to use for Swift backend storage. # # Provide a string value representing the URL endpoint to use for # storing Glance images in Swift store. By default, an endpoint # is not set and the storage URL returned by ``auth`` is used. 
# Setting an endpoint with ``swift_store_endpoint`` overrides the # storage URL and is used for Glance image storage. # # NOTE: The URL should include the path up to, but excluding the # container. The location of an object is obtained by appending # the container and object to the configured URL. # # Possible values: # * String value representing a valid URL path up to a Swift container # # Related Options: # * None # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #swift_store_endpoint = https://swift.openstack.example.org/v1/path_not_including_container_name # # Endpoint Type of Swift service. # # This string value indicates the endpoint type to use to fetch the # Swift endpoint. The endpoint type determines the actions the user will # be allowed to perform, for instance, reading and writing to the Store. # # Possible values: # * publicURL # * adminURL # * internalURL # # Related options: # * swift_store_endpoint # # (string value) # Possible values: # publicURL - # adminURL - # internalURL - #swift_store_endpoint_type = publicURL # # Type of Swift service to use. # # Provide a string value representing the service type to use for # storing images while using Swift backend storage. The default # service type is set to ``object-store``. # # Possible values: # * A string representing a valid service type for Swift storage. # # Related Options: # * None # # (string value) #swift_store_service_type = object-store # # Name of single container to store images/name prefix for multiple containers # # When a single container is being used to store images, this configuration # option indicates the container within the Glance account to be used for # storing all images. When multiple containers are used to store images, this # will be the name prefix for all containers. Usage of single/multiple # containers can be controlled using the configuration option # ``swift_store_multiple_containers_seed``. # # When using multiple containers, the containers will be named after the value # set for this configuration option with the first N chars of the image UUID # as the suffix delimited by an underscore (where N is specified by # ``swift_store_multiple_containers_seed``). # # Example: if the seed is set to 3 and swift_store_container = ``glance``, then # an image with UUID ``fdae39a1-bac5-4238-aba4-69bcc726e848`` would be placed # in # the container ``glance_fda``. All dashes in the UUID are included when # creating the container name but do not count toward the character limit, so # when N=10 the container name would be ``glance_fdae39a1-ba.`` # # Possible values: # * If using single container, this configuration option can be any string # that is a valid swift container name in Glance's Swift account # * If using multiple containers, this configuration option can be any # string as long as it satisfies the container naming rules enforced by # Swift. The value of ``swift_store_multiple_containers_seed`` should be # taken into account as well. # # Related options: # * ``swift_store_multiple_containers_seed`` # * ``swift_store_multi_tenant`` # * ``swift_store_create_container_on_put`` # # (string value) #swift_store_container = glance # # The size threshold, in MB, after which Glance will start segmenting image # data. # # Swift has an upper limit on the size of a single uploaded object. By default, # this is 5GB. 
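#
# Example (illustrative only): storing all images in a single dedicated
# container reached through the internal endpoint, using the Swift options
# above. The container name is a placeholder.
#
# swift_store_endpoint_type = internalURL
# swift_store_service_type = object-store
# swift_store_container = glance
#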
To upload objects bigger than this limit, objects are segmented # into multiple smaller objects that are tied together with a manifest file. # For more detail, refer to # https://docs.openstack.org/swift/latest/overview_large_objects.html # # This configuration option specifies the size threshold over which the Swift # driver will start segmenting image data into multiple smaller files. # Currently, the Swift driver only supports creating Dynamic Large Objects. # # NOTE: This should be set by taking into account the large object limit # enforced by the Swift cluster in consideration. # # Possible values: # * A positive integer that is less than or equal to the large object limit # enforced by the Swift cluster in consideration. # # Related options: # * ``swift_store_large_object_chunk_size`` # # (integer value) # Minimum value: 1 #swift_store_large_object_size = 5120 # # The maximum size, in MB, of the segments when image data is segmented. # # When image data is segmented to upload images that are larger than the limit # enforced by the Swift cluster, image data is broken into segments that are no # bigger than the size specified by this configuration option. # Refer to ``swift_store_large_object_size`` for more detail. # # For example: if ``swift_store_large_object_size`` is 5GB and # ``swift_store_large_object_chunk_size`` is 1GB, an image of size 6.2GB will # be # segmented into 7 segments where the first six segments will be 1GB in size # and # the seventh segment will be 0.2GB. # # Possible values: # * A positive integer that is less than or equal to the large object limit # enforced by Swift cluster in consideration. # # Related options: # * ``swift_store_large_object_size`` # # (integer value) # Minimum value: 1 #swift_store_large_object_chunk_size = 200 # # Create container, if it doesn't already exist, when uploading image. # # At the time of uploading an image, if the corresponding container doesn't # exist, it will be created provided this configuration option is set to True. # By default, it won't be created. This behavior is applicable for both single # and multiple containers mode. # # Possible values: # * True # * False # # Related options: # * None # # (boolean value) #swift_store_create_container_on_put = false # # Store images in tenant's Swift account. # # This enables multi-tenant storage mode which causes Glance images to be # stored # in tenant specific Swift accounts. If this is disabled, Glance stores all # images in its own account. More details multi-tenant store can be found at # https://wiki.openstack.org/wiki/GlanceSwiftTenantSpecificStorage # # NOTE: If using multi-tenant swift store, please make sure # that you do not set a swift configuration file with the # 'swift_store_config_file' option. # # Possible values: # * True # * False # # Related options: # * swift_store_config_file # # (boolean value) #swift_store_multi_tenant = false # # Seed indicating the number of containers to use for storing images. # # When using a single-tenant store, images can be stored in one or more than # one # containers. When set to 0, all images will be stored in one single container. # When set to an integer value between 1 and 32, multiple containers will be # used to store images. This configuration option will determine how many # containers are created. The total number of containers that will be used is # equal to 16^N, so if this config option is set to 2, then 16^2=256 containers # will be used to store images. 
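#
# Example (illustrative only): spreading images across multiple containers,
# as described above. With a seed of 2, 16^2 = 256 containers are used and
# each image lands in a container named after the prefix plus the first two
# characters of its UUID (for example ``glance_fd``).
#
# swift_store_container = glance
# swift_store_multiple_containers_seed = 2
#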
# # Please refer to ``swift_store_container`` for more detail on the naming # convention. More detail about using multiple containers can be found at # https://specs.openstack.org/openstack/glance-specs/specs/kilo/swift-store- # multiple-containers.html # # NOTE: This is used only when swift_store_multi_tenant is disabled. # # Possible values: # * A non-negative integer less than or equal to 32 # # Related options: # * ``swift_store_container`` # * ``swift_store_multi_tenant`` # * ``swift_store_create_container_on_put`` # # (integer value) # Minimum value: 0 # Maximum value: 32 #swift_store_multiple_containers_seed = 0 # # List of tenants that will be granted admin access. # # This is a list of tenants that will be granted read/write access on # all Swift containers created by Glance in multi-tenant mode. The # default value is an empty list. # # Possible values: # * A comma separated list of strings representing UUIDs of Keystone # projects/tenants # # Related options: # * None # # (list value) #swift_store_admin_tenants = # # SSL layer compression for HTTPS Swift requests. # # Provide a boolean value to determine whether or not to compress # HTTPS Swift requests for images at the SSL layer. By default, # compression is enabled. # # When using Swift as the backend store for Glance image storage, # SSL layer compression of HTTPS Swift requests can be set using # this option. If set to False, SSL layer compression of HTTPS # Swift requests is disabled. Disabling this option may improve # performance for images which are already in a compressed format, # for example, qcow2. # # Possible values: # * True # * False # # Related Options: # * None # # (boolean value) #swift_store_ssl_compression = true # # The number of times a Swift download will be retried before the # request fails. # # Provide an integer value representing the number of times an image # download must be retried before erroring out. The default value is # zero (no retry on a failed image download). When set to a positive # integer value, ``swift_store_retry_get_count`` ensures that the # download is attempted this many more times upon a download failure # before sending an error message. # # Possible values: # * Zero # * Positive integer value # # Related Options: # * None # # (integer value) # Minimum value: 0 #swift_store_retry_get_count = 0 # # Time in seconds defining the size of the window in which a new # token may be requested before the current token is due to expire. # # Typically, the Swift storage driver fetches a new token upon the # expiration of the current token to ensure continued access to # Swift. However, some Swift transactions (like uploading image # segments) may not recover well if the token expires on the fly. # # Hence, by fetching a new token before the current token expiration, # we make sure that the token does not expire or is close to expiry # before a transaction is attempted. By default, the Swift storage # driver requests for a new token 60 seconds or less before the # current token expiration. # # Possible values: # * Zero # * Positive integer value # # Related Options: # * None # # (integer value) # Minimum value: 0 #swift_store_expire_soon_interval = 60 # # Use trusts for multi-tenant Swift store. # # This option instructs the Swift store to create a trust for each # add/get request when the multi-tenant store is in use. Using trusts # allows the Swift store to avoid problems that can be caused by an # authentication token expiring during the upload or download of data. 
# # By default, ``swift_store_use_trusts`` is set to ``True``(use of # trusts is enabled). If set to ``False``, a user token is used for # the Swift connection instead, eliminating the overhead of trust # creation. # # NOTE: This option is considered only when # ``swift_store_multi_tenant`` is set to ``True`` # # Possible values: # * True # * False # # Related options: # * swift_store_multi_tenant # # (boolean value) #swift_store_use_trusts = true # # Buffer image segments before upload to Swift. # # Provide a boolean value to indicate whether or not Glance should # buffer image data to disk while uploading to swift. This enables # Glance to resume uploads on error. # # NOTES: # When enabling this option, one should take great care as this # increases disk usage on the API node. Be aware that depending # upon how the file system is configured, the disk space used # for buffering may decrease the actual disk space available for # the glance image cache. Disk utilization will cap according to # the following equation: # (``swift_store_large_object_chunk_size`` * ``workers`` * 1000) # # Possible values: # * True # * False # # Related options: # * swift_upload_buffer_dir # # (boolean value) #swift_buffer_on_upload = false # # Reference to default Swift account/backing store parameters. # # Provide a string value representing a reference to the default set # of parameters required for using swift account/backing store for # image storage. The default reference value for this configuration # option is 'ref1'. This configuration option dereferences the # parameters and facilitates image storage in Swift storage backend # every time a new image is added. # # Possible values: # * A valid string value # # Related options: # * None # # (string value) #default_swift_reference = ref1 # DEPRECATED: The authentication version to be used. Currently The only valid # version is 3. (string value) # Possible values: # 3 - # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # This option is kept for backword-compatibility reasons but is no longer # required, because only the single version (3) is supported now. #swift_store_auth_version = 3 # DEPRECATED: The address where the Swift authentication service is listening. # (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'auth_address' in the Swift back-end configuration file is # used instead. #swift_store_auth_address = # DEPRECATED: The user to authenticate against the Swift authentication # service. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'user' in the Swift back-end configuration file is set instead. #swift_store_user = # DEPRECATED: Auth key for the user authenticating against the Swift # authentication service. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'key' in the Swift back-end configuration file is used # to set the authentication key instead. #swift_store_key = # # Application credential ID for authenticating against Swift. # # This option specifies the application credential ID to use for # authenticating with the Swift backend. When set along with # swift_store_application_credential_secret, the Swift driver will # use V3ApplicationCredential authentication instead of password # authentication. 
# # This enables Zero Downtime Password Rotation (ZDPR) support for # Swift backend operations, as application credentials are not # affected by password rotation. # # If not set, the driver falls back to password authentication # using swift_store_user and swift_store_key. # # Possible values: # * A valid application credential ID string # # Related options: # * swift_store_application_credential_secret # # (string value) #swift_store_application_credential_id = # # Application credential secret for authenticating against Swift. # # This option specifies the application credential secret to use # for authenticating with the Swift backend. When set along with # swift_store_application_credential_id, the Swift driver will # use V3ApplicationCredential authentication instead of password # authentication. # # This enables Zero Downtime Password Rotation (ZDPR) support for # Swift backend operations, as application credentials are not # affected by password rotation. # # If not set, the driver falls back to password authentication # using swift_store_user and swift_store_key. # # Possible values: # * A valid application credential secret string # # Related options: # * swift_store_application_credential_id # # (string value) #swift_store_application_credential_secret = # DEPRECATED: Project name for authenticating with application credentials # against the Swift authentication service. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'project_name' in the Swift back-end configuration file is # used instead. #swift_store_project_name = # DEPRECATED: Project ID for authenticating with application credentials # against the Swift authentication service. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'project_id' in the Swift back-end configuration file is # used instead. #swift_store_project_id = # # Absolute path to the file containing the swift account(s) # configurations. # # Include a string value representing the path to a configuration # file that has references for each of the configured Swift # account(s)/backing stores. By default, no file path is specified # and customized Swift referencing is disabled. Configuring this # option is highly recommended while using Swift storage backend for # image storage as it avoids storage of credentials in the database. # # NOTE: Please do not configure this option if you have set # ``swift_store_multi_tenant`` to ``True``. # # Possible values: # * String value representing an absolute path on the glance-api # node # # Related options: # * swift_store_multi_tenant # # (string value) #swift_store_config_file = # # Directory to buffer image segments before upload to Swift. # # Provide a string value representing the absolute path to the # directory on the glance node where image segments will be # buffered briefly before they are uploaded to swift. # # NOTES: # * This is required only when the configuration option # ``swift_buffer_on_upload`` is set to True. # * This directory should be provisioned keeping in mind the # ``swift_store_large_object_chunk_size`` and the maximum # number of images that could be uploaded simultaneously by # a given glance node. 
# # Possible values: # * String value representing an absolute directory path # # Related options: # * swift_buffer_on_upload # * swift_store_large_object_chunk_size # # (string value) #swift_upload_buffer_dir = # # Address of the ESX/ESXi or vCenter Server target system. # # This configuration option sets the address of the ESX/ESXi or vCenter # Server target system. This option is required when using the VMware # storage backend. The address can contain an IP address (127.0.0.1) or # a DNS name (www.my-domain.com). # # Possible Values: # * A valid IPv4 or IPv6 address # * A valid DNS name # # Related options: # * vmware_server_username # * vmware_server_password # # (host address value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_server_host = 127.0.0.1 # # Server username. # # This configuration option takes the username for authenticating with # the VMware ESX/ESXi or vCenter Server. This option is required when # using the VMware storage backend. # # Possible Values: # * Any string that is the username for a user with appropriate # privileges # # Related options: # * vmware_server_host # * vmware_server_password # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_server_username = root # # Server password. # # This configuration option takes the password for authenticating with # the VMware ESX/ESXi or vCenter Server. This option is required when # using the VMware storage backend. # # Possible Values: # * Any string that is a password corresponding to the username # specified using the "vmware_server_username" option # # Related options: # * vmware_server_host # * vmware_server_username # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_server_password = vmware # # The number of VMware API retries. # # This configuration option specifies the number of times the VMware # ESX/VC server API must be retried upon connection related issues or # server API call overload. It is not possible to specify 'retry # forever'. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #vmware_api_retry_count = 10 # # Interval in seconds used for polling remote tasks invoked on VMware # ESX/VC server. # # This configuration option takes in the sleep time in seconds for polling an # on-going async task as part of the VMWare ESX/VC server API call. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #vmware_task_poll_interval = 5 # # The directory where the glance images will be stored in the datastore. # # This configuration option specifies the path to the directory where the # glance images will be stored in the VMware datastore. If this option # is not set, the default directory where the glance images are stored # is openstack_glance. # # Possible Values: # * Any string that is a valid path to a directory # # Related options: # * None # # (string value) #vmware_store_image_dir = /openstack_glance # # Set verification of the ESX/vCenter server certificate. # # This configuration option takes a boolean value to determine # whether or not to verify the ESX/vCenter server certificate. If this # option is set to True, the ESX/vCenter server certificate is not # verified. 
If this option is set to False, then the default CA # truststore is used for verification. # # This option is ignored if the "vmware_ca_file" option is set. In that # case, the ESX/vCenter server certificate will then be verified using # the file specified using the "vmware_ca_file" option. # # Possible Values: # * True # * False # # Related options: # * vmware_ca_file # # (boolean value) # Deprecated group/name - [glance_store]/vmware_api_insecure #vmware_insecure = false # # Absolute path to the CA bundle file. # # This configuration option enables the operator to use a custom # Certificate Authority file to verify the ESX/vCenter certificate. # # If this option is set, the "vmware_insecure" option will be ignored # and the CA file specified will be used to authenticate the ESX/vCenter # server certificate and establish a secure connection to the server. # # Possible Values: # * Any string that is a valid absolute path to a CA file # # Related options: # * vmware_insecure # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_ca_file = /etc/ssl/certs/ca-certificates.crt # # The datastores where the image can be stored. # # This configuration option specifies the datastores where the image can # be stored in the VMWare store backend. This option may be specified # multiple times for specifying multiple datastores. The datastore name # should be specified after its datacenter path, separated by ":". An # optional weight may be given after the datastore name, separated again # by ":" to specify the priority. Thus, the required format becomes # <datacenter_path>:<datastore_name>:<optional_weight>. # # When adding an image, the datastore with the highest weight will be # selected, unless there is not enough free space available in cases # where the image size is already known. If no weight is given, it is # assumed to be zero and the directory will be considered for selection # last. If multiple datastores have the same weight, then the one with # the most free space available is selected. # # Possible Values: # * Any string of the format: # <datacenter_path>:<datastore_name>:<optional_weight> # # Related options: # * None # # (multi valued) #vmware_datastores = [healthcheck] # # From oslo.middleware # # Show more detailed information as part of the response. Security note: # Enabling this option may expose sensitive details about the service being # monitored. Be sure to verify that it will not violate your security policies. # (boolean value) #detailed = false # Additional backends that can perform health checks and report that # information back as part of a request. (list value) #backends = # A list of network addresses to limit the source IPs allowed to access healthcheck # information. Any request from an IP outside of these network addresses is # ignored. (list value) #allowed_source_ranges = # Ignore requests with proxy headers. (boolean value) #ignore_proxied_requests = false # Check the presence of a file to determine if an application is running on a # port. Used by DisableByFileHealthcheck plugin. (string value) #disable_by_file_path = # Check the presence of a file based on a port to determine if an application # is running on a port. Expects a "port:path" list of strings. Used by # DisableByFilesPortsHealthcheck plugin. (list value) #disable_by_file_paths = # Check the presence of files. Used by EnableByFilesHealthcheck plugin.
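#
# For example, a deployment that toggles the healthcheck result by touching
# marker files might list them as follows (the paths are hypothetical; any
# readable locations work):
#
#     enable_by_file_paths = /var/lib/tacker/healthcheck_enable,/etc/tacker/healthcheck_enable
#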
(list # value) #enable_by_file_paths = [k8s_vim] # # From tacker.nfvo.drivers.vim.kubernetes_driver # # Use barbican to encrypt vim password if True, save vim credentials in local # file system if False (boolean value) #use_barbican = true [key_manager] # # From tacker.keymgr # # The full class name of the key manager API class (string value) #api_class = tacker.keymgr.barbican_key_manager.BarbicanKeyManager # The endpoint for barbican API. (string value) #barbican_endpoint = # The version for barbican API. (string value) #barbican_version = v1 [keystone_authtoken] # # From keystonemiddleware.auth_token # # Complete "public" Identity API endpoint. This endpoint should not be an # "admin" endpoint, as it should be accessible by all end users. # Unauthenticated clients are redirected to this endpoint to authenticate. # Although this endpoint should ideally be unversioned, client support in the # wild varies. If you're using a versioned v2 endpoint here, then this should # *not* be the same endpoint the service user utilizes for validating tokens, # because normal end users may not be able to reach that endpoint. (string # value) # Deprecated group/name - [keystone_authtoken]/auth_uri #www_authenticate_uri = # DEPRECATED: Complete "public" Identity API endpoint. This endpoint should not # be an "admin" endpoint, as it should be accessible by all end users. # Unauthenticated clients are redirected to this endpoint to authenticate. # Although this endpoint should ideally be unversioned, client support in the # wild varies. If you're using a versioned v2 endpoint here, then this should # *not* be the same endpoint the service user utilizes for validating tokens, # because normal end users may not be able to reach that endpoint. This option # is deprecated in favor of www_authenticate_uri and will be removed in the S # release. (string value) # This option is deprecated for removal since Queens. # Its value may be silently ignored in the future. # Reason: The auth_uri option is deprecated in favor of www_authenticate_uri # and will be removed in the S release. #auth_uri = # API version of the Identity API endpoint. (string value) #auth_version = # Interface to use for the Identity API endpoint. Valid values are "public", # "internal" (default) or "admin". (string value) #interface = internal # Do not handle authorization requests within the middleware, but delegate the # authorization decision to downstream WSGI components. (boolean value) #delay_auth_decision = false # Request timeout value for communicating with Identity API server. (integer # value) #http_connect_timeout = # How many times are we trying to reconnect when communicating with Identity # API Server. (integer value) #http_request_max_retries = 3 # Request environment key where the Swift cache object is stored. When # auth_token middleware is deployed with a Swift cache, use this option to have # the middleware share a caching backend with swift. Otherwise, use the # ``memcached_servers`` option instead. (string value) #cache = # Required if identity server requires client certificate (string value) #certfile = # Required if identity server requires client certificate (string value) #keyfile = # A PEM encoded Certificate Authority to use when verifying HTTPs connections. # Defaults to system CAs. (string value) #cafile = # Verify HTTPS connections. (boolean value) #insecure = false # The region in which the identity server can be found. 
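#
# For example, in a multi-region cloud the middleware can be pinned to the
# region that hosts the identity service (the region name below is
# hypothetical):
#
#     region_name = RegionOne
#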
(string value) #region_name = # Optionally specify a list of memcached server(s) to use for caching. If left # undefined, tokens will instead be cached in-process. (list value) # Deprecated group/name - [keystone_authtoken]/memcache_servers #memcached_servers = # In order to prevent excessive effort spent validating tokens, the middleware # caches previously-seen tokens for a configurable duration (in seconds). Set # to -1 to disable caching completely. (integer value) #token_cache_time = 300 # (Optional) If defined, indicate whether token data should be authenticated or # authenticated and encrypted. If MAC, token data is authenticated (with HMAC) # in the cache. If ENCRYPT, token data is encrypted and authenticated in the # cache. If the value is not one of these options or empty, auth_token will # raise an exception on initialization. (string value) # Possible values: # None - # MAC - # ENCRYPT - #memcache_security_strategy = None # (Optional, mandatory if memcache_security_strategy is defined) This string is # used for key derivation. (string value) #memcache_secret_key = # (Optional) Global toggle for TLS usage when communicating with the caching # servers. (boolean value) #memcache_tls_enabled = false # (Optional) Path to a file of concatenated CA certificates in PEM format # necessary to establish the caching server's authenticity. If tls_enabled is # False, this option is ignored. (string value) #memcache_tls_cafile = # (Optional) Path to a single file in PEM format containing the client's # certificate as well as any number of CA certificates needed to establish the # certificate's authenticity. This file is only required when client side # authentication is necessary. If tls_enabled is False, this option is ignored. # (string value) #memcache_tls_certfile = # (Optional) Path to a single file containing the client's private key in PEM format. # Otherwise the private key will be taken from the file specified in # tls_certfile. If tls_enabled is False, this option is ignored. (string value) #memcache_tls_keyfile = # (Optional) Set the available ciphers for sockets created with the TLS # context. It should be a string in the OpenSSL cipher list format. If not # specified, all OpenSSL enabled ciphers will be available. (string value) #memcache_tls_allowed_ciphers = # (Optional) Number of seconds a memcached server is considered dead before it is # tried again. (integer value) #memcache_pool_dead_retry = 300 # (Optional) Maximum total number of open connections to every memcached # server. (integer value) #memcache_pool_maxsize = 10 # (Optional) Socket timeout in seconds for communicating with a memcached # server. (integer value) #memcache_pool_socket_timeout = 3 # (Optional) Number of seconds a connection to memcached is held unused in the # pool before it is closed. (integer value) #memcache_pool_unused_timeout = 60 # (Optional) Number of seconds that an operation will wait to get a memcached # client connection from the pool. (integer value) #memcache_pool_conn_get_timeout = 10 # (Optional) Use the advanced (eventlet safe) memcached client pool. (boolean # value) #memcache_use_advanced_pool = true # (Optional) Indicate whether to set the X-Service-Catalog header. If False, # middleware will not ask for service catalog on token validation and will not # set the X-Service-Catalog header. (boolean value) #include_service_catalog = true # Used to control the use and type of token binding. Can be set to: "disabled" # to not check token binding.
"permissive" (default) to validate binding # information if the bind type is of a form known to the server and ignore it # if not. "strict" like "permissive" but if the bind type is unknown the token # will be rejected. "required" any form of token binding is needed to be # allowed. Finally the name of a binding method that must be present in tokens. # (string value) #enforce_token_bind = permissive # A choice of roles that must be present in a service token. Service tokens are # allowed to request that an expired token can be used and so this check should # tightly control that only actual services should be sending this token. Roles # here are applied as an ANY check so any role in this list must be present. # For backwards compatibility reasons this currently only affects the # allow_expired check. (list value) #service_token_roles = service # For backwards compatibility reasons we must let valid service tokens pass # that don't pass the service_token_roles check as valid. Setting this true # will become the default in a future release and should be enabled if # possible. (boolean value) #service_token_roles_required = false # The name or type of the service as it appears in the service catalog. This is # used to validate tokens that have restricted access rules. (string value) #service_type = # Enable SASL (Simple Authentication and Security Layer) if true, else disable. # (boolean value) #memcache_sasl_enabled = false # The user name for SASL (string value) #memcache_username = # The password for the SASL user (string value) #memcache_password = # Authentication type to load (string value) # Deprecated group/name - [keystone_authtoken]/auth_plugin #auth_type = # Config Section from which to load plugin specific options (string value) #auth_section = # # From tacker.conductor.conductor_server # # User Domain Id (string value) #user_domain_id = default # Project Domain Id (string value) #project_domain_id = default # User Password (string value) #password = default # User Name (string value) #username = default # User Domain Name (string value) #user_domain_name = Default # Project Name (string value) #project_name = default # Project Domain Name (string value) #project_domain_name = Default # Keystone endpoint (string value) #auth_url = http://localhost/identity/v3 [kubernetes_vim] # # From tacker.vnfm.infra_drivers.kubernetes.kubernetes_driver # # Number of attempts to retry for stack creation/deletion (integer value) #stack_retries = 100 # Wait time (in seconds) between consecutive stack create/delete retries # (integer value) #stack_retry_wait = 5 [nfvo_vim] # # From tacker.nfvo.nfvo_plugin # # VIM driver for launching VNFs (list value) #vim_drivers = openstack,kubernetes [openstack_vim] # # From tacker.vnfm.infra_drivers.openstack.openstack # # Number of attempts to retry for stack creation/deletion (integer value) #stack_retries = 60 # Wait time (in seconds) between consecutive stack create/delete retries # (integer value) #stack_retry_wait = 10 # # From tacker.vnfm.infra_drivers.openstack.translate_template # # Flavor Extra Specs (dict value) #flavor_extra_specs = [oslo_messaging_kafka] # # From oslo.messaging # # Max fetch bytes of Kafka consumer (integer value) #kafka_max_fetch_bytes = 1048576 # Default timeout(s) for Kafka consumers (floating point value) #kafka_consumer_timeout = 1.0 # Group id for Kafka consumer.
Consumers in one group will coordinate message # consumption (string value) #consumer_group = oslo_messaging_consumer # Upper bound on the delay for KafkaProducer batching in seconds (floating # point value) #producer_batch_timeout = 0.0 # Size of batch for the producer async send (integer value) #producer_batch_size = 16384 # The compression codec for all data generated by the producer. If not set, # compression will not be used. Note that the allowed values of this option depend on # the Kafka version (string value) # Possible values: # none - # gzip - # snappy - # lz4 - # zstd - #compression_codec = none # Enable asynchronous consumer commits (boolean value) #enable_auto_commit = false # The maximum number of records returned in a poll call (integer value) #max_poll_records = 500 # Protocol used to communicate with brokers (string value) # Possible values: # PLAINTEXT - # SASL_PLAINTEXT - # SSL - # SASL_SSL - #security_protocol = PLAINTEXT # Mechanism when security protocol is SASL (string value) #sasl_mechanism = PLAIN # CA certificate PEM file used to verify the server certificate (string value) #ssl_cafile = # Client certificate PEM file used for authentication. (string value) #ssl_client_cert_file = # Client key PEM file used for authentication. (string value) #ssl_client_key_file = # Client key password file used for authentication. (string value) #ssl_client_key_password = [oslo_messaging_notifications] # # From oslo.messaging # # The driver(s) to handle sending notifications. Possible values are # messaging, messagingv2, routing, log, test, noop (multi valued) #driver = # A URL representing the messaging driver to use for notifications. If not set, # we fall back to the same configuration used for RPC. (string value) #transport_url = # AMQP topic used for OpenStack notifications. (list value) #topics = notifications # The maximum number of attempts to re-send a notification message which failed # to be delivered due to a recoverable error. 0 - No retry, -1 - indefinite # (integer value) #retry = -1 [oslo_messaging_rabbit] # # From oslo.messaging # # Use durable queues in AMQP. If rabbit_quorum_queue is enabled, queues will be # durable and this value will be ignored. (boolean value) #amqp_durable_queues = false # Auto-delete queues in AMQP. (boolean value) #amqp_auto_delete = false # Size of RPC connection pool. (integer value) # Minimum value: 1 #rpc_conn_pool_size = 30 # The pool size limit for the connection expiration policy (integer value) #conn_pool_min_size = 2 # The time-to-live in sec of idle connections in the pool (integer value) #conn_pool_ttl = 1200 # Connect over SSL. (boolean value) #ssl = false # SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and # SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some # distributions. (string value) #ssl_version = # SSL key file (valid only if SSL enabled). (string value) #ssl_key_file = # SSL cert file (valid only if SSL enabled). (string value) #ssl_cert_file = # SSL certification authority file (valid only if SSL enabled). (string value) #ssl_ca_file = # DEPRECATED: Global toggle for enforcing the OpenSSL FIPS mode. This feature # requires Python support. This is available in Python 3.9 in all environments # and may have been backported to older Python versions on select environments. # If the Python executable used does not support OpenSSL FIPS mode, an # exception will be raised. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future.
# Reason: FIPS_mode_set API was removed in OpenSSL 3.0.0. This option has no # effect now. #ssl_enforce_fips_mode = false # DEPRECATED: (DEPRECATED) It is recommended not to use this option anymore. Run # the health check heartbeat thread through a native python thread by default. # If this option is equal to False then the health check heartbeat will inherit # the execution model from the parent process. For example if the parent # process has monkey patched the stdlib by using eventlet/greenlet then the # heartbeat will be run through a green thread. This option should be set to # True only for the wsgi services. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: The option is related to Eventlet which will be removed. In addition # this has never worked as expected with services using eventlet for core # service framework. #heartbeat_in_pthread = false # How long to wait (in seconds) before reconnecting in response to an AMQP # consumer cancel notification. (floating point value) # Minimum value: 0.0 # Maximum value: 4.5 #kombu_reconnect_delay = 1.0 # Random time to wait when reconnecting in response to an AMQP consumer # cancel notification. (floating point value) # Minimum value: 0.0 #kombu_reconnect_splay = 0.0 # EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not # be used. This option may not be available in future versions. (string value) #kombu_compression = # How long to wait for a missing client before abandoning the attempt to send it its # replies. This value should not be longer than rpc_response_timeout. (integer value) # Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout #kombu_missing_consumer_retry_timeout = 60 # Determines how the next RabbitMQ node is chosen in case the one we are # currently connected to becomes unavailable. Takes effect only if more than # one RabbitMQ node is provided in config. (string value) # Possible values: # round-robin - # shuffle - #kombu_failover_strategy = round-robin # The RabbitMQ login method. (string value) # Possible values: # PLAIN - # AMQPLAIN - # EXTERNAL - # RABBIT-CR-DEMO - #rabbit_login_method = AMQPLAIN # How frequently to retry connecting with RabbitMQ. (integer value) # Minimum value: 1 #rabbit_retry_interval = 1 # How long to back off between retries when connecting to RabbitMQ. (integer # value) # Minimum value: 0 #rabbit_retry_backoff = 2 # Maximum interval of RabbitMQ connection retries. (integer value) # Minimum value: 1 #rabbit_interval_max = 30 # Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this # option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring # is no longer controlled by the x-ha-policy argument when declaring a queue. # If you just want to make sure that all queues (except those with auto- # generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy # HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value) #rabbit_ha_queues = false # Use quorum queues in RabbitMQ (x-queue-type: quorum). The quorum queue is a # modern queue type for RabbitMQ implementing a durable, replicated FIFO queue # based on the Raft consensus algorithm. It is available as of RabbitMQ 3.8.0. # If set this option will conflict with the HA queues (``rabbit_ha_queues``) # aka mirrored queues, in other words the HA queues should be disabled. Quorum # queues are also durable by default so the amqp_durable_queues option is # ignored when this option is enabled.
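#
# For example, a deployment moving from mirrored (HA) queues to quorum queues
# might set the following in the [oslo_messaging_rabbit] section (illustrative
# values only; amqp_durable_queues is then ignored as noted above):
#
#     rabbit_quorum_queue = true
#     rabbit_ha_queues = false
#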
(boolean value) #rabbit_quorum_queue = false # Use quorum queues for transient queues in RabbitMQ. Enabling this option # will then make sure those queues are also using quorum-type RabbitMQ queues, # which are HA by default. (boolean value) #rabbit_transient_quorum_queue = false # Each time a message is redelivered to a consumer, a counter is incremented. # Once the redelivery count exceeds the delivery limit the message gets dropped # or dead-lettered (if a DLX exchange has been configured). Used only when # rabbit_quorum_queue is enabled. Default 0 means do not set a limit. # (integer value) #rabbit_quorum_delivery_limit = 0 # By default all messages are maintained in memory; if a quorum queue grows in # length it can put memory pressure on a cluster. This option can limit the # number of messages in the quorum queue. Used only when rabbit_quorum_queue is # enabled. Default 0 means do not set a limit. (integer value) #rabbit_quorum_max_memory_length = 0 # By default all messages are maintained in memory; if a quorum queue grows in # length it can put memory pressure on a cluster. This option can limit the # number of memory bytes used by the quorum queue. Used only when # rabbit_quorum_queue is enabled. Default 0 means do not set a limit. # (integer value) #rabbit_quorum_max_memory_bytes = 0 # Positive integer representing duration in seconds for queue TTL (x-expires). # Queues which are unused for the duration of the TTL are automatically # deleted. The parameter affects only reply and fanout queues. Setting 0 as # the value will disable the x-expires. If doing so, make sure you have a RabbitMQ # policy to delete the queues or your deployment will create an infinite number # of queues over time. In case rabbit_stream_fanout is set to True, this option # will control the data retention policy (x-max-age) for messages in the fanout # queue rather than the queue duration itself, so the oldest data in the stream # queue will be discarded once it reaches the TTL. Setting this to 0 will disable # x-max-age for the stream, which makes the stream grow indefinitely, filling up # the disk space (integer value) # Minimum value: 0 #rabbit_transient_queues_ttl = 1800 # Specifies the number of messages to prefetch. Setting to zero allows # unlimited messages. (integer value) #rabbit_qos_prefetch_count = 0 # Number of seconds after which the Rabbit broker is considered down if # heartbeat's keep-alive fails (0 disables heartbeat). (integer value) #heartbeat_timeout_threshold = 60 # How many times during the heartbeat_timeout_threshold we check the # heartbeat. (integer value) #heartbeat_rate = 3 # DEPRECATED: (DEPRECATED) Enable/Disable the RabbitMQ mandatory flag for # direct send. The direct send is used as a reply, so the MessageUndeliverable # exception is raised in case the client queue does not # exist. The MessageUndeliverable exception will be used to loop for a timeout to # give the sender a chance to recover. This flag is deprecated and it will not be # possible to deactivate this functionality anymore (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Mandatory flag no longer deactivable. #direct_mandatory_flag = true # Enable x-cancel-on-ha-failover flag so that the rabbitmq server will cancel and # notify consumers when the queue is down (boolean value) #enable_cancel_on_failover = false # Should we use consistent queue names or random ones (boolean value) #use_queue_manager = false # Hostname used by queue manager.
Defaults to the value returned by # socket.gethostname(). (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #hostname = node1.example.com # Process name used by queue manager (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #processname = nova-api # Use stream queues in RabbitMQ (x-queue-type: stream). Streams are a new # persistent and replicated data structure ("queue type") in RabbitMQ which # models an append-only log with non-destructive consumer semantics. It is # available as of RabbitMQ 3.9.0. If set this option will replace all fanout # queues with only one stream queue. (boolean value) #rabbit_stream_fanout = false [oslo_middleware] # # From oslo.middleware # # The maximum body size for each request, in bytes. (integer value) #max_request_body_size = 114688 # Whether the application is behind a proxy or not. This determines if the # middleware should parse the headers or not. (boolean value) #enable_proxy_headers_parsing = false # HTTP basic auth password file. (string value) #http_basic_auth_user_file = /etc/htpasswd [oslo_policy] # # From oslo.policy # # DEPRECATED: This option controls whether or not to enforce scope when # evaluating policies. If ``True``, the scope of the token used in the request # is compared to the ``scope_types`` of the policy being enforced. If the # scopes do not match, an ``InvalidScope`` exception will be raised. If # ``False``, a message will be logged informing operators that policies are # being invoked with mismatching scope. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This configuration was added temporarily to facilitate a smooth # transition to the new RBAC. OpenStack will always enforce scope checks. This # configuration option is deprecated and will be removed in the 2025.2 cycle. #enforce_scope = false # This option controls whether or not to use old deprecated defaults when # evaluating policies. If ``True``, the old deprecated defaults are not going # to be evaluated. This means if any existing token is allowed for old defaults # but is disallowed for new defaults, it will be disallowed. It is encouraged # to enable this flag along with the ``enforce_scope`` flag so that you can get # the benefits of new defaults and ``scope_type`` together. If ``False``, the # deprecated policy check string is logically OR'd with the new policy check # string, allowing for a graceful upgrade experience between releases with new # policies, which is the default behavior. (boolean value) #enforce_new_defaults = false # The relative or absolute path of a file that maps roles to permissions for a # given service. Relative paths must be specified in relation to the # configuration file setting this option. (string value) #policy_file = policy.yaml # Default rule. Enforced when a requested rule is not found. (string value) #policy_default_rule = default # Directories where policy configuration files are stored. They can be relative # to any directory in the search path defined by the config_dir option, or # absolute paths. The file defined by policy_file must exist for these # directories to be searched. Missing or empty directories are ignored. 
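#
# For example, to load policy overrides from an additional directory next to
# the main policy file, the multi-valued option can be repeated (the second
# path below is hypothetical):
#
#     policy_dirs = policy.d
#     policy_dirs = /etc/tacker/policy.d
#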
(multi # valued) #policy_dirs = policy.d # Content Type to send and receive data for REST based policy check (string # value) # Possible values: # application/x-www-form-urlencoded - # application/json - #remote_content_type = application/x-www-form-urlencoded # Server identity verification for REST based policy check (boolean value) #remote_ssl_verify_server_crt = false # Absolute path to the CA cert file for REST based policy check (string value) #remote_ssl_ca_crt_file = # Absolute path to the client cert for REST based policy check (string value) #remote_ssl_client_crt_file = # Absolute path to the client key file for REST based policy check (string value) #remote_ssl_client_key_file = # Timeout in seconds for REST based policy check (floating point value) # Minimum value: 0 #remote_timeout = 60 # # From tacker.conf # # Enable enhanced tacker policy (boolean value) #enhanced_tacker_policy = false [oslo_reports] # # From oslo.reports # # Path to a log directory in which to create a file (string value) #log_dir = # The path to a file to watch for changes to trigger the reports, instead of # signals. Setting this option disables the signal trigger for the reports. If # the application is running as a WSGI application it is recommended to use this # instead of signals. (string value) #file_event_handler = # How many seconds to wait between polls when file_event_handler is set # (integer value) #file_event_handler_interval = 1 [prometheus_plugin] # # From tacker.sol_refactored.common.config # # Enable prometheus plugin performance management (boolean value) #performance_management = false # Some margin time for the PM job's reportingPeriod (integer value) #reporting_period_margin = 1 # Enable prometheus plugin fault management (boolean value) #fault_management = false # Enable prometheus plugin autohealing (boolean value) #auto_healing = false # Enable prometheus plugin autoscaling (boolean value) #auto_scaling = false # Package name for performance management PMJob. This configuration is changed # in case of replacing the original function with a vendor specific function. # (string value) #performance_management_package = tacker.sol_refactored.common.prometheus_plugin # Package name for performance management threshold. This configuration is # changed in case of replacing the original function with a vendor specific # function. (string value) #performance_management_threshold_package = tacker.sol_refactored.common.prometheus_plugin # Class name for performance management PMJob. This configuration is changed in # case of replacing the original function with a vendor specific function. # (string value) #performance_management_class = PrometheusPluginPm # Class name for performance management threshold. This configuration is # changed in case of replacing the original function with a vendor specific # function. (string value) #performance_management_threshold_class = PrometheusPluginThreshold # Package name for fault management. This configuration is changed in case of # replacing the original function with a vendor specific function. (string # value) #fault_management_package = tacker.sol_refactored.common.prometheus_plugin # Class name for fault management. This configuration is changed in case of # replacing the original function with a vendor specific function. (string # value) #fault_management_class = PrometheusPluginFm # Package name for auto healing. This configuration is changed in case of # replacing the original function with a vendor specific function.
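#
# For example, to swap in a vendor-specific auto healing implementation, both
# the package and the corresponding class option would typically be overridden
# (the package and class names below are hypothetical):
#
#     auto_healing_package = my_vendor.tacker_plugins.prometheus_plugin
#     auto_healing_class = MyVendorAutoHealing
#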
(string # value) #auto_healing_package = tacker.sol_refactored.common.prometheus_plugin # Class name for auto healing. This configuration is changed in case of # replacing the original function with a vendor specific function. (string # value) #auto_healing_class = PrometheusPluginAutoHealing # Timeout (second) of packing for multiple auto healing. (integer value) #timer_interval = 20 # Package name for auto scaling. This configuration is changed in case of # replacing the original function with a vendor specific function. (string # value) #auto_scaling_package = tacker.sol_refactored.common.prometheus_plugin # Class name for auto scaling. This configuration is changed in case of # replacing the original function with a vendor specific function. (string # value) #auto_scaling_class = PrometheusPluginAutoScaling # Enable rule file validation using promtool. (boolean value) #test_rule_with_promtool = false # The time of reportingPeriod for the PM Threshold. If there is a PromQL that # requires `reporting_period`, it is read from the configuration file. The unit # shall be seconds. (integer value) #reporting_period_threshold = 90 # The time of collectionPeriod for the PM threshold. If there is a PromQL that # requires `collection_period`, it is read from the configuration file. The # unit shall be seconds. (integer value) #collection_period_threshold = 30 [server_notification] # # From tacker.sol_refactored.common.config # # Enable server notification autohealing (boolean value) #server_notification = false # Uri path prefix string for server notification. When changing this # configuration, server_notification description in api-paste.ini must be # changed to the same value. (string value) #uri_path_prefix = /server_notification # Timeout (second) of packing for multiple server notification. (integer value) #timer_interval = 20 # Package name for server notification. This configuration is changed in case # of replacing the original function with a vendor specific function. (string # value) #server_notification_package = tacker.sol_refactored.common.server_notification # Class name for server notification. This configuration is changed in case of # replacing the original function with a vendor specific function. (string # value) #server_notification_class = ServerNotification [tacker] # # From tacker.vnflcm.vnflcm_driver # # Hosting vnf drivers tacker plugin will use (list value) #vnflcm_infra_driver = openstack,kubernetes # MGMT driver to communicate with Hosting VNF/logical service instance tacker # plugin will use (list value) #vnflcm_mgmt_driver = vnflcm_noop # # From tacker.vnfm.plugin # # Hosting vnf drivers tacker plugin will use (list value) #infra_driver = noop,openstack,kubernetes [v2_nfvo] # # From tacker.sol_refactored.common.config # # Use external NFVO if True, use internal NFVO in tacker if False (boolean # value) #use_external_nfvo = false # Grant api_version of NFVO. (string value) #grant_api_version = 1.4.0 # Vnf package management api_version of NFVO. (string value) #vnfpkgm_api_version = 2.1.0 # Endpoint of external NFVO. (string value) #endpoint = # Token endpoint for OAuth2.0 authentication. (string value) #token_endpoint = # Client id used by OAuth2.0 authentication. (string value) #client_id = # Client password used by OAuth2.0 authentication. (string value) #client_password = # Vnf package content cache directory. (string value) #vnf_package_cache_dir = /opt/stack/data/tacker/vnf_package_cache # CA Certificate file used by OAuth2.0 mTLS authentication. 
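#
# For example, when the external NFVO requires OAuth2.0 mTLS, the CA and
# client certificates might be configured as follows (the file paths are
# hypothetical):
#
#     mtls_ca_cert_file = /etc/tacker/certs/nfvo_ca.pem
#     mtls_client_cert_file = /etc/tacker/certs/nfvo_client.pem
#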
(string value) #mtls_ca_cert_file = # Client Certificate file used by OAuth2.0 mTLS authentication. (string value) #mtls_client_cert_file = # Check to get notifications from the callback URI. (boolean value) #test_callback_uri = true # Zones used for test which are returned in the Grant response. (list value) #test_grant_zone_list = nova # Use password authentication if True, use certificate authentication if # False. (boolean value) #use_client_secret_basic = false # Enable certificate verification during SSL/TLS communication to NFVO. # (boolean value) #nfvo_verify_cert = false # Specifies the root CA certificate to use when the nfvo_verify_cert option is # True. (string value) #nfvo_ca_cert_file = [v2_vnfm] # # From tacker.sol_refactored.common.config # # Endpoint of VNFM (self). (string value) #endpoint = http://127.0.0.1:9890 # Default timeout value (second) of GRACEFUL termination. (integer value) #default_graceful_termination_timeout = 10 # Max content length for list APIs. (integer value) #max_content_length = 1000000 # Timeout (in minutes) of heat stack creation. (integer value) #openstack_vim_stack_create_timeout = 20 # Timeout (second) of k8s resource creation. (integer value) #kubernetes_vim_rsc_wait_timeout = 500 # Paged response size of the query result for VNF instances. (integer value) #vnf_instance_page_size = 0 # Paged response size of the query result for Subscriptions. (integer value) #subscription_page_size = 0 # Paged response size of the query result for VNF LCM operation occurrences. # (integer value) #lcm_op_occ_page_size = 0 # CA Certificate file used by OAuth2.0 mTLS authentication. (string value) #notification_mtls_ca_cert_file = # Client Certificate file used by OAuth2.0 mTLS authentication. (string value) #notification_mtls_client_cert_file = # Number of retries that should be attempted for connection error when sending # a notification. The period between retries is exponential, starting at 0.5 seconds up # to a maximum of 60 seconds. (integer value) #notify_connect_retries = 0 # Paged response size of the query result for VNF Fault Management alarm. # (integer value) #vnffm_alarm_page_size = 0 # Paged response size of the query result for VNF PM threshold. (integer value) #vnfpm_pmthreshold_page_size = 0 # Paged response size of the query result for VNF PM job. (integer value) #vnfpm_pmjob_page_size = 0 # If True, the fallbackBestEffort setting is enabled and Availability Zone # reselection is run. (boolean value) #placement_fallback_best_effort = false # Number of retries to reselect Availability Zone. Default value "0" means # unlimited number of retries. (integer value) #placement_az_select_retry = 0 # Error message for Availability Zone reselection. These configs are regular # expressions to detect error messages from OpenStack Heat. (string value) #placement_az_resource_error = Resource CREATE failed: ResourceInError: resources\.(.*)\.(.*): (.*)|Resource UPDATE failed: resources\.(.*): Resource CREATE failed: ResourceInError: resources\.(.*): (.*) # If True, enable rollback stack on resource create failure. (boolean value) #enable_rollback_stack = false # Enable deleting LCM operation occurrences if True. This is intended for use # during development. (boolean value) #test_enable_lcm_op_occ_delete = false # Enable certificate verification during SSL/TLS communication to notification # server. (boolean value) #notification_verify_cert = false # Specifies the root CA certificate to use when the notification_verify_cert # option is True.
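#
# For example, to verify notification endpoints that are signed by a private
# CA (the path below is hypothetical):
#
#     notification_verify_cert = true
#     notification_ca_cert_file = /etc/tacker/certs/notification_ca.pem
#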
(string value) #notification_ca_cert_file = # Enable OAuth2.0 mTLS authentication for heat server. (boolean value) #use_oauth2_mtls_for_heat = false # CA Certificate file used by OAuth2.0 mTLS authentication. (string value) #heat_mtls_ca_cert_file = # Client Certificate file used by OAuth2.0 mTLS authentication. (string value) #heat_mtls_client_cert_file = # Enable certificate verification during SSL/TLS communication to heat server. # (boolean value) #heat_verify_cert = false # Specifies the root CA certificate to use when the heat_verify_cert option is # True. (string value) #heat_ca_cert_file = # Temporary directory for Terraform infra-driver to store terraform config # files (string value) #tf_file_dir = /var/lib/tacker/terraform # Enable certificate verification during SSL/TLS communication to nova server. # (boolean value) #nova_verify_cert = false # Specifies the root CA certificate to use when the nova_verify_cert option is # True. (string value) #nova_ca_cert_file = [vim_keys] # # From tacker.nfvo.drivers.vim.openstack_driver # # Directory path to store fernet keys. (string value) #openstack = /etc/tacker/vim/fernet_keys # Use barbican to encrypt vim password if True, save vim credentials in local # file system if False (boolean value) #use_barbican = false # Specify the filename of the default secret key, if available. If not # specified, a key will be generated for each vim_id. If a key with the vim_id # name exists, it will be used. (string value) #default_secret_key = [vnf_lcm] # Vnflcm options group # # From tacker.conf # # endpoint_url (string value) #endpoint_url = http://localhost:9890/ # # Name of the boolean key in ``additionalParams`` that toggles including # block storage for a v1 heal request. # # Example payload:: # # { # "additionalParams": { # "tacker_extension_heal_include_block_storage": true # } # } # (string value) #heal_include_block_storage_key = tacker_extension_heal_include_block_storage # # Default behaviour when a v1 heal request omits the per-request key. # If the key named by ``heal_include_block_storage_key`` is present in # ``additionalParams``, that value takes precedence over this option. # # In ``tacker.conf`` (``[vnf_lcm]`` section):: # # heal_vnfc_block_storage = false # (boolean value) #heal_vnfc_block_storage = true # Number of subscriptions (integer value) #subscription_num = 100 # Number of retries (integer value) #retry_num = 3 # Retry interval (sec) (integer value) #retry_wait = 10 # Retry timeout (sec) (integer value) #retry_timeout = 10 # Test callbackUri (boolean value) #test_callback_uri = true # LCM operation timeout (sec) (integer value) #operation_timeout = 60 # Verify the certificate when sending notifications over SSL (boolean value) #verify_notification_ssl = true # Number of lcm_op_occs contained in 1 page (integer value) #lcm_op_occ_num = 100 # Number of vnf_instances contained in 1 page (integer value) #vnf_instance_num = 100 [vnf_package] # # Options under this group are used to store vnf packages in glance store. # # From tacker.conf # # Path to store extracted CSAR files (string value) #vnf_package_csar_path = /var/lib/tacker/vnfpackages/ # # Maximum size of a CSAR file a user can upload, in GB. # # A CSAR file upload greater than the size mentioned here would result # in a CSAR upload failure. This configuration option defaults to # 1024 GB (1 TiB). # # NOTES: # * This value should only be increased after careful # consideration and must be set less than or equal to # 8 EiB (~9223372036). # * This value must be set with careful consideration of the # backend storage capacity.
Setting this to a very low value # may result in a large number of image failures. And, setting # this to a very large value may result in faster consumption # of storage. Hence, this must be set according to the nature of # images created and the storage capacity available. # # Possible values: # * Any positive number less than or equal to 9223372036854775808 # (floating point value) # Minimum value: 1e-06 # Maximum value: 9223372036 #csar_file_size_cap = 1024 # # Secure hashing algorithm used for computing the 'hash' property. # # Possible values: # * sha256, sha512 # # Related options: # * None # (string value) #hashing_algorithm = sha512 # List of items to get from top-vnfd (list value) #get_top_list = tosca_definitions_version,description,metadata # Exclude node from node_template (list value) #exclude_node = VNF # List of types to get from lower-vnfd (list value) #get_lower_list = tosca.nodes.nfv.VNF # List of inputs to delete from the lower-vnfd (list value) #del_input_list = descriptor_id,descriptor_version,provider,product_name,software_version,vnfm_info,flavour_id,flavour_description # Number of vnf_packages contained in 1 page (integer value) #vnf_package_num = 100
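#
# For example, a deployment that accepts larger CSAR packages and larger
# listing pages might override the [vnf_package] defaults as follows (the
# values are hypothetical and should be sized to the available backend
# storage):
#
#     csar_file_size_cap = 2048
#     vnf_package_num = 200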