++ LOG_DIR=/var/log/kafka
++ export CONTAINER_LOG_DIR=/var/log/kafka/analytics-alarm-kafka
++ CONTAINER_LOG_DIR=/var/log/kafka/analytics-alarm-kafka
++ mkdir -p /var/log/kafka/analytics-alarm-kafka
++ log_file=/var/log/kafka/analytics-alarm-kafka/console.log
++ touch /var/log/kafka/analytics-alarm-kafka/console.log
++ chmod 600 /var/log/kafka/analytics-alarm-kafka/console.log
++ exec
+++ tee -a /var/log/kafka/analytics-alarm-kafka/console.log
+++ date
++ echo 'INFO: =================== Fri Apr 18 05:24:32 UTC 2025 ==================='
INFO: =================== Fri Apr 18 05:24:32 UTC 2025 ===================
++ LOG_LOCAL=1
++ source /functions.sh
++ source /contrail-functions.sh
+++ get_default_ip
++++ get_default_nic
++++ get_gateway_nic_for_ip 1
++++ command -v ip
++++ local ip=1
+++++ grep -o 'dev.*'
+++++ ip route get 1
+++++ awk '{print $2}'
++++ local iface=ens3
++++ [[ ens3 == \l\o ]]
++++ echo ens3
+++ local nic=ens3
+++ get_ip_for_nic ens3
+++ local nic=ens3
+++ cut -d / -f 1
+++ get_cidr_for_nic ens3
+++ command -v ip
+++ local nic=ens3
+++ ip addr show dev ens3
+++ grep 'inet '
+++ awk '{print $2}'
+++ head -n 1
++ DEFAULT_LOCAL_IP=10.0.0.48
++ ENCAP_PRIORITY=MPLSoUDP,MPLSoGRE,VXLAN
++ VXLAN_VN_ID_MODE=automatic
++ DPDK_UIO_DRIVER=uio_pci_generic
++ CPU_CORE_MASK=0x01
++ SERVICE_CORE_MASK=
++ DPDK_CTRL_THREAD_MASK=
++ HUGE_PAGES=
++ HUGE_PAGES_DIR=/dev/hugepages
++ HUGE_PAGES_1GB=0
++ HUGE_PAGES_2MB=0
++ HUGE_PAGES_1GB_DIR=
++ HUGE_PAGES_2MB_DIR=
++ [[ 0 != 0 ]]
++ [[ 0 != 0 ]]
++ DPDK_MEM_PER_SOCKET=1024
++ DPDK_COMMAND_ADDITIONAL_ARGS=
++ NIC_OFFLOAD_ENABLE=False
++ DPDK_ENABLE_VLAN_FWRD=False
++ DIST_SNAT_PROTO_PORT_LIST=
++ CLOUD_ORCHESTRATOR=kubernetes
++ CLOUD_ADMIN_ROLE=admin
++ AAA_MODE=no-auth
++ AUTH_MODE=noauth
++ AUTH_PARAMS=
++ SSL_ENABLE=False
++ SSL_INSECURE=True
++ SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ SERVER_CA_KEYFILE=/etc/contrail/ssl/private/ca-key.pem
++ SELFSIGNED_CERTS_WITH_IPS=True
++ CONTROLLER_NODES=10.0.0.48
++ ANALYTICS_ALARM_ENABLE=True
++ ANALYTICS_SNMP_ENABLE=True
++ ANALYTICSDB_ENABLE=True
++ ANALYTICS_NODES=10.0.0.48
++ ANALYTICSDB_NODES=10.0.0.48
++ ANALYTICS_SNMP_NODES=10.0.0.48
++ ANALYTICS_API_PORT=8081
++ ANALYTICS_API_INTROSPECT_PORT=8090
++ ANALYTICSDB_PORT=9160
++ ANALYTICSDB_CQL_PORT=9042
++ TOPOLOGY_INTROSPECT_PORT=5921
++ QUERYENGINE_INTROSPECT_PORT=8091
+++ get_server_list ANALYTICS ':8081 '
+++ local server_typ=ANALYTICS_NODES
+++ local 'port_with_delim=:8081 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.48
+++ local server_address=10.0.0.48
+++ extended_server_list+='10.0.0.48:8081 '
+++ '[' -n '10.0.0.48:8081 ' ']'
+++ echo 10.0.0.48:8081
++ ANALYTICS_SERVERS=10.0.0.48:8081
+++ get_server_list ANALYTICSDB ':9042 '
+++ local server_typ=ANALYTICSDB_NODES
+++ local 'port_with_delim=:9042 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.48
+++ local server_address=10.0.0.48
+++ extended_server_list+='10.0.0.48:9042 '
+++ '[' -n '10.0.0.48:9042 ' ']'
+++ echo 10.0.0.48:9042
++ ANALYTICSDB_CQL_SERVERS=10.0.0.48:9042
++ ANALYTICS_API_VIP=
++ ANALYTICS_ALARM_NODES=10.0.0.48
++ ALARMGEN_INTROSPECT_PORT=5995
++ BGP_PORT=179
++ BGP_AUTO_MESH=true
++ BGP_ASN=64512
++ ENABLE_4BYTE_AS=false
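The NIC and IP discovery traced above reduces to two small helpers. This is a minimal sketch reconstructed from the trace, not the literal /functions.sh or /contrail-functions.sh source, which may differ in details:

# Sketch of the discovery seen in the trace (assumption: reconstructed, not verbatim).
get_default_nic() {
    # 'ip route get 1' resolves a route toward 1.0.0.0 and prints '... dev ens3 ...';
    # the word after 'dev' is the default-route interface.
    ip route get 1 | grep -o 'dev.*' | awk '{print $2}'
}
get_ip_for_nic() {
    # First 'inet' CIDR on the NIC with the prefix length stripped: 10.0.0.48/24 -> 10.0.0.48
    ip addr show dev "$1" | grep 'inet ' | awk '{print $2}' | head -n 1 | cut -d / -f 1
}
DEFAULT_LOCAL_IP=$(get_ip_for_nic "$(get_default_nic)")   # 10.0.0.48 in this run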
++ APPLY_DEFAULTS=true
++ COLLECTOR_PORT=8086
++ COLLECTOR_INTROSPECT_PORT=8089
++ COLLECTOR_SYSLOG_PORT=514
++ COLLECTOR_SFLOW_PORT=6343
++ COLLECTOR_IPFIX_PORT=4739
++ COLLECTOR_PROTOBUF_PORT=3333
++ COLLECTOR_STRUCTURED_SYSLOG_PORT=3514
++ SNMPCOLLECTOR_INTROSPECT_PORT=5920
+++ get_server_list ANALYTICS ':8086 '
+++ local server_typ=ANALYTICS_NODES
+++ local 'port_with_delim=:8086 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.48
+++ local server_address=10.0.0.48
+++ extended_server_list+='10.0.0.48:8086 '
+++ '[' -n '10.0.0.48:8086 ' ']'
+++ echo 10.0.0.48:8086
++ COLLECTOR_SERVERS=10.0.0.48:8086
++ CASSANDRA_PORT=9160
++ CASSANDRA_CQL_PORT=9042
++ CASSANDRA_SSL_STORAGE_PORT=7011
++ CASSANDRA_STORAGE_PORT=7010
++ CASSANDRA_JMX_LOCAL_PORT=7200
++ CONFIGDB_CASSANDRA_DRIVER=cql
++ CONFIG_NODES=10.0.0.48
++ CONFIGDB_NODES=10.0.0.48
++ CONFIG_API_PORT=8082
++ CONFIG_API_INTROSPECT_PORT=8084
++ CONFIG_API_ADMIN_PORT=8095
++ CONFIGDB_PORT=9161
++ CONFIGDB_CQL_PORT=9041
+++ get_server_list CONFIG ':8082 '
+++ local server_typ=CONFIG_NODES
+++ local 'port_with_delim=:8082 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.48
+++ local server_address=10.0.0.48
+++ extended_server_list+='10.0.0.48:8082 '
+++ '[' -n '10.0.0.48:8082 ' ']'
+++ echo 10.0.0.48:8082
++ CONFIG_SERVERS=10.0.0.48:8082
+++ get_server_list CONFIGDB ':9161 '
+++ local server_typ=CONFIGDB_NODES
+++ local 'port_with_delim=:9161 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.48
+++ local server_address=10.0.0.48
+++ extended_server_list+='10.0.0.48:9161 '
+++ '[' -n '10.0.0.48:9161 ' ']'
+++ echo 10.0.0.48:9161
++ CONFIGDB_SERVERS=10.0.0.48:9161
+++ get_server_list CONFIGDB ':9041 '
+++ local server_typ=CONFIGDB_NODES
+++ local 'port_with_delim=:9041 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.48
+++ local server_address=10.0.0.48
+++ extended_server_list+='10.0.0.48:9041 '
+++ '[' -n '10.0.0.48:9041 ' ']'
+++ echo 10.0.0.48:9041
++ CONFIGDB_CQL_SERVERS=10.0.0.48:9041
++ CONFIG_API_VIP=
++ CONFIG_API_SSL_ENABLE=False
++ CONFIG_API_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ CONFIG_API_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ CONFIG_API_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ CONFIG_API_WORKER_COUNT=1
++ CONFIG_API_MAX_REQUESTS=1024
++ ANALYTICS_API_SSL_ENABLE=False
++ ANALYTICS_API_SSL_INSECURE=True
++ ANALYTICS_API_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ ANALYTICS_API_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ ANALYTICS_API_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ CASSANDRA_SSL_ENABLE=False
++ CASSANDRA_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ CASSANDRA_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ CASSANDRA_SSL_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ CASSANDRA_SSL_KEYSTORE_PASSWORD=astrophytum
++ CASSANDRA_SSL_TRUSTSTORE_PASSWORD=ornatum
++ CASSANDRA_SSL_PROTOCOL=TLS
++ CASSANDRA_SSL_ALGORITHM=SunX509
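get_server_list, called repeatedly above, expands a comma-separated *_NODES list into host:port pairs joined by a caller-chosen delimiter. A minimal sketch consistent with the traced behavior (the real helper may differ; the trailing-delimiter trim here is an assumption):

get_server_list() {
    # Example from the trace: get_server_list CONFIG ':8082 '  ->  '10.0.0.48:8082'
    local server_typ=${1}_NODES
    local port_with_delim=$2
    local server_list extended_server_list=
    IFS=',' read -ra server_list <<< "${!server_typ}"
    for server in "${server_list[@]}"; do
        extended_server_list+="${server}${port_with_delim}"
    done
    if [ -n "$extended_server_list" ]; then
        echo "${extended_server_list%[, ]}"    # trim the trailing delimiter (space or comma)
    fi
}

With every *_NODES variable set to the single address 10.0.0.48, each *_SERVERS value collapses to one host:port pair; on a multi-node cluster the same calls would yield delimited lists.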
++ CASSANDRA_SSL_CIPHER_SUITES='[TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]'
++ CASSANDRA_CONFIG_MEMTABLE_FLUSH_WRITER=4
++ CASSANDRA_CONFIG_CONCURRECT_COMPACTORS=4
++ CASSANDRA_CONFIG_COMPACTION_THROUGHPUT_MB_PER_SEC=256
++ CASSANDRA_CONFIG_CONCURRECT_READS=64
++ CASSANDRA_CONFIG_CONCURRECT_WRITES=64
++ CASSANDRA_CONFIG_MEMTABLE_ALLOCATION_TYPE=offheap_objects
++ CASSANDRA_REAPER_ENABLED=false
++ CASSANDRA_REAPER_JMX_KEY=reaperJmxKey
++ CASSANDRA_REAPER_JMX_AUTH_USERNAME=reaperUser
++ CASSANDRA_REAPER_JMX_AUTH_PASSWORD=reaperPass
++ CASSANDRA_REAPER_APP_PORT=8071
++ CASSANDRA_REAPER_ADM_PORT=8072
++ CONTROL_NODES=10.0.0.48
++ CONTROL_INTROSPECT_PORT=8083
++ DNS_NODES=10.0.0.48
++ DNS_SERVER_PORT=53
++ DNS_INTROSPECT_PORT=8092
++ RNDC_KEY=xvysmOR8lnUQRBcunkC6vg==
++ USE_EXTERNAL_TFTP=False
++ ZOOKEEPER_NODES=10.0.0.48
++ ZOOKEEPER_PORT=2181
++ ZOOKEEPER_PORTS=2888:3888
+++ get_server_list ZOOKEEPER :2181,
+++ local server_typ=ZOOKEEPER_NODES
+++ local port_with_delim=:2181,
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.48
+++ local server_address=10.0.0.48
+++ extended_server_list+=10.0.0.48:2181,
+++ '[' -n 10.0.0.48:2181, ']'
+++ echo 10.0.0.48:2181
++ ZOOKEEPER_SERVERS=10.0.0.48:2181
+++ get_server_list ZOOKEEPER ':2181 '
+++ local server_typ=ZOOKEEPER_NODES
+++ local 'port_with_delim=:2181 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.48
+++ local server_address=10.0.0.48
+++ extended_server_list+='10.0.0.48:2181 '
+++ '[' -n '10.0.0.48:2181 ' ']'
+++ echo 10.0.0.48:2181
++ ZOOKEEPER_SERVERS_SPACE_DELIM=10.0.0.48:2181
++ RABBITMQ_NODES=10.0.0.48
++ RABBITMQ_NODE_PORT=5673
+++ get_server_list RABBITMQ :5673,
+++ local server_typ=RABBITMQ_NODES
+++ local port_with_delim=:5673,
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.48
+++ local server_address=10.0.0.48
+++ extended_server_list+=10.0.0.48:5673,
+++ '[' -n 10.0.0.48:5673, ']'
+++ echo 10.0.0.48:5673
++ RABBITMQ_SERVERS=10.0.0.48:5673
++ RABBITMQ_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ RABBITMQ_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ RABBITMQ_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ RABBITMQ_SSL_FAIL_IF_NO_PEER_CERT=true
++ RABBITMQ_VHOST=/
++ RABBITMQ_USER=guest
++ RABBITMQ_PASSWORD=guest
++ RABBITMQ_USE_SSL=False
++ RABBITMQ_SSL_VER=tlsv1.2
++ RABBITMQ_CLIENT_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ RABBITMQ_CLIENT_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ RABBITMQ_CLIENT_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ RABBITMQ_HEARTBEAT_INTERVAL=60
++ RABBITMQ_CLUSTER_PARTITION_HANDLING=autoheal
++ RABBITMQ_MIRRORED_QUEUE_MODE=all
++ REDIS_SERVER_PORT=6379
++ REDIS_SERVER_PASSWORD=
+++ get_server_list ANALYTICS ':6379 '
+++ local server_typ=ANALYTICS_NODES
+++ local 'port_with_delim=:6379 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.48
+++ local server_address=10.0.0.48
+++ extended_server_list+='10.0.0.48:6379 '
+++ '[' -n '10.0.0.48:6379 ' ']'
+++ echo 10.0.0.48:6379
++ REDIS_SERVERS=10.0.0.48:6379
++ REDIS_LISTEN_ADDRESS=
++ REDIS_PROTECTED_MODE=
++ REDIS_SSL_ENABLE=False
++ REDIS_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ REDIS_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ REDIS_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ is_enabled False
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ redis_ssl_config=
++ KAFKA_NODES=10.0.0.48
++ KAFKA_PORT=9092
+++ get_server_list KAFKA ':9092 '
+++ local server_typ=KAFKA_NODES
+++ local 'port_with_delim=:9092 '
+++ local server_list=
+++ IFS=,
+++ read -ra server_list
+++ local extended_server_list=
+++ for server in '"${server_list[@]}"'
++++ echo 10.0.0.48
+++ local server_address=10.0.0.48
+++ extended_server_list+='10.0.0.48:9092 '
+++ '[' -n '10.0.0.48:9092 ' ']'
+++ echo 10.0.0.48:9092
++ KAFKA_SERVERS=10.0.0.48:9092
++ KAFKA_SSL_ENABLE=False
++ KAFKA_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ KAFKA_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ KAFKA_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ KEYSTONE_AUTH_ADMIN_TENANT=admin
++ KEYSTONE_AUTH_ADMIN_USER=admin
++ KEYSTONE_AUTH_ADMIN_PASSWORD=contrail123
++ KEYSTONE_AUTH_PROJECT_DOMAIN_NAME=Default
++ KEYSTONE_AUTH_USER_DOMAIN_NAME=Default
++ KEYSTONE_AUTH_REGION_NAME=RegionOne
++ KEYSTONE_AUTH_URL_VERSION=/
++ KEYSTONE_AUTH_HOST=127.0.0.1
++ KEYSTONE_AUTH_PROTO=http
++ KEYSTONE_AUTH_ADMIN_PORT=5000
++ KEYSTONE_AUTH_PUBLIC_PORT=5000
++ KEYSTONE_AUTH_URL_TOKENS=/v3/auth/tokens
++ KEYSTONE_AUTH_INSECURE=True
++ KEYSTONE_AUTH_CERTFILE=
++ KEYSTONE_AUTH_KEYFILE=
++ KEYSTONE_AUTH_CA_CERTFILE=
++ KEYSTONE_AUTH_ENDPOINT_TYPE=
++ KEYSTONE_AUTH_SYNC_ON_DEMAND=
++ KEYSTONE_AUTH_INTERFACE=public
++ KUBEMANAGER_NODES=10.0.0.48
++ KUBERNETES_CLUSTER_NAME=k8s
++ KUBERNETES_CNI_META_PLUGIN=multus
++ METADATA_PROXY_SECRET=contrail
++ BARBICAN_TENANT_NAME=service
++ BARBICAN_USER=barbican
++ BARBICAN_PASSWORD=contrail123
++ AGENT_MODE=kernel
++ EXTERNAL_ROUTERS=
++ SUBCLUSTER=
++ VROUTER_COMPUTE_NODE_ADDRESS=
++ VROUTER_CRYPT_INTERFACE=crypt0
++ VROUTER_DECRYPT_INTERFACE=decrypt0
++ VROUTER_DECRYPT_KEY=15
++ VROUTER_MODULE_OPTIONS=
++ FABRIC_SNAT_HASH_TABLE_SIZE=4096
++ TSN_EVPN_MODE=False
++ TSN_NODES='[]'
++ PRIORITY_ID=
++ PRIORITY_BANDWIDTH=
++ PRIORITY_SCHEDULING=
++ QOS_QUEUE_ID=
++ QOS_LOGICAL_QUEUES=
++ QOS_DEF_HW_QUEUE=False
++ PRIORITY_TAGGING=True
++ SLO_DESTINATION=collector
++ '[' -n '' ']'
++ SAMPLE_DESTINATION=collector
++ FLOW_EXPORT_RATE=0
++ WEBUI_NODES=10.0.0.48
++ WEBUI_JOB_SERVER_PORT=3000
++ KUE_UI_PORT=3002
++ WEBUI_HTTP_LISTEN_PORT=8180
++ WEBUI_HTTPS_LISTEN_PORT=8143
++ WEBUI_SSL_KEY_FILE=/etc/contrail/webui_ssl/cs-key.pem
++ WEBUI_SSL_CERT_FILE=/etc/contrail/webui_ssl/cs-cert.pem
++ WEBUI_SSL_CIPHERS=ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES256-SHA
++ WEBUI_STATIC_AUTH_USER=admin
++ WEBUI_STATIC_AUTH_PASSWORD=contrail123
++ WEBUI_STATIC_AUTH_ROLE=cloudAdmin
++ XMPP_SERVER_PORT=5269
++ XMPP_SSL_ENABLE=False
++ XMPP_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ XMPP_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ XMPP_SERVER_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ LINKLOCAL_SERVICE_PORT=80
++ LINKLOCAL_SERVICE_NAME=metadata
++ LINKLOCAL_SERVICE_IP=169.254.169.254
++ IPFABRIC_SERVICE_PORT=8775
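The recurring is_enabled checks above treat true, yes and enabled (any case) as truthy; anything else, such as the literal False here, is falsy. A sketch matching the traced comparisons:

is_enabled() {
    local val=${1,,}    # lowercase; the trace shows val=false for the input 'False'
    [[ $val == 'true' ]] || [[ $val == 'yes' ]] || [[ $val == 'enabled' ]]
}
# Typical use, mirroring the empty redis_ssl_config seen above:
if ! is_enabled "$REDIS_SSL_ENABLE"; then
    redis_ssl_config=
fi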
++ INTROSPECT_SSL_ENABLE=False
++ INTROSPECT_SSL_INSECURE=True
++ INTROSPECT_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ INTROSPECT_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ INTROSPECT_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ INTROSPECT_LISTEN_ALL=True
++ SANDESH_SSL_ENABLE=False
++ SANDESH_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ SANDESH_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ SANDESH_SERVER_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ SANDESH_SERVER_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ SANDESH_CA_CERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ METADATA_SSL_ENABLE=false
++ METADATA_SSL_CERTFILE=
++ METADATA_SSL_KEYFILE=
++ METADATA_SSL_CA_CERTFILE=
++ METADATA_SSL_CERT_TYPE=
++ CONFIGURE_IPTABLES=false
++ FWAAS_ENABLE=False
++ CONTAINERD_NAMESPACE=k8s.io
++ TOR_AGENT_OVS_KA=10000
++ TOR_TYPE=ovs
++ TOR_OVS_PROTOCOL=tcp
++ TORAGENT_SSL_CERTFILE=/etc/contrail/ssl/certs/server.pem
++ TORAGENT_SSL_KEYFILE=/etc/contrail/ssl/private/server-privkey.pem
++ TORAGENT_SSL_CACERTFILE=/etc/contrail/ssl/certs/ca-cert.pem
++ [[ / == \/\v\2\.\0 ]]
++ [[ kubernetes == \o\p\e\n\s\t\a\c\k ]]
++ [[ noauth == \k\e\y\s\t\o\n\e ]]
++ is_enabled False
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ read -r -d '' sandesh_client_config
++ true
++ is_enabled False
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ xmpp_certs_config=
++ is_enabled False
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ analytics_api_ssl_opts=
++ read -r -d '' rabbitmq_config
++ true
++ read -r -d '' rabbit_config
++ true
++ is_enabled False
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ is_enabled False
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ kafka_ssl_config=
++ [[ -n '' ]]
++ collector_stats_config=
++ [[ -z '' ]]
++ is_enabled False
++ local val=false
++ [[ false == \t\r\u\e ]]
++ [[ false == \y\e\s ]]
++ [[ false == \e\n\a\b\l\e\d ]]
++ export TSN_AGENT_MODE=
++ TSN_AGENT_MODE=
++ [[ -n '' ]]
++ collector_stats_config=
++ [[ -z x ]]
++ RSYSLOGD_XFLOW_LISTEN_PORT=9898
+ : auto
+ my_ip=
+ my_index=1
+ '[' auto = auto ']'
+ for i in '{1..10}'
++ find_my_ip_and_order_for_node KAFKA
++ local server_typ=KAFKA_NODES
++ find_my_ip_and_order_for_node_list 10.0.0.48
++ local servers=10.0.0.48
++ local server_list=
++ IFS=,
++ read -ra server_list
+++ tr '\n' ,
+++ get_local_ips
+++ cat /proc/net/fib_trie
+++ uniq
+++ awk '/32 host/ { print f } {f=$2}'
+++ sort
+++ grep -vi host
++ local local_ips=,10.0.0.48,10.126.80.1,127.0.0.1,172.17.0.1,252.48.0.1,,
++ local ord=1
++ for server in '"${server_list[@]}"'
++ local ret=0
+++ python3 -c 'import socket; print(socket.gethostbyname('\''10.0.0.48'\''))'
++ local server_ip=10.0.0.48
++ [[ 0 == 0 ]]
++ [[ -n 10.0.0.48 ]]
++ [[ ,10.0.0.48,10.126.80.1,127.0.0.1,172.17.0.1,252.48.0.1,, =~ ,10\.0\.0\.48, ]]
++ echo 10.0.0.48 1
++ return
+ my_ip_and_order='10.0.0.48 1'
+ '[' -n '10.0.0.48 1' ']'
+ break
+ '[' -z '10.0.0.48 1' ']'
++ cut -d ' ' -f 1
++ echo 10.0.0.48 1
+ my_ip=10.0.0.48
++ echo 10.0.0.48 1
++ cut -d ' ' -f 2
+ my_index=1
+ export KAFKA_LISTEN_ADDRESS=10.0.0.48
+ KAFKA_LISTEN_ADDRESS=10.0.0.48
+ zk_servers_array=($ZOOKEEPER_SERVERS_SPACE_DELIM)
+ zk_list_size=1
+ [[ 1 -gt 1 ]]
+ replication_factor=1
+ KAFKA_BROKER_ID=1
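KAFKA_BROKER_ID comes from this node's position in KAFKA_NODES: each configured node is resolved and matched against the local addresses parsed out of /proc/net/fib_trie. A simplified sketch reconstructed from the trace (assumption: the real helper feeds an escaped regex to [[ =~ ]]; a substring match is used here instead):

get_local_ips() {
    # /proc/net/fib_trie prints each '/32 host' marker on the line after the
    # address, so remember the previous field and print it on a match.
    cat /proc/net/fib_trie | awk '/32 host/ { print f } {f=$2}' | sort | uniq | grep -vi host
}
find_my_ip_and_order_for_node_list() {
    local servers=$1 server_list= ord=1
    IFS=',' read -ra server_list <<< "$servers"
    local local_ips=,$(get_local_ips | tr '\n' ,),
    for server in "${server_list[@]}"; do
        local server_ip=$(python3 -c "import socket; print(socket.gethostbyname('$server'))")
        if [[ -n $server_ip && $local_ips == *",$server_ip,"* ]]; then
            echo "$server_ip $ord"    # '10.0.0.48 1' here -> my_ip and my_index
            return
        fi
        ord=$((ord + 1))
    done
}

The index doubles as the broker id, so every node in a multi-broker cluster derives a distinct, stable broker.id from the same shared KAFKA_NODES list.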
+ KAFKA_LISTEN_PORT=9092
+ KAFKA_log_retention_bytes=268435456
+ KAFKA_log_segment_bytes=268435456
+ KAFKA_log_retention_hours=24
+ KAFKA_log_cleanup_policy=delete
+ KAFKA_log_cleaner_threads=2
+ KAFKA_log_cleaner_dedupe_buffer_size=250000000
+ KAFKA_log_cleaner_enable=true
+ KAFKA_delete_topic_enable=true
+ KAFKA_KEY_PASSWORD=c0ntrail123
+ KAFKA_STORE_PASSWORD=c0ntrail123
+ CONFIG=/opt/kafka/config/server.properties
+ sed -i 's/^broker.id=.*$/broker.id=1/g' /opt/kafka/config/server.properties
+ sed -i 's/#port=.*$/port=9092/g' /opt/kafka/config/server.properties
+ is_enabled False
+ local val=false
+ [[ false == \t\r\u\e ]]
+ [[ false == \y\e\s ]]
+ [[ false == \e\n\a\b\l\e\d ]]
+ sed -i 's/^#listeners=.*$/listeners=PLAINTEXT:\/\/10.0.0.48:9092/g' /opt/kafka/config/server.properties
+ sed -i 's)^zookeeper.connect=.*$)zookeeper.connect=10.0.0.48:2181)g' /opt/kafka/config/server.properties
+ sed -i 's/#advertised.host.name=.*$/advertised.host.name=10.0.0.48/g' /opt/kafka/config/server.properties
+ sed -i 's/^#log.retention.bytes=.*$/log.retention.bytes=268435456/g' /opt/kafka/config/server.properties
+ sed -i 's/^log.retention.hours=.*$/log.retention.hours=24/g' /opt/kafka/config/server.properties
+ sed -i 's/^log.segment.bytes=.*$/log.segment.bytes=268435456/g' /opt/kafka/config/server.properties
+ sed -i 's/^num.partitions=.*$/num.partitions=30/g' /opt/kafka/config/server.properties
+ echo ' '
+ [[ 1 -eq 2 ]]
+ [[ 1 -gt 2 ]]
+ echo log.cleanup.policy=delete
+ echo log.cleaner.threads=2
+ echo log.cleaner.dedupe.buffer.size=250000000
+ echo offsets.topic.replication.factor=1
+ echo reserved.broker.max.id=100001
+ chown -R kafka:kafka /opt/kafka /opt/kafka/config /var/log/kafka
+ chmod -R 750 /var/log/kafka
++ id -u kafka
+ CONTRAIL_UID=2000
++ id -g kafka
+ CONTRAIL_GID=1011
+ do_run_service bin/kafka-server-start.sh config/server.properties
+ [[ -n 2000 ]]
+ [[ -n 1011 ]]
+ mkdir -p /var/crashes
+ chmod 777 /var/crashes
++ id -un 2000
+ local user_name=kafka
+ export HOME=/home/kafka
+ HOME=/home/kafka
+ mkdir -p /home/kafka
+ chown -R 2000:1011 /home/kafka
+ exec setpriv --reuid 2000 --regid 1011 --clear-groups --no-new-privs bin/kafka-server-start.sh config/server.properties
[2025-04-18 05:24:35,763] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)
[2025-04-18 05:24:36,514] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util)
[2025-04-18 05:24:36,722] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler)
[2025-04-18 05:24:36,731] INFO starting (kafka.server.KafkaServer)
[2025-04-18 05:24:36,735] INFO Connecting to zookeeper on 10.0.0.48:2181 (kafka.server.KafkaServer)
[2025-04-18 05:24:36,788] INFO [ZooKeeperClient Kafka server] Initializing a new session to 10.0.0.48:2181. (kafka.zookeeper.ZooKeeperClient)
[2025-04-18 05:24:36,806] INFO Client environment:zookeeper.version=3.5.9-83df9301aa5c2a5d284a9940177808c01bc35cef, built on 01/06/2021 20:03 GMT (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,806] INFO Client environment:host.name=cn-jenkins-deploy-platform-juju-k8s-1148-1 (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,806] INFO Client environment:java.version=1.8.0_402 (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,807] INFO Client environment:java.vendor=Red Hat, Inc. (org.apache.zookeeper.ZooKeeper)
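The sed calls above template /opt/kafka/config/server.properties in place before the exec drops privileges to the kafka user via setpriv. A quick, hypothetical spot-check of the rendered file would be:

grep -E '^(broker\.id|listeners|zookeeper\.connect|log\.retention\.bytes|num\.partitions)=' \
    /opt/kafka/config/server.properties
# broker.id=1
# listeners=PLAINTEXT://10.0.0.48:9092
# zookeeper.connect=10.0.0.48:2181
# log.retention.bytes=268435456
# num.partitions=30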
[2025-04-18 05:24:36,807] INFO Client environment:java.home=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.402.b06-1.el7_9.x86_64/jre (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,807] INFO Client environment:java.class.path=/opt/kafka/bin/../libs/activation-1.1.1.jar:/opt/kafka/bin/../libs/aopalliance-repackaged-2.6.1.jar:/opt/kafka/bin/../libs/argparse4j-0.7.0.jar:/opt/kafka/bin/../libs/audience-annotations-0.5.0.jar:/opt/kafka/bin/../libs/commons-cli-1.4.jar:/opt/kafka/bin/../libs/commons-lang3-3.8.1.jar:/opt/kafka/bin/../libs/connect-api-2.6.3.jar:/opt/kafka/bin/../libs/connect-basic-auth-extension-2.6.3.jar:/opt/kafka/bin/../libs/connect-file-2.6.3.jar:/opt/kafka/bin/../libs/connect-json-2.6.3.jar:/opt/kafka/bin/../libs/connect-mirror-2.6.3.jar:/opt/kafka/bin/../libs/connect-mirror-client-2.6.3.jar:/opt/kafka/bin/../libs/connect-runtime-2.6.3.jar:/opt/kafka/bin/../libs/connect-transforms-2.6.3.jar:/opt/kafka/bin/../libs/hk2-api-2.6.1.jar:/opt/kafka/bin/../libs/hk2-locator-2.6.1.jar:/opt/kafka/bin/../libs/hk2-utils-2.6.1.jar:/opt/kafka/bin/../libs/jackson-annotations-2.10.5.jar:/opt/kafka/bin/../libs/jackson-core-2.10.5.jar:/opt/kafka/bin/../libs/jackson-databind-2.10.5.1.jar:/opt/kafka/bin/../libs/jackson-dataformat-csv-2.10.5.jar:/opt/kafka/bin/../libs/jackson-datatype-jdk8-2.10.5.jar:/opt/kafka/bin/../libs/jackson-jaxrs-base-2.10.5.jar:/opt/kafka/bin/../libs/jackson-jaxrs-json-provider-2.10.5.jar:/opt/kafka/bin/../libs/jackson-module-jaxb-annotations-2.10.5.jar:/opt/kafka/bin/../libs/jackson-module-paranamer-2.10.5.jar:/opt/kafka/bin/../libs/jackson-module-scala_2.12-2.10.5.jar:/opt/kafka/bin/../libs/jakarta.activation-api-1.2.1.jar:/opt/kafka/bin/../libs/jakarta.annotation-api-1.3.5.jar:/opt/kafka/bin/../libs/jakarta.inject-2.6.1.jar:/opt/kafka/bin/../libs/jakarta.validation-api-2.0.2.jar:/opt/kafka/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/opt/kafka/bin/../libs/jakarta.xml.bind-api-2.3.2.jar:/opt/kafka/bin/../libs/javassist-3.25.0-GA.jar:/opt/kafka/bin/../libs/javassist-3.26.0-GA.jar:/opt/kafka/bin/../libs/javax.servlet-api-3.1.0.jar:/opt/kafka/bin/../libs/javax.ws.rs-api-2.1.1.jar:/opt/kafka/bin/../libs/jaxb-api-2.3.0.jar:/opt/kafka/bin/../libs/jersey-client-2.31.jar:/opt/kafka/bin/../libs/jersey-common-2.31.jar:/opt/kafka/bin/../libs/jersey-container-servlet-2.31.jar:/opt/kafka/bin/../libs/jersey-container-servlet-core-2.31.jar:/opt/kafka/bin/../libs/jersey-hk2-2.31.jar:/opt/kafka/bin/../libs/jersey-media-jaxb-2.31.jar:/opt/kafka/bin/../libs/jersey-server-2.31.jar:/opt/kafka/bin/../libs/jetty-client-9.4.39.v20210325.jar:/opt/kafka/bin/../libs/jetty-continuation-9.4.39.v20210325.jar:/opt/kafka/bin/../libs/jetty-http-9.4.39.v20210325.jar:/opt/kafka/bin/../libs/jetty-io-9.4.39.v20210325.jar:/opt/kafka/bin/../libs/jetty-security-9.4.39.v20210325.jar:/opt/kafka/bin/../libs/jetty-server-9.4.39.v20210325.jar:/opt/kafka/bin/../libs/jetty-servlet-9.4.39.v20210325.jar:/opt/kafka/bin/../libs/jetty-servlets-9.4.39.v20210325.jar:/opt/kafka/bin/../libs/jetty-util-9.4.39.v20210325.jar:/opt/kafka/bin/../libs/jetty-util-ajax-9.4.39.v20210325.jar:/opt/kafka/bin/../libs/jopt-simple-5.0.4.jar:/opt/kafka/bin/../libs/kafka_2.12-2.6.3.jar:/opt/kafka/bin/../libs/kafka_2.12-2.6.3-sources.jar:/opt/kafka/bin/../libs/kafka-clients-2.6.3.jar:/opt/kafka/bin/../libs/kafka-log4j-appender-2.6.3.jar:/opt/kafka/bin/../libs/kafka-streams-2.6.3.jar:/opt/kafka/bin/../libs/kafka-streams-examples-2.6.3.jar:/opt/kafka/bin/../libs/kafka-streams-scala_2.12-2.6.3.jar:/opt/kafka/bin/../libs/kafka-streams-test-utils-2.6.3.jar:/opt/kafka/bin/../libs/kafka-tools-2.6.3.jar:/opt/kafka/bin/../libs/log4j-1.2.17.jar:/opt/kafka/bin/../libs/lz4-java-1.7.1.jar:/opt/kafka/bin/../libs/maven-artifact-3.8.1.jar:/opt/kafka/bin/../libs/metrics-core-2.2.0.jar:/opt/kafka/bin/../libs/netty-buffer-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-codec-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-common-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-handler-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-resolver-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-transport-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-transport-native-epoll-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-transport-native-unix-common-4.1.59.Final.jar:/opt/kafka/bin/../libs/osgi-resource-locator-1.0.3.jar:/opt/kafka/bin/../libs/paranamer-2.8.jar:/opt/kafka/bin/../libs/plexus-utils-3.2.1.jar:/opt/kafka/bin/../libs/reflections-0.9.12.jar:/opt/kafka/bin/../libs/rocksdbjni-5.18.4.jar:/opt/kafka/bin/../libs/scala-collection-compat_2.12-2.1.6.jar:/opt/kafka/bin/../libs/scala-java8-compat_2.12-0.9.1.jar:/opt/kafka/bin/../libs/scala-library-2.12.11.jar:/opt/kafka/bin/../libs/scala-logging_2.12-3.9.2.jar:/opt/kafka/bin/../libs/scala-reflect-2.12.11.jar:/opt/kafka/bin/../libs/slf4j-api-1.7.30.jar:/opt/kafka/bin/../libs/slf4j-log4j12-1.7.30.jar:/opt/kafka/bin/../libs/snappy-java-1.1.7.3.jar:/opt/kafka/bin/../libs/zookeeper-3.5.9.jar:/opt/kafka/bin/../libs/zookeeper-jute-3.5.9.jar:/opt/kafka/bin/../libs/zstd-jni-1.4.4-7.jar (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,810] INFO Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,810] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,810] INFO Client environment:java.compiler= (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,810] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,810] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,810] INFO Client environment:os.version=5.4.0-167-generic (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,810] INFO Client environment:user.name=kafka (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,810] INFO Client environment:user.home=/home/kafka (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,810] INFO Client environment:user.dir=/opt/kafka (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,811] INFO Client environment:os.memory.free=976MB (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,811] INFO Client environment:os.memory.max=1024MB (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,811] INFO Client environment:os.memory.total=1024MB (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,821] INFO Initiating client connection, connectString=10.0.0.48:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@9f116cc (org.apache.zookeeper.ZooKeeper)
[2025-04-18 05:24:36,852] INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket)
[2025-04-18 05:24:36,893] INFO zookeeper.request.timeout value is 0. feature enabled= (org.apache.zookeeper.ClientCnxn)
[2025-04-18 05:24:36,908] INFO Opening socket connection to server cn-jenkins-deploy-platform-juju-k8s-1148-1/10.0.0.48:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn)
[2025-04-18 05:24:36,927] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[2025-04-18 05:24:36,943] INFO Socket connection established, initiating session, client: /10.0.0.48:44440, server: cn-jenkins-deploy-platform-juju-k8s-1148-1/10.0.0.48:2181 (org.apache.zookeeper.ClientCnxn)
[2025-04-18 05:24:36,984] INFO Session establishment complete on server cn-jenkins-deploy-platform-juju-k8s-1148-1/10.0.0.48:2181, sessionid = 0x1000021ba44000d, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn)
[2025-04-18 05:24:36,993] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
[2025-04-18 05:24:38,307] INFO Cluster ID = yQ5Xh-f3Su-Yh3WUhyP_6A (kafka.server.KafkaServer)
[2025-04-18 05:24:38,331] WARN No meta.properties file under dir /tmp/kafka-logs/meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2025-04-18 05:24:38,608] INFO KafkaConfig values:
  advertised.host.name = null
  advertised.listeners = null
  advertised.port = null
  alter.config.policy.class.name = null
  alter.log.dirs.replication.quota.window.num = 11
  alter.log.dirs.replication.quota.window.size.seconds = 1
  authorizer.class.name =
  auto.create.topics.enable = true
  auto.leader.rebalance.enable = true
  background.threads = 10
  broker.id = 1
  broker.id.generation.enable = true
  broker.rack = null
  client.quota.callback.class = null
  compression.type = producer
  connection.failed.authentication.delay.ms = 100
  connections.max.idle.ms = 600000
  connections.max.reauth.ms = 0
  control.plane.listener.name = null
  controlled.shutdown.enable = true
  controlled.shutdown.max.retries = 3
  controlled.shutdown.retry.backoff.ms = 5000
  controller.socket.timeout.ms = 30000
  create.topic.policy.class.name = null
  default.replication.factor = 1
  delegation.token.expiry.check.interval.ms = 3600000
  delegation.token.expiry.time.ms = 86400000
  delegation.token.master.key = null
  delegation.token.max.lifetime.ms = 604800000
  delete.records.purgatory.purge.interval.requests = 1
  delete.topic.enable = true
  fetch.max.bytes = 57671680
  fetch.purgatory.purge.interval.requests = 1000
  group.initial.rebalance.delay.ms = 0
  group.max.session.timeout.ms = 1800000
  group.max.size = 2147483647
  group.min.session.timeout.ms = 6000
  host.name =
  inter.broker.listener.name = null
  inter.broker.protocol.version = 2.6-IV0
  kafka.metrics.polling.interval.secs = 10
  kafka.metrics.reporters = []
  leader.imbalance.check.interval.seconds = 300
  leader.imbalance.per.broker.percentage = 10
  listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
  listeners = PLAINTEXT://10.0.0.48:9092
  log.cleaner.backoff.ms = 15000
  log.cleaner.dedupe.buffer.size = 250000000
  log.cleaner.delete.retention.ms = 86400000
  log.cleaner.enable = true
  log.cleaner.io.buffer.load.factor = 0.9
  log.cleaner.io.buffer.size = 524288
  log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
  log.cleaner.max.compaction.lag.ms = 9223372036854775807
  log.cleaner.min.cleanable.ratio = 0.5
  log.cleaner.min.compaction.lag.ms = 0
  log.cleaner.threads = 2
  log.cleanup.policy = [delete]
  log.dir = /tmp/kafka-logs
  log.dirs = /tmp/kafka-logs
  log.flush.interval.messages = 9223372036854775807
  log.flush.interval.ms = null
  log.flush.offset.checkpoint.interval.ms = 60000
  log.flush.scheduler.interval.ms = 9223372036854775807
  log.flush.start.offset.checkpoint.interval.ms = 60000
  log.index.interval.bytes = 4096
  log.index.size.max.bytes = 10485760
  log.message.downconversion.enable = true
  log.message.format.version = 2.6-IV0
  log.message.timestamp.difference.max.ms = 9223372036854775807
  log.message.timestamp.type = CreateTime
  log.preallocate = false
  log.retention.bytes = 268435456
  log.retention.check.interval.ms = 300000
  log.retention.hours = 24
  log.retention.minutes = null
  log.retention.ms = null
  log.roll.hours = 168
  log.roll.jitter.hours = 0
  log.roll.jitter.ms = null
  log.roll.ms = null
  log.segment.bytes = 268435456
  log.segment.delete.delay.ms = 60000
  max.connections = 2147483647
  max.connections.per.ip = 2147483647
  max.connections.per.ip.overrides =
  max.incremental.fetch.session.cache.slots = 1000
  message.max.bytes = 1048588
  metric.reporters = []
  metrics.num.samples = 2
  metrics.recording.level = INFO
  metrics.sample.window.ms = 30000
  min.insync.replicas = 1
  num.io.threads = 8
  num.network.threads = 3
  num.partitions = 30
  num.recovery.threads.per.data.dir = 1
  num.replica.alter.log.dirs.threads = null
  num.replica.fetchers = 1
  offset.metadata.max.bytes = 4096
  offsets.commit.required.acks = -1
  offsets.commit.timeout.ms = 5000
  offsets.load.buffer.size = 5242880
  offsets.retention.check.interval.ms = 600000
  offsets.retention.minutes = 10080
  offsets.topic.compression.codec = 0
  offsets.topic.num.partitions = 50
  offsets.topic.replication.factor = 1
  offsets.topic.segment.bytes = 104857600
  password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
  password.encoder.iterations = 4096
  password.encoder.key.length = 128
  password.encoder.keyfactory.algorithm = null
  password.encoder.old.secret = null
  password.encoder.secret = null
  port = 9092
  principal.builder.class = null
  producer.purgatory.purge.interval.requests = 1000
  queued.max.request.bytes = -1
  queued.max.requests = 500
  quota.consumer.default = 9223372036854775807
  quota.producer.default = 9223372036854775807
  quota.window.num = 11
  quota.window.size.seconds = 1
  replica.fetch.backoff.ms = 1000
  replica.fetch.max.bytes = 1048576
  replica.fetch.min.bytes = 1
  replica.fetch.response.max.bytes = 10485760
  replica.fetch.wait.max.ms = 500
  replica.high.watermark.checkpoint.interval.ms = 5000
  replica.lag.time.max.ms = 30000
  replica.selector.class = null
  replica.socket.receive.buffer.bytes = 65536
  replica.socket.timeout.ms = 30000
  replication.quota.window.num = 11
  replication.quota.window.size.seconds = 1
  request.timeout.ms = 30000
  reserved.broker.max.id = 100001
  sasl.client.callback.handler.class = null
  sasl.enabled.mechanisms = [GSSAPI]
  sasl.jaas.config = null
  sasl.kerberos.kinit.cmd = /usr/bin/kinit
  sasl.kerberos.min.time.before.relogin = 60000
  sasl.kerberos.principal.to.local.rules = [DEFAULT]
  sasl.kerberos.service.name = null
  sasl.kerberos.ticket.renew.jitter = 0.05
  sasl.kerberos.ticket.renew.window.factor = 0.8
  sasl.login.callback.handler.class = null
  sasl.login.class = null
  sasl.login.refresh.buffer.seconds = 300
  sasl.login.refresh.min.period.seconds = 60
  sasl.login.refresh.window.factor = 0.8
  sasl.login.refresh.window.jitter = 0.05
  sasl.mechanism.inter.broker.protocol = GSSAPI
  sasl.server.callback.handler.class = null
  security.inter.broker.protocol = PLAINTEXT
  security.providers = null
  socket.receive.buffer.bytes = 102400
  socket.request.max.bytes = 104857600
  socket.send.buffer.bytes = 102400
  ssl.cipher.suites = []
  ssl.client.auth = none
  ssl.enabled.protocols = [TLSv1.2]
  ssl.endpoint.identification.algorithm = https
  ssl.engine.factory.class = null
  ssl.key.password = null
  ssl.keymanager.algorithm = SunX509
  ssl.keystore.location = null
  ssl.keystore.password = null
  ssl.keystore.type = JKS
  ssl.principal.mapping.rules = DEFAULT
  ssl.protocol = TLSv1.2
  ssl.provider = null
  ssl.secure.random.implementation = null
  ssl.trustmanager.algorithm = PKIX
  ssl.truststore.location = null
  ssl.truststore.password = null
  ssl.truststore.type = JKS
  transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
  transaction.max.timeout.ms = 900000
  transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
  transaction.state.log.load.buffer.size = 5242880
  transaction.state.log.min.isr = 1
  transaction.state.log.num.partitions = 50
  transaction.state.log.replication.factor = 1
  transaction.state.log.segment.bytes = 104857600
  transactional.id.expiration.ms = 604800000
  unclean.leader.election.enable = false
  zookeeper.clientCnxnSocket = null
  zookeeper.connect = 10.0.0.48:2181
  zookeeper.connection.timeout.ms = 18000
  zookeeper.max.in.flight.requests = 10
  zookeeper.session.timeout.ms = 18000
  zookeeper.set.acl = false
  zookeeper.ssl.cipher.suites = null
  zookeeper.ssl.client.enable = false
  zookeeper.ssl.crl.enable = false
  zookeeper.ssl.enabled.protocols = null
  zookeeper.ssl.endpoint.identification.algorithm = HTTPS
  zookeeper.ssl.keystore.location = null
  zookeeper.ssl.keystore.password = null
  zookeeper.ssl.keystore.type = null
  zookeeper.ssl.ocsp.enable = false
  zookeeper.ssl.protocol = TLSv1.2
  zookeeper.ssl.truststore.location = null
  zookeeper.ssl.truststore.password = null
  zookeeper.ssl.truststore.type = null
  zookeeper.sync.time.ms = 2000 (kafka.server.KafkaConfig)
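Everything in the dump above came from the templated server.properties plus Kafka 2.6 defaults; note that log.dirs still points at the default /tmp/kafka-logs, since only the console log is redirected to /var/log/kafka. The effective values can also be queried from the live broker; a hypothetical check from /opt/kafka:

bin/kafka-configs.sh --bootstrap-server 10.0.0.48:9092 \
    --entity-type brokers --entity-name 1 --describe --all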
[2025-04-18 05:24:38,902] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2025-04-18 05:24:38,908] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2025-04-18 05:24:38,905] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2025-04-18 05:24:38,962] INFO Log directory /tmp/kafka-logs not found, creating it. (kafka.log.LogManager)
[2025-04-18 05:24:38,975] INFO Loading logs from log dirs ArrayBuffer(/tmp/kafka-logs) (kafka.log.LogManager)
[2025-04-18 05:24:38,983] INFO Attempting recovery for all logs in /tmp/kafka-logs since no clean shutdown file was found (kafka.log.LogManager)
[2025-04-18 05:24:39,014] INFO Loaded 0 logs in 39ms. (kafka.log.LogManager)
[2025-04-18 05:24:39,057] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
[2025-04-18 05:24:39,096] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
[2025-04-18 05:24:40,548] INFO Awaiting socket connections on 10.0.0.48:9092. (kafka.network.Acceptor)
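The earlier WARN about a missing meta.properties is expected on a first start with an empty log directory; the broker stamps the directory once it has joined the cluster. After startup the file should look roughly like this (cluster id taken from the log above; exact contents may vary):

cat /tmp/kafka-logs/meta.properties
# version=0
# broker.id=1
# cluster.id=yQ5Xh-f3Su-Yh3WUhyP_6A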
[2025-04-18 05:24:40,700] INFO [SocketServer brokerId=1] Created data-plane acceptor and processors for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer)
[2025-04-18 05:24:40,801] INFO [ExpirationReaper-1-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2025-04-18 05:24:40,808] INFO [ExpirationReaper-1-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2025-04-18 05:24:40,808] INFO [ExpirationReaper-1-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2025-04-18 05:24:40,812] INFO [ExpirationReaper-1-ElectLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2025-04-18 05:24:40,901] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
[2025-04-18 05:24:40,978] INFO Creating /brokers/ids/1 (is it secure? false) (kafka.zk.KafkaZkClient)
[2025-04-18 05:24:41,059] INFO Stat of the created znode at /brokers/ids/1 is: 187,187,1744953881025,1744953881025,1,0,0,72057738896867341,188,0,187 (kafka.zk.KafkaZkClient)
[2025-04-18 05:24:41,060] INFO Registered broker 1 at path /brokers/ids/1 with addresses: PLAINTEXT://10.0.0.48:9092, czxid (broker epoch): 187 (kafka.zk.KafkaZkClient)
[2025-04-18 05:24:41,254] INFO [ExpirationReaper-1-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2025-04-18 05:24:41,257] INFO [ExpirationReaper-1-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2025-04-18 05:24:41,261] INFO [ExpirationReaper-1-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2025-04-18 05:24:41,329] INFO Successfully created /controller_epoch with initial epoch 0 (kafka.zk.KafkaZkClient)
[2025-04-18 05:24:41,410] INFO [GroupCoordinator 1]: Starting up. (kafka.coordinator.group.GroupCoordinator)
[2025-04-18 05:24:41,426] INFO [GroupCoordinator 1]: Startup complete. (kafka.coordinator.group.GroupCoordinator)
[2025-04-18 05:24:41,485] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 57 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2025-04-18 05:24:41,488] INFO [ProducerId Manager 1]: Acquired new producerId block (brokerId:1,blockStartProducerId:0,blockEndProducerId:999) by writing to Zk with path version 1 (kafka.coordinator.transaction.ProducerIdManager)
[2025-04-18 05:24:41,569] INFO [TransactionCoordinator id=1] Starting up. (kafka.coordinator.transaction.TransactionCoordinator)
[2025-04-18 05:24:41,574] INFO [TransactionCoordinator id=1] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator)
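The znode created above holds the broker's advertised endpoints and can be read back through the bundled ZooKeeper shell; a hypothetical check from /opt/kafka:

bin/zookeeper-shell.sh 10.0.0.48:2181 get /brokers/ids/1
# {"listener_security_protocol_map":{"PLAINTEXT":"PLAINTEXT"},"endpoints":["PLAINTEXT://10.0.0.48:9092"],...}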
[2025-04-18 05:24:41,661] INFO [Transaction Marker Channel Manager 1]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager)
[2025-04-18 05:24:41,838] INFO [ExpirationReaper-1-AlterAcls]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2025-04-18 05:24:42,078] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
[2025-04-18 05:24:42,125] INFO [SocketServer brokerId=1] Starting socket server acceptors and processors (kafka.network.SocketServer)
[2025-04-18 05:24:42,131] INFO [SocketServer brokerId=1] Started data-plane acceptor and processor(s) for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer)
[2025-04-18 05:24:42,137] INFO [SocketServer brokerId=1] Started socket server acceptors and processors (kafka.network.SocketServer)
[2025-04-18 05:24:42,212] INFO Kafka version: 2.6.3 (org.apache.kafka.common.utils.AppInfoParser)
[2025-04-18 05:24:42,212] INFO Kafka commitId: c24cbd3f5eeffa1e (org.apache.kafka.common.utils.AppInfoParser)
[2025-04-18 05:24:42,212] INFO Kafka startTimeMs: 1744953882138 (org.apache.kafka.common.utils.AppInfoParser)
[2025-04-18 05:24:42,213] INFO [KafkaServer id=1] started (kafka.server.KafkaServer)
[2025-04-18 05:24:42,443] INFO Creating topic structured_syslog_topic with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:42,449] INFO Creating topic -uve-topic-0 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:42,542] INFO [KafkaApi-1] Auto creation of topic structured_syslog_topic with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:42,543] INFO [KafkaApi-1] Auto creation of topic -uve-topic-0 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:42,593] INFO Creating topic -uve-topic-5 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
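Each topic is auto-created on first use with the templated num.partitions=30 and replication factor 1 (single broker), which is why every partition assignment maps to ArrayBuffer(1). A hypothetical verification from /opt/kafka:

bin/kafka-topics.sh --bootstrap-server 10.0.0.48:9092 --describe --topic structured_syslog_topic | head -n 1
# Topic: structured_syslog_topic  PartitionCount: 30  ReplicationFactor: 1  Configs: ...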
[2025-04-18 05:24:42,659] INFO [KafkaApi-1] Auto creation of topic -uve-topic-5 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:42,675] INFO Creating topic -uve-topic-29 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:42,941] INFO [KafkaApi-1] Auto creation of topic -uve-topic-29 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:42,987] INFO Creating topic -uve-topic-9 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:43,277] INFO [KafkaApi-1] Auto creation of topic -uve-topic-9 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:43,312] INFO Creating topic -uve-topic-26 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:43,476] INFO [KafkaApi-1] Auto creation of topic -uve-topic-26 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:43,616] INFO Creating topic -uve-topic-18 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:43,731] INFO [KafkaApi-1] Auto creation of topic -uve-topic-18 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:43,854] INFO Creating topic -uve-topic-28 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:43,903] INFO [KafkaApi-1] Auto creation of topic -uve-topic-28 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:43,935] INFO Creating topic -uve-topic-4 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:43,989] INFO [KafkaApi-1] Auto creation of topic -uve-topic-4 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:44,003] INFO Creating topic -uve-topic-22 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient) [2025-04-18 05:24:44,051] INFO [KafkaApi-1] Auto creation of topic -uve-topic-22 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis) [2025-04-18 05:24:44,089] INFO Creating topic -uve-topic-11 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient) [2025-04-18 05:24:44,235] INFO [KafkaApi-1] Auto creation of topic -uve-topic-11 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis) [2025-04-18 05:24:44,250] INFO Creating topic -uve-topic-6 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient) [2025-04-18 05:24:44,317] INFO [KafkaApi-1] Auto creation of topic -uve-topic-6 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis) [2025-04-18 05:24:44,445] INFO Creating topic -uve-topic-19 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient) [2025-04-18 05:24:44,509] INFO [KafkaApi-1] Auto creation of topic -uve-topic-19 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis) [2025-04-18 05:24:44,523] INFO Creating topic -uve-topic-25 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> 
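NOTE: The records above show broker-side auto-creation of the per-UVE topics (Kafka's auto.create.topics.enable behavior, triggered on first use); the leading "-" in names like -uve-topic-5 suggests an empty topic-prefix on the producer side. A minimal sketch of creating one of these topics explicitly, assuming the stock CLI under /opt/kafka/bin and a listener on localhost:9092 (both the path and the address are assumptions, not taken from this log):
# Create one UVE topic by hand with the same layout the broker chose
# automatically: 30 partitions, replication factor 1. The "=" form keeps
# the leading "-" of the topic name out of option parsing.
/opt/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 \
  --create --topic='-uve-topic-5' --partitions 30 --replication-factor 1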
[2025-04-18 05:24:44,523] INFO Creating topic -uve-topic-25 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:44,556] INFO [KafkaApi-1] Auto creation of topic -uve-topic-25 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:44,578] INFO Creating topic -uve-topic-3 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:44,648] INFO [KafkaApi-1] Auto creation of topic -uve-topic-3 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:44,723] INFO Creating topic -uve-topic-15 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:44,738] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(structured_syslog_topic-17, -uve-topic-0-23, -uve-topic-0-16, -uve-topic-0-11, -uve-topic-0-25, -uve-topic-0-5, structured_syslog_topic-22, structured_syslog_topic-26, structured_syslog_topic-3, structured_syslog_topic-16, -uve-topic-0-4, structured_syslog_topic-10, -uve-topic-0-2, structured_syslog_topic-6, -uve-topic-0-3, -uve-topic-0-15, -uve-topic-0-9, structured_syslog_topic-28, structured_syslog_topic-9, structured_syslog_topic-24, structured_syslog_topic-25, -uve-topic-0-8, -uve-topic-0-22, structured_syslog_topic-15, structured_syslog_topic-19, -uve-topic-0-13, -uve-topic-0-17, -uve-topic-0-0, -uve-topic-0-26, -uve-topic-0-12, structured_syslog_topic-20, structured_syslog_topic-8, structured_syslog_topic-14, structured_syslog_topic-2, -uve-topic-0-20, -uve-topic-0-21, structured_syslog_topic-1, -uve-topic-0-14, structured_syslog_topic-13, structured_syslog_topic-21, -uve-topic-0-24, -uve-topic-0-18, -uve-topic-0-27, structured_syslog_topic-0, structured_syslog_topic-12, -uve-topic-0-6, -uve-topic-0-28, structured_syslog_topic-5, -uve-topic-0-29, structured_syslog_topic-29, -uve-topic-0-10, structured_syslog_topic-23, structured_syslog_topic-27, structured_syslog_topic-11, structured_syslog_topic-7, -uve-topic-0-1, structured_syslog_topic-18, structured_syslog_topic-4, -uve-topic-0-19, -uve-topic-0-7) (kafka.server.ReplicaFetcherManager)
[2025-04-18 05:24:44,950] INFO [KafkaApi-1] Auto creation of topic -uve-topic-15 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:45,125] INFO Creating topic -uve-topic-21 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:45,148] INFO [KafkaApi-1] Auto creation of topic -uve-topic-21 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:45,176] INFO Creating topic -uve-topic-20 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:45,408] INFO [KafkaApi-1] Auto creation of topic -uve-topic-20 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:45,519] INFO Creating topic -uve-topic-10 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:45,598] INFO [KafkaApi-1] Auto creation of topic -uve-topic-10 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
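NOTE: Every assignment map above reads "n -> ArrayBuffer(1)": broker id 1 is the sole replica of all 30 partitions, which is also why the ReplicaFetcherManager entry removes fetchers; with replication factor 1 there is no follower to fetch from. A sketch for confirming that layout from the CLI (same assumed tool path and address as above):
# Describe a topic and check that each partition line reports
# "Leader: 1  Replicas: 1  Isr: 1".
/opt/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 \
  --describe --topic='-uve-topic-0'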
[2025-04-18 05:24:45,646] INFO Creating topic -uve-topic-13 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:45,656] INFO [KafkaApi-1] Auto creation of topic -uve-topic-13 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:45,831] INFO [Log partition=-uve-topic-0-7, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2025-04-18 05:24:45,860] INFO Created log for partition -uve-topic-0-7 in /tmp/kafka-logs/-uve-topic-0-7 with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 268435456, retention.ms -> 86400000, flush.messages -> 9223372036854775807, message.format.version -> 2.6-IV0, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> 268435456, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2025-04-18 05:24:45,918] INFO [Partition -uve-topic-0-7 broker=1] No checkpointed highwatermark is found for partition -uve-topic-0-7 (kafka.cluster.Partition)
[2025-04-18 05:24:45,947] INFO [Partition -uve-topic-0-7 broker=1] Log loaded for partition -uve-topic-0-7 with initial high watermark 0 (kafka.cluster.Partition)
[2025-04-18 05:24:45,978] INFO Creating topic -uve-topic-27 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:46,159] INFO [KafkaApi-1] Auto creation of topic -uve-topic-27 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:46,201] INFO [Log partition=-uve-topic-0-26, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2025-04-18 05:24:46,273] INFO Created log for partition -uve-topic-0-26 in /tmp/kafka-logs/-uve-topic-0-26 with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 268435456, retention.ms -> 86400000, flush.messages -> 9223372036854775807, message.format.version -> 2.6-IV0, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> 268435456, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2025-04-18 05:24:46,273] INFO [Partition -uve-topic-0-26 broker=1] No checkpointed highwatermark is found for partition -uve-topic-0-26 (kafka.cluster.Partition)
[2025-04-18 05:24:46,273] INFO [Partition -uve-topic-0-26 broker=1] Log loaded for partition -uve-topic-0-26 with initial high watermark 0 (kafka.cluster.Partition)
[2025-04-18 05:24:46,313] INFO Creating topic -uve-topic-24 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:46,368] INFO [Log partition=structured_syslog_topic-15, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2025-04-18 05:24:46,377] INFO Created log for partition structured_syslog_topic-15 in /tmp/kafka-logs/structured_syslog_topic-15 with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 268435456, retention.ms -> 86400000, flush.messages -> 9223372036854775807, message.format.version -> 2.6-IV0, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> 268435456, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
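NOTE: The LogManager records spell out the effective per-topic settings: 24-hour retention (retention.ms -> 86400000), 256 MiB segment and retention caps (segment.bytes / retention.bytes -> 268435456), and plain deletion rather than compaction (cleanup.policy -> [delete]). Also note dir=/tmp/kafka-logs: the partition data lives under /tmp, which typically does not survive a host reboot. A sketch for reading back or overriding these values per topic (assumed tool path and address, as above):
# Show the dynamic config for one topic, then set an explicit retention.
/opt/kafka/bin/kafka-configs.sh --bootstrap-server localhost:9092 \
  --entity-type topics --entity-name='-uve-topic-0' --describe
/opt/kafka/bin/kafka-configs.sh --bootstrap-server localhost:9092 \
  --entity-type topics --entity-name='-uve-topic-0' \
  --alter --add-config retention.ms=86400000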
[2025-04-18 05:24:46,381] INFO [Partition structured_syslog_topic-15 broker=1] No checkpointed highwatermark is found for partition structured_syslog_topic-15 (kafka.cluster.Partition)
[2025-04-18 05:24:46,382] INFO [Partition structured_syslog_topic-15 broker=1] Log loaded for partition structured_syslog_topic-15 with initial high watermark 0 (kafka.cluster.Partition)
[2025-04-18 05:24:46,437] INFO [Log partition=-uve-topic-0-17, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2025-04-18 05:24:46,445] INFO Created log for partition -uve-topic-0-17 in /tmp/kafka-logs/-uve-topic-0-17 with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 268435456, retention.ms -> 86400000, flush.messages -> 9223372036854775807, message.format.version -> 2.6-IV0, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> 268435456, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2025-04-18 05:24:46,445] INFO [Partition -uve-topic-0-17 broker=1] No checkpointed highwatermark is found for partition -uve-topic-0-17 (kafka.cluster.Partition)
[2025-04-18 05:24:46,445] INFO [Partition -uve-topic-0-17 broker=1] Log loaded for partition -uve-topic-0-17 with initial high watermark 0 (kafka.cluster.Partition)
[2025-04-18 05:24:46,512] INFO [KafkaApi-1] Auto creation of topic -uve-topic-24 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:46,570] INFO Creating topic -uve-topic-16 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:46,657] INFO [KafkaApi-1] Auto creation of topic -uve-topic-16 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:46,666] INFO Creating topic -uve-topic-7 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:46,672] INFO [Log partition=-uve-topic-0-14, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2025-04-18 05:24:46,674] INFO [KafkaApi-1] Auto creation of topic -uve-topic-7 with 30 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2025-04-18 05:24:46,694] INFO Created log for partition -uve-topic-0-14 in /tmp/kafka-logs/-uve-topic-0-14 with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 268435456, retention.ms -> 86400000, flush.messages -> 9223372036854775807, message.format.version -> 2.6-IV0, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> 268435456, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2025-04-18 05:24:46,694] INFO [Partition -uve-topic-0-14 broker=1] No checkpointed highwatermark is found for partition -uve-topic-0-14 (kafka.cluster.Partition)
[2025-04-18 05:24:46,695] INFO [Partition -uve-topic-0-14 broker=1] Log loaded for partition -uve-topic-0-14 with initial high watermark 0 (kafka.cluster.Partition)
[2025-04-18 05:24:46,725] INFO Creating topic -uve-topic-14 with configuration {} and initial partition assignment Map(23 -> ArrayBuffer(1), 8 -> ArrayBuffer(1), 17 -> ArrayBuffer(1), 26 -> ArrayBuffer(1), 11 -> ArrayBuffer(1), 29 -> ArrayBuffer(1), 2 -> ArrayBuffer(1), 20 -> ArrayBuffer(1), 5 -> ArrayBuffer(1), 14 -> ArrayBuffer(1), 4 -> ArrayBuffer(1), 13 -> ArrayBuffer(1), 22 -> ArrayBuffer(1), 7 -> ArrayBuffer(1), 16 -> ArrayBuffer(1), 25 -> ArrayBuffer(1), 10 -> ArrayBuffer(1), 1 -> ArrayBuffer(1), 28 -> ArrayBuffer(1), 19 -> ArrayBuffer(1), 27 -> ArrayBuffer(1), 9 -> ArrayBuffer(1), 18 -> ArrayBuffer(1), 12 -> ArrayBuffer(1), 3 -> ArrayBuffer(1), 21 -> ArrayBuffer(1), 15 -> ArrayBuffer(1), 6 -> ArrayBuffer(1), 24 -> ArrayBuffer(1), 0 -> ArrayBuffer(1)) (kafka.zk.AdminZkClient)
[2025-04-18 05:24:46,737] INFO [Log partition=structured_syslog_topic-12, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2025-04-18 05:24:46,767] INFO Created log for partition structured_syslog_topic-12 in /tmp/kafka-logs/structured_syslog_topic-12 with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 268435456, retention.ms -> 86400000, flush.messages -> 9223372036854775807, message.format.version -> 2.6-IV0, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> 268435456, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2025-04-18 05:24:46,769] INFO [Partition structured_syslog_topic-12 broker=1] No checkpointed highwatermark is found for partition structured_syslog_topic-12 (kafka.cluster.Partition)
[2025-04-18 05:24:46,774] INFO [Partition structured_syslog_topic-12 broker=1] Log loaded for partition structured_syslog_topic-12 with initial high watermark 0 (kafka.cluster.Partition)
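NOTE: "No checkpointed highwatermark" and "initial high watermark 0" are expected for freshly created, empty partitions, and "Loading producer state till offset 0" likewise just means there is nothing to replay. A sketch for spot-checking end offsets once traffic is flowing (same assumed tool path and address; in the 2.6-era release this log appears to come from, GetOffsetShell still takes --broker-list):
# Print the latest offset (--time -1) of every partition of one topic.
/opt/kafka/bin/kafka-run-class.sh kafka.tools.GetOffsetShell \
  --broker-list localhost:9092 --topic='-uve-topic-0' --time -1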