INFO [main] 2025-08-11 05:03:06,944 YamlConfigurationLoader.java:89 - Configuration location: file:/etc/cassandra/cassandra.yaml INFO [main] 2025-08-11 05:03:07,204 Config.java:495 - Node configuration:[allocate_tokens_for_keyspace=null; authenticator=AllowAllAuthenticator; authorizer=AllowAllAuthorizer; auto_bootstrap=true; auto_snapshot=true; back_pressure_enabled=false; back_pressure_strategy=org.apache.cassandra.net.RateBasedBackPressure{high_ratio=0.9, factor=5, flow=FAST}; batch_size_fail_threshold_in_kb=50; batch_size_warn_threshold_in_kb=5; batchlog_replay_throttle_in_kb=1024; broadcast_address=10.0.0.48; broadcast_rpc_address=10.0.0.48; buffer_pool_use_heap_if_exhausted=true; cas_contention_timeout_in_ms=1000; cdc_enabled=false; cdc_free_space_check_interval_ms=250; cdc_raw_directory=null; cdc_total_space_in_mb=0; client_encryption_options=; cluster_name=contrail_database; column_index_cache_size_in_kb=2; column_index_size_in_kb=64; commit_failure_policy=stop; commitlog_compression=null; commitlog_directory=/var/lib/cassandra/commitlog; commitlog_max_compression_buffers_in_pool=3; commitlog_periodic_queue_size=-1; commitlog_segment_size_in_mb=32; commitlog_sync=periodic; commitlog_sync_batch_window_in_ms=NaN; commitlog_sync_period_in_ms=10000; commitlog_total_space_in_mb=null; compaction_large_partition_warning_threshold_mb=100; compaction_throughput_mb_per_sec=256; concurrent_compactors=4; concurrent_counter_writes=32; concurrent_materialized_view_writes=32; concurrent_reads=64; concurrent_replicates=null; concurrent_writes=64; counter_cache_keys_to_save=2147483647; counter_cache_save_period=7200; counter_cache_size_in_mb=null; counter_write_request_timeout_in_ms=5000; credentials_cache_max_entries=1000; credentials_update_interval_in_ms=-1; credentials_validity_in_ms=2000; cross_node_timeout=false; data_file_directories=[Ljava.lang.String;@6b19b79; disk_access_mode=auto; disk_failure_policy=stop; disk_optimization_estimate_percentile=0.95; disk_optimization_page_cross_chance=0.1; disk_optimization_strategy=ssd; dynamic_snitch=true; dynamic_snitch_badness_threshold=0.1; dynamic_snitch_reset_interval_in_ms=600000; dynamic_snitch_update_interval_in_ms=100; enable_materialized_views=true; enable_scripted_user_defined_functions=false; enable_user_defined_functions=false; enable_user_defined_functions_threads=true; encryption_options=null; endpoint_snitch=SimpleSnitch; file_cache_round_up=null; file_cache_size_in_mb=null; gc_log_threshold_in_ms=200; gc_warn_threshold_in_ms=1000; hinted_handoff_disabled_datacenters=[]; hinted_handoff_enabled=true; hinted_handoff_throttle_in_kb=1024; hints_compression=null; hints_directory=null; hints_flush_period_in_ms=10000; incremental_backups=false; index_interval=null; index_summary_capacity_in_mb=null; index_summary_resize_interval_in_minutes=60; initial_token=null; inter_dc_stream_throughput_outbound_megabits_per_sec=200; inter_dc_tcp_nodelay=false; internode_authenticator=null; internode_compression=dc; internode_recv_buff_size_in_bytes=0; internode_send_buff_size_in_bytes=0; key_cache_keys_to_save=2147483647; key_cache_save_period=14400; key_cache_size_in_mb=null; listen_address=10.0.0.48; listen_interface=null; listen_interface_prefer_ipv6=false; listen_on_broadcast_address=false; max_hint_window_in_ms=10800000; max_hints_delivery_threads=2; max_hints_file_size_in_mb=128; max_mutation_size_in_kb=null; max_streaming_retries=3; max_value_size_in_mb=256; memtable_allocation_type=offheap_objects; memtable_cleanup_threshold=null; 
memtable_flush_writers=4; memtable_heap_space_in_mb=null; memtable_offheap_space_in_mb=null; min_free_space_per_drive_in_mb=50; native_transport_max_concurrent_connections=-1; native_transport_max_concurrent_connections_per_ip=-1; native_transport_max_frame_size_in_mb=256; native_transport_max_threads=128; native_transport_port=9042; native_transport_port_ssl=null; num_tokens=256; otc_backlog_expiration_interval_ms=200; otc_coalescing_enough_coalesced_messages=8; otc_coalescing_strategy=DISABLED; otc_coalescing_window_us=200; partitioner=org.apache.cassandra.dht.Murmur3Partitioner; permissions_cache_max_entries=1000; permissions_update_interval_in_ms=-1; permissions_validity_in_ms=2000; phi_convict_threshold=8.0; prepared_statements_cache_size_mb=null; range_request_timeout_in_ms=10000; read_request_timeout_in_ms=5000; request_scheduler=org.apache.cassandra.scheduler.NoScheduler; request_scheduler_id=null; request_scheduler_options=null; request_timeout_in_ms=10000; role_manager=CassandraRoleManager; roles_cache_max_entries=1000; roles_update_interval_in_ms=-1; roles_validity_in_ms=2000; row_cache_class_name=org.apache.cassandra.cache.OHCProvider; row_cache_keys_to_save=2147483647; row_cache_save_period=0; row_cache_size_in_mb=0; rpc_address=10.0.0.48; rpc_interface=null; rpc_interface_prefer_ipv6=false; rpc_keepalive=true; rpc_listen_backlog=50; rpc_max_threads=2147483647; rpc_min_threads=16; rpc_port=9160; rpc_recv_buff_size_in_bytes=null; rpc_send_buff_size_in_bytes=null; rpc_server_type=sync; saved_caches_directory=/var/lib/cassandra/saved_caches; seed_provider=org.apache.cassandra.locator.SimpleSeedProvider{seeds=10.0.0.254,10.0.0.38}; server_encryption_options=; slow_query_log_timeout_in_ms=500; snapshot_before_compaction=false; ssl_storage_port=7001; sstable_preemptive_open_interval_in_mb=50; start_native_transport=true; start_rpc=true; storage_port=7000; stream_throughput_outbound_megabits_per_sec=200; streaming_keep_alive_period_in_secs=300; streaming_socket_timeout_in_ms=86400000; thrift_framed_transport_size_in_mb=15; thrift_max_message_length_in_mb=16; thrift_prepared_statements_cache_size_mb=null; tombstone_failure_threshold=100000; tombstone_warn_threshold=1000; tracetype_query_ttl=86400; tracetype_repair_ttl=604800; transparent_data_encryption_options=org.apache.cassandra.config.TransparentDataEncryptionOptions@2a32de6c; trickle_fsync=false; trickle_fsync_interval_in_kb=10240; truncate_request_timeout_in_ms=60000; unlogged_batch_across_partitions_warn_threshold=10; user_defined_function_fail_timeout=1500; user_defined_function_warn_timeout=500; user_function_timeout_policy=die; windows_timer_interval=1; write_request_timeout_in_ms=2000] INFO [main] 2025-08-11 05:03:07,204 DatabaseDescriptor.java:367 - DiskAccessMode 'auto' determined to be mmap, indexAccessMode is mmap INFO [main] 2025-08-11 05:03:07,205 DatabaseDescriptor.java:425 - Global memtable on-heap threshold is enabled at 502MB INFO [main] 2025-08-11 05:03:07,205 DatabaseDescriptor.java:429 - Global memtable off-heap threshold is enabled at 502MB INFO [main] 2025-08-11 05:03:07,244 RateBasedBackPressure.java:123 - Initialized back-pressure with high ratio: 0.9, factor: 5, flow: FAST, window size: 2000. INFO [main] 2025-08-11 05:03:07,245 DatabaseDescriptor.java:729 - Back-pressure is disabled with strategy org.apache.cassandra.net.RateBasedBackPressure{high_ratio=0.9, factor=5, flow=FAST}. 
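The Config.java dump above is just an echo of /etc/cassandra/cassandra.yaml plus defaults: cluster_name=contrail_database, seeds 10.0.0.254 and 10.0.0.38, listen/broadcast/rpc address 10.0.0.48, num_tokens=256, SimpleSnitch, and AllowAllAuthenticator/AllowAllAuthorizer (so no client authentication is enforced). A minimal way to cross-check the file against what the node actually loaded, assuming shell access to the host and a readable config file, might be:

    # illustrative check only; the key names are standard cassandra.yaml settings
    grep -E '^(cluster_name|listen_address|broadcast_address|rpc_address|num_tokens|endpoint_snitch|authenticator)' /etc/cassandra/cassandra.yaml
    grep -A 6 'seed_provider:' /etc/cassandra/cassandra.yaml    # should list 10.0.0.254,10.0.0.38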
INFO [main] 2025-08-11 05:03:07,451 JMXServerUtils.java:246 - Configured JMX server at: service:jmx:rmi://0.0.0.0/jndi/rmi://0.0.0.0:7201/jmxrmi INFO [main] 2025-08-11 05:03:07,516 CassandraDaemon.java:473 - Hostname: cn-jenkins-deploy-platform-ansible-os-3797-3. INFO [main] 2025-08-11 05:03:07,517 CassandraDaemon.java:480 - JVM vendor/version: OpenJDK 64-Bit Server VM/1.8.0_322 INFO [main] 2025-08-11 05:03:07,517 CassandraDaemon.java:481 - Heap size: 984.000MiB/1.961GiB INFO [main] 2025-08-11 05:03:07,518 CassandraDaemon.java:486 - Code Cache Non-heap memory: init = 2555904(2496K) used = 4046400(3951K) committed = 4128768(4032K) max = 251658240(245760K) INFO [main] 2025-08-11 05:03:07,518 CassandraDaemon.java:486 - Metaspace Non-heap memory: init = 0(0K) used = 19223344(18772K) committed = 19791872(19328K) max = -1(-1K) INFO [main] 2025-08-11 05:03:07,518 CassandraDaemon.java:486 - Compressed Class Space Non-heap memory: init = 0(0K) used = 2248128(2195K) committed = 2490368(2432K) max = 1073741824(1048576K) INFO [main] 2025-08-11 05:03:07,518 CassandraDaemon.java:486 - Par Eden Space Heap memory: init = 335544320(327680K) used = 93992608(91789K) committed = 335544320(327680K) max = 335544320(327680K) INFO [main] 2025-08-11 05:03:07,518 CassandraDaemon.java:486 - Par Survivor Space Heap memory: init = 41943040(40960K) used = 0(0K) committed = 41943040(40960K) max = 41943040(40960K) INFO [main] 2025-08-11 05:03:07,518 CassandraDaemon.java:486 - CMS Old Gen Heap memory: init = 654311424(638976K) used = 0(0K) committed = 654311424(638976K) max = 1728053248(1687552K) INFO [main] 2025-08-11 05:03:07,519 CassandraDaemon.java:488 - Classpath: /opt/cassandra/conf:/opt/cassandra/build/classes/main:/opt/cassandra/build/classes/thrift:/opt/cassandra/lib/airline-0.6.jar:/opt/cassandra/lib/antlr-runtime-3.5.2.jar:/opt/cassandra/lib/apache-cassandra-3.11.3.jar:/opt/cassandra/lib/apache-cassandra-thrift-3.11.3.jar:/opt/cassandra/lib/asm-5.0.4.jar:/opt/cassandra/lib/caffeine-2.2.6.jar:/opt/cassandra/lib/cassandra-driver-core-3.0.1-shaded.jar:/opt/cassandra/lib/commons-cli-1.1.jar:/opt/cassandra/lib/commons-codec-1.9.jar:/opt/cassandra/lib/commons-lang3-3.1.jar:/opt/cassandra/lib/commons-math3-3.2.jar:/opt/cassandra/lib/compress-lzf-0.8.4.jar:/opt/cassandra/lib/concurrentlinkedhashmap-lru-1.4.jar:/opt/cassandra/lib/concurrent-trees-2.4.0.jar:/opt/cassandra/lib/disruptor-3.0.1.jar:/opt/cassandra/lib/ecj-4.4.2.jar:/opt/cassandra/lib/guava-18.0.jar:/opt/cassandra/lib/HdrHistogram-2.1.9.jar:/opt/cassandra/lib/high-scale-lib-1.0.6.jar:/opt/cassandra/lib/hppc-0.5.4.jar:/opt/cassandra/lib/jackson-core-asl-1.9.13.jar:/opt/cassandra/lib/jackson-mapper-asl-1.9.13.jar:/opt/cassandra/lib/jamm-0.3.0.jar:/opt/cassandra/lib/javax.inject.jar:/opt/cassandra/lib/jbcrypt-0.3m.jar:/opt/cassandra/lib/jcl-over-slf4j-1.7.7.jar:/opt/cassandra/lib/jctools-core-1.2.1.jar:/opt/cassandra/lib/jflex-1.6.0.jar:/opt/cassandra/lib/jna-4.2.2.jar:/opt/cassandra/lib/joda-time-2.4.jar:/opt/cassandra/lib/json-simple-1.1.jar:/opt/cassandra/lib/jstackjunit-0.0.1.jar:/opt/cassandra/lib/libthrift-0.13.0.jar:/opt/cassandra/lib/log4j-over-slf4j-1.7.7.jar:/opt/cassandra/lib/logback-classic-1.2.9.jar:/opt/cassandra/lib/logback-core-1.2.9.jar:/opt/cassandra/lib/lz4-1.3.0.jar:/opt/cassandra/lib/metrics-core-3.1.5.jar:/opt/cassandra/lib/metrics-jvm-3.1.5.jar:/opt/cassandra/lib/metrics-logback-3.1.5.jar:/opt/cassandra/lib/netty-all-4.1.39.Final.jar:/opt/cassandra/lib/ohc-core-0.4.4.jar:/opt/cassandra/lib/ohc-core-j8-0.4.4.jar:/opt/cassandra/lib/reporter-c
onfig3-3.0.3.jar:/opt/cassandra/lib/reporter-config-base-3.0.3.jar:/opt/cassandra/lib/sigar-1.6.4.jar:/opt/cassandra/lib/slf4j-api-1.7.7.jar:/opt/cassandra/lib/snakeyaml-1.11.jar:/opt/cassandra/lib/snappy-java-1.1.1.7.jar:/opt/cassandra/lib/snowball-stemmer-1.3.0.581.1.jar:/opt/cassandra/lib/ST4-4.0.8.jar:/opt/cassandra/lib/stream-2.5.2.jar:/opt/cassandra/lib/thrift-server-0.3.7.jar:/opt/cassandra/lib/jsr223/*/*.jar:/opt/cassandra/lib/jamm-0.3.0.jar INFO [main] 2025-08-11 05:03:07,520 CassandraDaemon.java:490 - JVM Arguments: [-Xloggc:/opt/cassandra/logs/gc.log, -ea, -XX:+UseThreadPriorities, -XX:ThreadPriorityPolicy=42, -XX:+HeapDumpOnOutOfMemoryError, -Xss256k, -XX:StringTableSize=1000003, -XX:+AlwaysPreTouch, -XX:-UseBiasedLocking, -XX:+UseTLAB, -XX:+ResizeTLAB, -XX:+UseNUMA, -XX:+PerfDisableSharedMem, -Djava.net.preferIPv4Stack=true, -Xms1g, -Xmx2g, -XX:+UseParNewGC, -XX:+UseConcMarkSweepGC, -XX:+CMSParallelRemarkEnabled, -XX:SurvivorRatio=8, -XX:MaxTenuringThreshold=1, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:CMSWaitDuration=10000, -XX:+CMSParallelInitialMarkEnabled, -XX:+CMSEdenChunksRecordAlways, -XX:+CMSClassUnloadingEnabled, -XX:+PrintGCDetails, -XX:+PrintGCDateStamps, -XX:+PrintHeapAtGC, -XX:+PrintTenuringDistribution, -XX:+PrintGCApplicationStoppedTime, -XX:+PrintPromotionFailure, -XX:+UseGCLogFileRotation, -XX:NumberOfGCLogFiles=10, -XX:GCLogFileSize=10M, -Xmn400M, -XX:+UseCondCardMark, -XX:CompileCommandFile=/opt/cassandra/conf/hotspot_compiler, -javaagent:/opt/cassandra/lib/jamm-0.3.0.jar, -Dcassandra.jmx.remote.port=7199, -Dcom.sun.management.jmxremote.rmi.port=7199, -Dcom.sun.management.jmxremote.authenticate=true, -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password, -Djava.library.path=/opt/cassandra/lib/sigar-bin, -Dcassandra.rpc_port=9161, -Dcassandra.native_transport_port=9041, -Dcassandra.ssl_storage_port=7013, -Dcassandra.storage_port=7012, -Dcassandra.jmx.local.port=7201, -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access, -Dcassandra.jmx.remote.port=7201, -Dcom.sun.management.jmxremote.rmi.port=7201, -Dcassandra.libjemalloc=/usr/lib64/libjemalloc.so.1, -XX:OnOutOfMemoryError=kill -9 %p, -Dlogback.configurationFile=logback.xml, -Dcassandra.logdir=/opt/cassandra/logs, -Dcassandra.storagedir=/opt/cassandra/data, -Dcassandra-foreground=yes] WARN [main] 2025-08-11 05:03:07,576 NativeLibrary.java:187 - Unable to lock JVM memory (ENOMEM). This can result in part of the JVM being swapped out, especially with mmapped I/O enabled. Increase RLIMIT_MEMLOCK or run Cassandra as root. INFO [main] 2025-08-11 05:03:07,576 StartupChecks.java:140 - jemalloc seems to be preloaded from /usr/lib64/libjemalloc.so.1 INFO [main] 2025-08-11 05:03:07,576 StartupChecks.java:176 - JMX is enabled to receive remote connections on port: 7201 INFO [main] 2025-08-11 05:03:07,587 SigarLibrary.java:44 - Initializing SIGAR library INFO [main] 2025-08-11 05:03:07,645 SigarLibrary.java:180 - Checked OS settings and found them configured for optimal performance. WARN [main] 2025-08-11 05:03:07,651 StartupChecks.java:311 - Maximum number of memory map areas per process (vm.max_map_count) 128960 is too low, recommended value: 1048575, you can change it with sysctl. 
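Two of the warnings above are OS limits rather than Cassandra settings: RLIMIT_MEMLOCK is too low for the JVM to lock its memory, and vm.max_map_count is 128960 against the recommended 1048575. A sketch of the usual remediation on the host (the file names under /etc/sysctl.d and /etc/security/limits.d are illustrative, the cassandra user name is an assumption, and containerized deployments would set the equivalent sysctl/ulimit on the host or pod instead):

    # raise the mmap-area limit the startup check asks for (value taken from the warning above)
    sysctl -w vm.max_map_count=1048575
    echo 'vm.max_map_count = 1048575' > /etc/sysctl.d/99-cassandra.conf    # persist across reboots

    # let the service account lock memory so mmapped segments are not swapped out
    echo 'cassandra - memlock unlimited' >> /etc/security/limits.d/cassandra.conf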
WARN [main] 2025-08-11 05:03:07,664 StartupChecks.java:332 - Directory /var/lib/cassandra/commitlog doesn't exist
WARN [main] 2025-08-11 05:03:07,664 StartupChecks.java:332 - Directory /var/lib/cassandra/saved_caches doesn't exist
WARN [main] 2025-08-11 05:03:07,665 StartupChecks.java:332 - Directory /opt/cassandra/data/hints doesn't exist
INFO [main] 2025-08-11 05:03:07,715 QueryProcessor.java:116 - Initialized prepared statement caches with 10 MB (native) and 10 MB (Thrift)
INFO [main] 2025-08-11 05:03:08,362 ColumnFamilyStore.java:411 - Initializing system.IndexInfo
INFO [main] 2025-08-11 05:03:09,978 ColumnFamilyStore.java:411 - Initializing system.batches
INFO [main] 2025-08-11 05:03:10,015 ColumnFamilyStore.java:411 - Initializing system.paxos
INFO [main] 2025-08-11 05:03:10,030 ColumnFamilyStore.java:411 - Initializing system.local
INFO [main] 2025-08-11 05:03:10,050 ColumnFamilyStore.java:411 - Initializing system.peers
INFO [main] 2025-08-11 05:03:10,081 ColumnFamilyStore.java:411 - Initializing system.peer_events
INFO [main] 2025-08-11 05:03:10,091 ColumnFamilyStore.java:411 - Initializing system.range_xfers
INFO [main] 2025-08-11 05:03:10,119 ColumnFamilyStore.java:411 - Initializing system.compaction_history
INFO [main] 2025-08-11 05:03:10,130 ColumnFamilyStore.java:411 - Initializing system.sstable_activity
INFO [main] 2025-08-11 05:03:10,166 ColumnFamilyStore.java:411 - Initializing system.size_estimates
INFO [main] 2025-08-11 05:03:10,183 ColumnFamilyStore.java:411 - Initializing system.available_ranges
INFO [main] 2025-08-11 05:03:10,205 ColumnFamilyStore.java:411 - Initializing system.transferred_ranges
INFO [main] 2025-08-11 05:03:10,223 ColumnFamilyStore.java:411 - Initializing system.views_builds_in_progress
INFO [main] 2025-08-11 05:03:10,245 ColumnFamilyStore.java:411 - Initializing system.built_views
INFO [main] 2025-08-11 05:03:10,260 ColumnFamilyStore.java:411 - Initializing system.hints
INFO [main] 2025-08-11 05:03:10,281 ColumnFamilyStore.java:411 - Initializing system.batchlog
INFO [main] 2025-08-11 05:03:10,295 ColumnFamilyStore.java:411 - Initializing system.prepared_statements
INFO [main] 2025-08-11 05:03:10,309 ColumnFamilyStore.java:411 - Initializing system.schema_keyspaces
INFO [main] 2025-08-11 05:03:10,332 ColumnFamilyStore.java:411 - Initializing system.schema_columnfamilies
INFO [main] 2025-08-11 05:03:10,357 ColumnFamilyStore.java:411 - Initializing system.schema_columns
INFO [main] 2025-08-11 05:03:10,375 ColumnFamilyStore.java:411 - Initializing system.schema_triggers
INFO [main] 2025-08-11 05:03:10,396 ColumnFamilyStore.java:411 - Initializing system.schema_usertypes
INFO [main] 2025-08-11 05:03:10,434 ColumnFamilyStore.java:411 - Initializing system.schema_functions
INFO [main] 2025-08-11 05:03:10,439 ColumnFamilyStore.java:411 - Initializing system.schema_aggregates
INFO [main] 2025-08-11 05:03:10,462 ViewManager.java:137 - Not submitting build tasks for views in keyspace system as storage service is not initialized
INFO [main] 2025-08-11 05:03:10,559 ApproximateTime.java:44 - Scheduling approximate time-check task with a precision of 10 milliseconds
INFO [main] 2025-08-11 05:03:10,594 ColumnFamilyStore.java:411 - Initializing system_schema.keyspaces
INFO [main] 2025-08-11 05:03:10,608 ColumnFamilyStore.java:411 - Initializing system_schema.tables
INFO [main] 2025-08-11 05:03:10,638 ColumnFamilyStore.java:411 - Initializing system_schema.columns
INFO [main] 2025-08-11 05:03:10,662 ColumnFamilyStore.java:411 - Initializing system_schema.triggers
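The three "Directory ... doesn't exist" warnings above are harmless on a first start: the paths come straight from commitlog_directory, saved_caches_directory and cassandra.storagedir, and Cassandra normally creates them itself when it has write access. Pre-creating them only matters if the process lacks permission to do so; a sketch, assuming a cassandra service account:

    # paths copied from the WARN lines above; the owner is an assumption about the service account
    mkdir -p /var/lib/cassandra/commitlog /var/lib/cassandra/saved_caches /opt/cassandra/data/hints
    chown -R cassandra:cassandra /var/lib/cassandra /opt/cassandra/data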
INFO [main] 2025-08-11 05:03:10,675 ColumnFamilyStore.java:411 - Initializing system_schema.dropped_columns
INFO [main] 2025-08-11 05:03:10,682 ColumnFamilyStore.java:411 - Initializing system_schema.views
INFO [main] 2025-08-11 05:03:10,689 ColumnFamilyStore.java:411 - Initializing system_schema.types
INFO [main] 2025-08-11 05:03:10,696 ColumnFamilyStore.java:411 - Initializing system_schema.functions
INFO [main] 2025-08-11 05:03:10,700 ColumnFamilyStore.java:411 - Initializing system_schema.aggregates
INFO [main] 2025-08-11 05:03:10,706 ColumnFamilyStore.java:411 - Initializing system_schema.indexes
INFO [main] 2025-08-11 05:03:10,708 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_schema as storage service is not initialized
INFO [MemtableFlushWriter:1] 2025-08-11 05:03:11,773 CacheService.java:112 - Initializing key cache with capacity of 49 MBs.
INFO [MemtableFlushWriter:1] 2025-08-11 05:03:11,780 CacheService.java:134 - Initializing row cache with capacity of 0 MBs
INFO [MemtableFlushWriter:1] 2025-08-11 05:03:11,787 CacheService.java:163 - Initializing counter cache with capacity of 24 MBs
INFO [MemtableFlushWriter:1] 2025-08-11 05:03:11,788 CacheService.java:174 - Scheduling counter cache save to every 7200 seconds (going to save all keys).
INFO [CompactionExecutor:4] 2025-08-11 05:03:12,714 BufferPool.java:230 - Global buffer pool is enabled, when pool is exhausted (max is 502.000MiB) it will allocate on heap
INFO [main] 2025-08-11 05:03:13,006 StorageService.java:600 - Populating token metadata from system tables
INFO [main] 2025-08-11 05:03:13,104 StorageService.java:607 - Token metadata:
INFO [pool-4-thread-1] 2025-08-11 05:03:13,134 AutoSavingCache.java:174 - Completed loading (0 ms; 5 keys) KeyCache cache
INFO [main] 2025-08-11 05:03:13,157 CommitLog.java:152 - No commitlog files found; skipping replay
INFO [main] 2025-08-11 05:03:13,157 StorageService.java:600 - Populating token metadata from system tables
INFO [main] 2025-08-11 05:03:13,167 StorageService.java:607 - Token metadata:
INFO [main] 2025-08-11 05:03:13,272 QueryProcessor.java:163 - Preloaded 0 prepared statements
INFO [main] 2025-08-11 05:03:13,273 StorageService.java:618 - Cassandra version: 3.11.3
INFO [main] 2025-08-11 05:03:13,273 StorageService.java:619 - Thrift API version: 20.1.0
INFO [main] 2025-08-11 05:03:13,273 StorageService.java:620 - CQL supported versions: 3.4.4 (default: 3.4.4)
INFO [main] 2025-08-11 05:03:13,275 StorageService.java:622 - Native protocol supported versions: 3/v3, 4/v4, 5/v5-beta (default: 4/v4)
INFO [main] 2025-08-11 05:03:13,311 IndexSummaryManager.java:85 - Initializing index summary manager with a memory pool size of 49 MB and a resize interval of 60 minutes
INFO [main] 2025-08-11 05:03:13,323 MessagingService.java:761 - Starting Messaging Service on /10.0.0.48:7012 (ens3)
WARN [main] 2025-08-11 05:03:13,335 SystemKeyspace.java:1087 - No host ID found, created f6d8457c-4929-4297-9160-2dab4f2a1cea (Note: This should happen exactly once per node).
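Every service on this node runs on a non-default port, set through the -D overrides in the JVM arguments: inter-node messaging on 7012 (confirmed by the MessagingService line above), JMX on 7201, the native CQL transport on 9041 and Thrift on 9161. Client tooling therefore has to be pointed at those values explicitly; two illustrative invocations, assuming nodetool and cqlsh from the same 3.11 installation are on the PATH:

    nodetool -h 10.0.0.48 -p 7201 status    # -p is the JMX port (default 7199); add -u/-pw since remote JMX authentication is enabled
    cqlsh 10.0.0.48 9041                    # native transport moved from the default 9042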
INFO [main] 2025-08-11 05:03:13,369 OutboundTcpConnection.java:108 - OutboundTcpConnection using coalescing strategy DISABLED INFO [HANDSHAKE-/10.0.0.38] 2025-08-11 05:03:13,455 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.38 INFO [HANDSHAKE-/10.0.0.254] 2025-08-11 05:03:13,828 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.254 INFO [main] 2025-08-11 05:03:14,461 StorageService.java:704 - Loading persisted ring state INFO [main] 2025-08-11 05:03:14,462 StorageService.java:822 - Starting up server gossip INFO [main] 2025-08-11 05:03:14,669 StorageService.java:1446 - JOINING: waiting for ring information INFO [HANDSHAKE-/10.0.0.254] 2025-08-11 05:03:15,591 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.254 INFO [HANDSHAKE-/10.0.0.38] 2025-08-11 05:03:16,232 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.38 INFO [GossipStage:1] 2025-08-11 05:03:17,097 Gossiper.java:1055 - Node /10.0.0.38 is now part of the cluster INFO [RequestResponseStage-3] 2025-08-11 05:03:17,119 Gossiper.java:1019 - InetAddress /10.0.0.38 is now UP INFO [GossipStage:1] 2025-08-11 05:03:17,126 TokenMetadata.java:479 - Updating topology for /10.0.0.38 INFO [GossipStage:1] 2025-08-11 05:03:17,128 TokenMetadata.java:479 - Updating topology for /10.0.0.38 INFO [GossipStage:1] 2025-08-11 05:03:17,129 Gossiper.java:1055 - Node /10.0.0.254 is now part of the cluster INFO [RequestResponseStage-5] 2025-08-11 05:03:17,136 Gossiper.java:1019 - InetAddress /10.0.0.254 is now UP INFO [MigrationStage:1] 2025-08-11 05:03:17,758 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_distributed as storage service is not initialized INFO [MigrationStage:1] 2025-08-11 05:03:17,761 ColumnFamilyStore.java:411 - Initializing system_distributed.parent_repair_history INFO [MigrationStage:1] 2025-08-11 05:03:17,822 ColumnFamilyStore.java:411 - Initializing system_distributed.repair_history INFO [MigrationStage:1] 2025-08-11 05:03:17,859 ColumnFamilyStore.java:411 - Initializing system_distributed.view_build_status INFO [MigrationStage:1] 2025-08-11 05:03:18,317 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_auth as storage service is not initialized INFO [MigrationStage:1] 2025-08-11 05:03:18,320 ColumnFamilyStore.java:411 - Initializing system_auth.resource_role_permissons_index INFO [MigrationStage:1] 2025-08-11 05:03:18,338 ColumnFamilyStore.java:411 - Initializing system_auth.role_members INFO [MigrationStage:1] 2025-08-11 05:03:18,342 ColumnFamilyStore.java:411 - Initializing system_auth.role_permissions INFO [MigrationStage:1] 2025-08-11 05:03:18,347 ColumnFamilyStore.java:411 - Initializing system_auth.roles INFO [main] 2025-08-11 05:03:18,671 StorageService.java:1446 - JOINING: waiting for schema information to complete INFO [InternalResponseStage:3] 2025-08-11 05:03:18,675 ViewManager.java:137 - Not submitting build tasks for views in keyspace system_traces as storage service is not initialized INFO [InternalResponseStage:3] 2025-08-11 05:03:18,677 ColumnFamilyStore.java:411 - Initializing system_traces.events INFO [InternalResponseStage:3] 2025-08-11 05:03:18,682 ColumnFamilyStore.java:411 - Initializing system_traces.sessions INFO [main] 2025-08-11 05:03:20,309 StorageService.java:1446 - JOINING: schema complete, ready to bootstrap INFO [main] 2025-08-11 05:03:20,309 StorageService.java:1446 - JOINING: waiting for pending range calculation INFO [main] 2025-08-11 05:03:20,309 StorageService.java:1446 - 
JOINING: calculation complete, ready to bootstrap INFO [main] 2025-08-11 05:03:20,310 StorageService.java:1446 - JOINING: getting bootstrap token INFO [main] 2025-08-11 05:03:20,312 BootStrapper.java:228 - Generated random tokens. tokens are [-4197083962363439800, 3802810409268025272, -126483762129396253, 6076655552401445658, -6134328162686352717, 7939111306577569629, 287205345523174351, 4730954820271062862, 7809704731643627816, 9131155933292727234, -6974891839665550841, -8140886725047479965, -4075245111865084965, -7945482649562121752, 8267828697757456233, 879148315199823612, 6634263990242994517, 8163925543986831980, 8296548771881857416, 2633398887500062129, 8955681662517860001, -2042107031688257267, 546589871580850270, -2412113944617227059, -4736130507197519227, 8244831497827488307, -2594057050346447974, -8672831724420565071, 3981848638898681815, -6019726669283324382, -368410311188461463, -7563818221229018578, -4521488618863649366, -1525033473775288397, 7710853501753560622, 4637960876282098213, -7820886195149708533, -4489080927856958381, -4931857931550745162, 5202002267371137672, -3324190144110356135, -7842244787832812792, 6658466004914435651, -5058061301161925240, 3679435756802228243, -8926960164116389699, 5673869951372515109, -7373728615857692031, 667074535379716065, -5896005696808980415, -7325045312313673778, 4146545004611932228, 7627809618223831895, -7441080496913558521, -8897011655877986556, -5501537733473191249, 8564815121727448612, -820488156107211676, -2676971557300201118, -2678788595120006951, 1846479752931742242, 5184123474996300722, 8559024631113527395, -8271007337897437676, -2331573974283261115, 7785109574683575069, 7868147537120335778, 6940642863900840592, -6185321664785390867, 1450089560116722459, 3442355594869325230, 5607251782982872643, 2600188940527603086, -7339713471625573045, -6638449789009138030, -8164877241424367753, 2748526250703330209, 3587633986700984353, -4744610109226369710, -7372010699134238267, 1846087640425745923, 8304557120858621072, 6851471028965206385, 7532134048416759969, 6925909714862136898, 2521067653443067332, -1008058511114750279, 7988926535747255750, -5287898245960497758, -7541593461873863722, 8652521434428728393, 2322477118279334453, 6525223930123867743, -1385086018746282321, -3622876436302789865, -5368807587504563866, -3398939377639998875, -8024808279800748894, -5282402710865866611, -4709496088264803392, 772167465841629907, 1583306038204323492, 2738071990804951970, 4783397415100528293, 5247904037617519989, 73323592733362748, -1468254437783722729, 4099540822772080403, 6748175350490038472, -3494221945950948918, -599957605788072323, 3045239571877813348, 8686087284939116258, 8361891497469610466, -4954876320170421286, -7776928376103312230, -8902069928309952155, 3345763701967709947, -814443708445050622, -3731744062770086480, 5426268162916247331, -1959599228599241227, -2412463596240449577, 7481937981249238027, -6276730227196050418, 5152840102451147086, 3409177703267527111, -5338176710596373213, 1625780839694007778, 1134104829855867022, 6633397075147707962, -1231167727123599538, 876962710834753353, 7301305743446726684, -3370375960617925362, -5607691303091636097, 1226187953282745912, 6996922375420074016, 2000980410043670775, -7259385112365084092, -5404863221082007118, -4844499461943104782, -3538179726955975952, -6648085478306854718, -5732215660699044543, -1416700314482582080, -3393099307933209902, -7634358062032714567, 3869134068301015860, 68294833281829858, 2851361835435089859, 5921375744912111606, 7706704737311625396, -7135887066198284632, 
258116952272590857, 1791229607271991484, 2747077447355076954, -6778937614278536037, 7940493601285485624, 7796629263731044380, -6390307246210591476, 7429738461854395141, 5216494990726522639, -5864797990146363529, -2846430109168571684, -5829260093265123559, -4366712784783250131, 4218615015839983751, 5445234670039821968, -5778196278079830259, -7155161995324742235, -6731050168902207730, -7138653905397222926, 2997365060480352625, 3305376427039509107, 3447012091396137360, -167115230997859658, -1195708038337253839, 4125447657159692949, -676340756732728446, -8996103326770595632, 6952957122908849470, -4634068368093983901, -7396557057092411973, 9215517020814662734, -4870448718697103134, 5563394540808202670, -8737410513625769924, 7073654834383905566, -2842535312685516834, -8695058043261568531, -8052655054580716626, -9130648534392589358, 4238326087328156478, -7909294062430847492, 7166560032605143660, -3762931899282199458, 9007746987126081159, 8191972105586736739, -5451461841864486264, 3039370605047539794, -5274465902232592412, 4685141216318304434, -8057444986519686454, 8466791995214021007, 3463269399268576376, 4329013856362122413, 6582245283175723492, -7136486788277762002, 7972466993804802107, 2206434029682881197, 3509534650949514482, -7633924726455520826, -8459467544264058098, -3759827021383604513, -6384230829087607017, 5545070054430849941, 2157819566113161891, -1319901365273273097, 4832847439944737406, -6998935739506664203, -2556215560702328016, 2425800185139796318, -3404294544813305165, 696137572948901728, 1855913671156495034, -2103635988731619900, -1293654771512727052, -2656175836121173039, -9188173267260076915, -2684577250033897991, 8824467067786056237, 5235095483627725318, -4179599619691999607, 123949880842690610, -3166033446281240082, -6563076140482259321, 4447526190331659310, -3480881294206286530, 316158356243905899, -1695974319998699126, 8974813643354218862, -7965126086844380957, -569551658141340301, 7085862787404434814, -1986692481316011260, 2730956272123919711, -4841651669471292755, 8545913561086212560, 3817997244919460754, -433970502201470623, 642487503442138792, -8388697936529959207, 2030750847315927905, -4441330684351654963, -3165965038266689870] INFO [main] 2025-08-11 05:03:20,392 StorageService.java:1446 - JOINING: sleeping 30000 ms for pending range setup INFO [MigrationStage:1] 2025-08-11 05:03:38,609 ViewManager.java:137 - Not submitting build tasks for views in keyspace reaper_db as storage service is not initialized INFO [MigrationStage:1] 2025-08-11 05:03:40,991 ColumnFamilyStore.java:411 - Initializing reaper_db.schema_migration INFO [InternalResponseStage:4] 2025-08-11 05:03:41,399 ColumnFamilyStore.java:411 - Initializing reaper_db.schema_migration_leader INFO [MigrationStage:1] 2025-08-11 05:03:42,309 ColumnFamilyStore.java:411 - Initializing reaper_db.running_reapers INFO [InternalResponseStage:1] 2025-08-11 05:03:42,473 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_unit_v1 INFO [MigrationStage:1] 2025-08-11 05:03:43,382 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_schedule_by_cluster_and_keyspace INFO [MigrationStage:1] 2025-08-11 05:03:43,767 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_cluster INFO [MigrationStage:1] 2025-08-11 05:03:44,698 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_schedule_v1 INFO [MigrationStage:1] 2025-08-11 05:03:45,364 ColumnFamilyStore.java:411 - Initializing reaper_db.cluster INFO [MigrationStage:1] 2025-08-11 05:03:45,633 ColumnFamilyStore.java:411 - Initializing reaper_db.snapshot 
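At this point the node has generated its 256 random vnode tokens (matching num_tokens=256 in the configuration), is sleeping 30 seconds for pending-range setup, and is receiving the reaper_db schema from the existing nodes; streaming itself starts further down. Once the join completes, a quick sanity check, with the same caveats about ports and JMX credentials as above, might look like:

    nodetool -h 10.0.0.48 -p 7201 status                 # the new node should show as UN (Up/Normal)
    nodetool -h 10.0.0.48 -p 7201 ring | grep -c 10.0.0.48   # roughly 256 lines, one per vnode token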
INFO [MigrationStage:1] 2025-08-11 05:03:46,498 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v1 INFO [MigrationStage:1] 2025-08-11 05:03:47,439 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run INFO [MigrationStage:1] 2025-08-11 05:03:47,782 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_unit INFO [MigrationStage:1] 2025-08-11 05:03:48,315 ColumnFamilyStore.java:411 - Initializing reaper_db.leader INFO [main] 2025-08-11 05:03:50,396 StorageService.java:1446 - JOINING: Starting to bootstrap... INFO [main] 2025-08-11 05:03:50,552 StreamResultFuture.java:90 - [Stream #91db79e0-7670-11f0-993e-6d60ca3e2f4e] Executing streaming plan for Bootstrap INFO [StreamConnectionEstablisher:1] 2025-08-11 05:03:50,560 StreamSession.java:266 - [Stream #91db79e0-7670-11f0-993e-6d60ca3e2f4e] Starting streaming to /10.0.0.38 INFO [StreamConnectionEstablisher:1] 2025-08-11 05:03:50,565 StreamCoordinator.java:264 - [Stream #91db79e0-7670-11f0-993e-6d60ca3e2f4e, ID#0] Beginning stream session with /10.0.0.38 INFO [MigrationStage:1] 2025-08-11 05:03:50,603 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v2 INFO [STREAM-IN-/10.0.0.38:7012] 2025-08-11 05:03:50,689 StreamResultFuture.java:173 - [Stream #91db79e0-7670-11f0-993e-6d60ca3e2f4e ID#0] Prepare completed. Receiving 1 files(0.079KiB), sending 0 files(0.000KiB) INFO [StreamConnectionEstablisher:2] 2025-08-11 05:03:50,692 StreamSession.java:266 - [Stream #91db79e0-7670-11f0-993e-6d60ca3e2f4e] Starting streaming to /10.0.0.254 INFO [StreamConnectionEstablisher:2] 2025-08-11 05:03:50,695 StreamCoordinator.java:264 - [Stream #91db79e0-7670-11f0-993e-6d60ca3e2f4e, ID#0] Beginning stream session with /10.0.0.254 INFO [StreamReceiveTask:1] 2025-08-11 05:03:50,993 StreamResultFuture.java:187 - [Stream #91db79e0-7670-11f0-993e-6d60ca3e2f4e] Session with /10.0.0.38 is complete INFO [STREAM-IN-/10.0.0.254:7012] 2025-08-11 05:03:51,059 StreamResultFuture.java:173 - [Stream #91db79e0-7670-11f0-993e-6d60ca3e2f4e ID#0] Prepare completed. Receiving 1 files(1.719KiB), sending 0 files(0.000KiB) INFO [StreamReceiveTask:1] 2025-08-11 05:03:51,133 StreamResultFuture.java:187 - [Stream #91db79e0-7670-11f0-993e-6d60ca3e2f4e] Session with /10.0.0.254 is complete INFO [StreamReceiveTask:1] 2025-08-11 05:03:51,141 StreamResultFuture.java:219 - [Stream #91db79e0-7670-11f0-993e-6d60ca3e2f4e] All sessions completed INFO [StreamReceiveTask:1] 2025-08-11 05:03:51,144 StorageService.java:1505 - Bootstrap completed! 
for the tokens [-4197083962363439800, 3802810409268025272, -126483762129396253, 6076655552401445658, -6134328162686352717, 7939111306577569629, 287205345523174351, 4730954820271062862, 7809704731643627816, 9131155933292727234, -6974891839665550841, -8140886725047479965, -4075245111865084965, -7945482649562121752, 8267828697757456233, 879148315199823612, 6634263990242994517, 8163925543986831980, 8296548771881857416, 2633398887500062129, 8955681662517860001, -2042107031688257267, 546589871580850270, -2412113944617227059, -4736130507197519227, 8244831497827488307, -2594057050346447974, -8672831724420565071, 3981848638898681815, -6019726669283324382, -368410311188461463, -7563818221229018578, -4521488618863649366, -1525033473775288397, 7710853501753560622, 4637960876282098213, -7820886195149708533, -4489080927856958381, -4931857931550745162, 5202002267371137672, -3324190144110356135, -7842244787832812792, 6658466004914435651, -5058061301161925240, 3679435756802228243, -8926960164116389699, 5673869951372515109, -7373728615857692031, 667074535379716065, -5896005696808980415, -7325045312313673778, 4146545004611932228, 7627809618223831895, -7441080496913558521, -8897011655877986556, -5501537733473191249, 8564815121727448612, -820488156107211676, -2676971557300201118, -2678788595120006951, 1846479752931742242, 5184123474996300722, 8559024631113527395, -8271007337897437676, -2331573974283261115, 7785109574683575069, 7868147537120335778, 6940642863900840592, -6185321664785390867, 1450089560116722459, 3442355594869325230, 5607251782982872643, 2600188940527603086, -7339713471625573045, -6638449789009138030, -8164877241424367753, 2748526250703330209, 3587633986700984353, -4744610109226369710, -7372010699134238267, 1846087640425745923, 8304557120858621072, 6851471028965206385, 7532134048416759969, 6925909714862136898, 2521067653443067332, -1008058511114750279, 7988926535747255750, -5287898245960497758, -7541593461873863722, 8652521434428728393, 2322477118279334453, 6525223930123867743, -1385086018746282321, -3622876436302789865, -5368807587504563866, -3398939377639998875, -8024808279800748894, -5282402710865866611, -4709496088264803392, 772167465841629907, 1583306038204323492, 2738071990804951970, 4783397415100528293, 5247904037617519989, 73323592733362748, -1468254437783722729, 4099540822772080403, 6748175350490038472, -3494221945950948918, -599957605788072323, 3045239571877813348, 8686087284939116258, 8361891497469610466, -4954876320170421286, -7776928376103312230, -8902069928309952155, 3345763701967709947, -814443708445050622, -3731744062770086480, 5426268162916247331, -1959599228599241227, -2412463596240449577, 7481937981249238027, -6276730227196050418, 5152840102451147086, 3409177703267527111, -5338176710596373213, 1625780839694007778, 1134104829855867022, 6633397075147707962, -1231167727123599538, 876962710834753353, 7301305743446726684, -3370375960617925362, -5607691303091636097, 1226187953282745912, 6996922375420074016, 2000980410043670775, -7259385112365084092, -5404863221082007118, -4844499461943104782, -3538179726955975952, -6648085478306854718, -5732215660699044543, -1416700314482582080, -3393099307933209902, -7634358062032714567, 3869134068301015860, 68294833281829858, 2851361835435089859, 5921375744912111606, 7706704737311625396, -7135887066198284632, 258116952272590857, 1791229607271991484, 2747077447355076954, -6778937614278536037, 7940493601285485624, 7796629263731044380, -6390307246210591476, 7429738461854395141, 5216494990726522639, -5864797990146363529, -2846430109168571684, 
-5829260093265123559, -4366712784783250131, 4218615015839983751, 5445234670039821968, -5778196278079830259, -7155161995324742235, -6731050168902207730, -7138653905397222926, 2997365060480352625, 3305376427039509107, 3447012091396137360, -167115230997859658, -1195708038337253839, 4125447657159692949, -676340756732728446, -8996103326770595632, 6952957122908849470, -4634068368093983901, -7396557057092411973, 9215517020814662734, -4870448718697103134, 5563394540808202670, -8737410513625769924, 7073654834383905566, -2842535312685516834, -8695058043261568531, -8052655054580716626, -9130648534392589358, 4238326087328156478, -7909294062430847492, 7166560032605143660, -3762931899282199458, 9007746987126081159, 8191972105586736739, -5451461841864486264, 3039370605047539794, -5274465902232592412, 4685141216318304434, -8057444986519686454, 8466791995214021007, 3463269399268576376, 4329013856362122413, 6582245283175723492, -7136486788277762002, 7972466993804802107, 2206434029682881197, 3509534650949514482, -7633924726455520826, -8459467544264058098, -3759827021383604513, -6384230829087607017, 5545070054430849941, 2157819566113161891, -1319901365273273097, 4832847439944737406, -6998935739506664203, -2556215560702328016, 2425800185139796318, -3404294544813305165, 696137572948901728, 1855913671156495034, -2103635988731619900, -1293654771512727052, -2656175836121173039, -9188173267260076915, -2684577250033897991, 8824467067786056237, 5235095483627725318, -4179599619691999607, 123949880842690610, -3166033446281240082, -6563076140482259321, 4447526190331659310, -3480881294206286530, 316158356243905899, -1695974319998699126, 8974813643354218862, -7965126086844380957, -569551658141340301, 7085862787404434814, -1986692481316011260, 2730956272123919711, -4841651669471292755, 8545913561086212560, 3817997244919460754, -433970502201470623, 642487503442138792, -8388697936529959207, 2030750847315927905, -4441330684351654963, -3165965038266689870] INFO [main] 2025-08-11 05:03:51,147 StorageService.java:1446 - JOINING: Finish joining ring INFO [main] 2025-08-11 05:03:51,199 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='repair_schedule_v1') INFO [main] 2025-08-11 05:03:51,200 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='schema_migration') INFO [main] 2025-08-11 05:03:51,200 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='snapshot') INFO [main] 2025-08-11 05:03:51,200 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='repair_unit_v1') INFO [main] 2025-08-11 05:03:51,200 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='node_metrics_v2') INFO [main] 2025-08-11 05:03:51,201 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='repair_schedule_by_cluster_and_keyspace') INFO [main] 2025-08-11 05:03:51,201 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='repair_run_by_cluster') INFO [main] 2025-08-11 05:03:51,201 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='repair_run') INFO [main] 2025-08-11 05:03:51,201 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks 
for: CFS(Keyspace='reaper_db', ColumnFamily='repair_run_by_unit') INFO [main] 2025-08-11 05:03:51,201 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='cluster') INFO [main] 2025-08-11 05:03:51,201 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='leader') INFO [main] 2025-08-11 05:03:51,201 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='schema_migration_leader') INFO [main] 2025-08-11 05:03:51,201 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='node_metrics_v1') INFO [main] 2025-08-11 05:03:51,202 SecondaryIndexManager.java:509 - Executing pre-join post-bootstrap tasks for: CFS(Keyspace='reaper_db', ColumnFamily='running_reapers') INFO [main] 2025-08-11 05:03:51,285 Gossiper.java:1692 - Waiting for gossip to settle... INFO [MigrationStage:1] 2025-08-11 05:03:51,422 ColumnFamilyStore.java:411 - Initializing reaper_db.node_operations INFO [MigrationStage:1] 2025-08-11 05:03:54,480 ColumnFamilyStore.java:411 - Initializing reaper_db.diagnostic_event_subscription INFO [MigrationStage:1] 2025-08-11 05:03:56,407 ColumnFamilyStore.java:411 - Initializing reaper_db.node_metrics_v3 INFO [MigrationStage:1] 2025-08-11 05:03:56,830 ColumnFamilyStore.java:411 - Initializing reaper_db.repair_run_by_cluster_v2 INFO [MigrationStage:1] 2025-08-11 05:03:58,035 ColumnFamilyStore.java:411 - Initializing reaper_db.running_repairs INFO [MigrationStage:1] 2025-08-11 05:03:58,446 ColumnFamilyStore.java:411 - Initializing reaper_db.percent_repaired_by_schedule INFO [main] 2025-08-11 05:03:59,286 Gossiper.java:1723 - No gossip backlog; proceeding INFO [main] 2025-08-11 05:03:59,493 NativeTransportService.java:70 - Netty using native Epoll event loop INFO [main] 2025-08-11 05:03:59,560 Server.java:155 - Using Netty Version: [netty-buffer=netty-buffer-4.1.39.Final.88c2a4c (repository: dirty), netty-codec=netty-codec-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-dns=netty-codec-dns-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-haproxy=netty-codec-haproxy-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-http=netty-codec-http-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-http2=netty-codec-http2-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-memcache=netty-codec-memcache-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-mqtt=netty-codec-mqtt-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-redis=netty-codec-redis-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-smtp=netty-codec-smtp-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-socks=netty-codec-socks-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-stomp=netty-codec-stomp-4.1.39.Final.88c2a4c (repository: dirty), netty-codec-xml=netty-codec-xml-4.1.39.Final.88c2a4c (repository: dirty), netty-common=netty-common-4.1.39.Final.88c2a4c (repository: dirty), netty-handler=netty-handler-4.1.39.Final.88c2a4c (repository: dirty), netty-handler-proxy=netty-handler-proxy-4.1.39.Final.88c2a4c (repository: dirty), netty-resolver=netty-resolver-4.1.39.Final.88c2a4c (repository: dirty), netty-resolver-dns=netty-resolver-dns-4.1.39.Final.88c2a4c (repository: dirty), netty-tcnative=netty-tcnative-2.0.25.Final.c46c351, netty-transport=netty-transport-4.1.39.Final.88c2a4c (repository: dirty), 
netty-transport-native-epoll=netty-transport-native-epoll-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-native-kqueue=netty-transport-native-kqueue-4.1.39.Final.88c2a4cab5 (repository: dirty), netty-transport-native-unix-common=netty-transport-native-unix-common-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-rxtx=netty-transport-rxtx-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-sctp=netty-transport-sctp-4.1.39.Final.88c2a4c (repository: dirty), netty-transport-udt=netty-transport-udt-4.1.39.Final.88c2a4c (repository: dirty)] INFO [main] 2025-08-11 05:03:59,560 Server.java:156 - Starting listening for CQL clients on /10.0.0.48:9041 (unencrypted)... INFO [main] 2025-08-11 05:03:59,613 ThriftServer.java:116 - Binding thrift service to /10.0.0.48:9161 INFO [Thread-4] 2025-08-11 05:03:59,618 ThriftServer.java:133 - Listening for thrift clients... INFO [HANDSHAKE-/10.0.0.48] 2025-08-11 05:04:04,296 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.48 INFO [Native-Transport-Requests-1] 2025-08-11 05:04:04,420 MigrationManager.java:454 - Update table 'reaper_db/repair_run' From org.apache.cassandra.config.CFMetaData@4e51c110[cfId=8ffea390-7670-11f0-8f47-2597d7221e0e,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@2723e671[cfId=8ffea390-7670-11f0-8f47-2597d7221e0e,ksName=reaper_db,cfName=repair_run,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : '5000'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.TimeUUIDType),partitionColumns=[[adaptive_schedule cause cluster_name 
creation_time end_time intensity last_event owner pause_time repair_parallelism repair_unit_id segment_count start_time state tables] | [coordinator_host end_token fail_count host_id replicas segment_end_time segment_start_time segment_state start_token token_ranges]],partitionKeyColumns=[id],clusteringColumns=[segment_id],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, id, segment_end_time, state, end_token, start_token, start_time, token_ranges, tables, pause_time, repair_unit_id, host_id, segment_count, last_event, adaptive_schedule, cluster_name, end_time, segment_start_time, segment_state, cause, creation_time, coordinator_host, replicas, owner, repair_parallelism, segment_id, fail_count],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-2] 2025-08-11 05:04:05,245 MigrationManager.java:454 - Update table 'reaper_db/leader' From org.apache.cassandra.config.CFMetaData@6bbfcc71[cfId=90842740-7670-11f0-8f47-2597d7221e0e,ksName=reaper_db,cfName=leader,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=600, default_time_to_live=600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_heartbeat reaper_instance_host reaper_instance_id]],partitionKeyColumns=[leader_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[reaper_instance_id, last_heartbeat, reaper_instance_host, leader_id],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@61e37972[cfId=90842740-7670-11f0-8f47-2597d7221e0e,ksName=reaper_db,cfName=leader,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=600, default_time_to_live=600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [last_heartbeat reaper_instance_host reaper_instance_id]],partitionKeyColumns=[leader_id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[reaper_instance_id, last_heartbeat, reaper_instance_host, leader_id],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-4] 2025-08-11 05:04:05,245 MigrationManager.java:454 - Update table 'reaper_db/repair_run_by_cluster_v2' From org.apache.cassandra.config.CFMetaData@18618236[cfId=95939fe0-7670-11f0-bff1-ef813b7a18da,ksName=reaper_db,cfName=repair_run_by_cluster_v2,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, 
speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimeUUIDType)),partitionColumns=[[] | [repair_run_state]],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, repair_run_state, id],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@4dcaf40f[cfId=95939fe0-7670-11f0-bff1-ef813b7a18da,ksName=reaper_db,cfName=repair_run_by_cluster_v2,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimeUUIDType)),partitionColumns=[[] | [repair_run_state]],partitionKeyColumns=[cluster_name],clusteringColumns=[id],keyValidator=org.apache.cassandra.db.marshal.UTF8Type,columnMetadata=[cluster_name, repair_run_state, id],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-5] 2025-08-11 05:04:05,245 MigrationManager.java:454 - Update table 'reaper_db/repair_schedule_v1' From org.apache.cassandra.config.CFMetaData@700747ef[cfId=8e5c1720-7670-11f0-8f47-2597d7221e0e,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity last_run next_activation owner pause_time percent_unrepaired_threshold repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, percent_unrepaired_threshold, id, last_run, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@793c4b00[cfId=8e5c1720-7670-11f0-8f47-2597d7221e0e,ksName=reaper_db,cfName=repair_schedule_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, 
memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [adaptive creation_time days_between intensity last_run next_activation owner pause_time percent_unrepaired_threshold repair_parallelism repair_unit_id segment_count segment_count_per_node state run_history]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[intensity, segment_count, days_between, percent_unrepaired_threshold, id, last_run, state, run_history, creation_time, adaptive, owner, repair_parallelism, segment_count_per_node, pause_time, repair_unit_id, next_activation],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-3] 2025-08-11 05:04:05,247 MigrationManager.java:454 - Update table 'reaper_db/repair_unit_v1' From org.apache.cassandra.config.CFMetaData@b5a9e40[cfId=8ca73b30-7670-11f0-bff1-ef813b7a18da,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count timeout blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, timeout, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@203af137[cfId=8ca73b30-7670-11f0-bff1-ef813b7a18da,ksName=reaper_db,cfName=repair_unit_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(),partitionColumns=[[] | [cluster_name incremental_repair keyspace_name repair_thread_count timeout blacklisted_tables column_families datacenters nodes]],partitionKeyColumns=[id],clusteringColumns=[],keyValidator=org.apache.cassandra.db.marshal.TimeUUIDType,columnMetadata=[blacklisted_tables, datacenters, repair_thread_count, id, keyspace_name, timeout, nodes, cluster_name, incremental_repair, column_families],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 
2025-08-11 05:04:05,248 MigrationManager.java:454 - Update table 'reaper_db/running_repairs' From org.apache.cassandra.config.CFMetaData@45de66bc[cfId=963229d0-7670-11f0-bff1-ef813b7a18da,ksName=reaper_db,cfName=running_repairs,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [reaper_instance_host reaper_instance_id segment_id]],partitionKeyColumns=[repair_id],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[reaper_instance_id, repair_id, node, segment_id, reaper_instance_host],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@2658b2ce[cfId=963229d0-7670-11f0-bff1-ef813b7a18da,ksName=reaper_db,cfName=running_repairs,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=300, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [reaper_instance_host reaper_instance_id segment_id]],partitionKeyColumns=[repair_id],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.UUIDType,columnMetadata=[reaper_instance_id, repair_id, node, segment_id, reaper_instance_host],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-7] 2025-08-11 05:04:05,249 MigrationManager.java:454 - Update table 'reaper_db/node_metrics_v1' From org.apache.cassandra.config.CFMetaData@fe6ab34[cfId=8f6bb260-7670-11f0-bff1-ef813b7a18da,ksName=reaper_db,cfName=node_metrics_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=120, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [active_anticompactions cluster datacenter has_repair_running pending_compactions requested]],partitionKeyColumns=[run_id, 
time_partition],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.LongType),columnMetadata=[cluster, node, has_repair_running, pending_compactions, active_anticompactions, time_partition, datacenter, requested, run_id],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6ff0346b[cfId=8f6bb260-7670-11f0-bff1-ef813b7a18da,ksName=reaper_db,cfName=node_metrics_v1,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=120, default_time_to_live=180, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy, options={min_threshold=4, max_threshold=32, compaction_window_size=2, compaction_window_unit=MINUTES, unchecked_tombstone_compaction=true}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [active_anticompactions cluster datacenter has_repair_running pending_compactions requested]],partitionKeyColumns=[run_id, time_partition],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.LongType),columnMetadata=[cluster, node, has_repair_running, pending_compactions, active_anticompactions, time_partition, datacenter, requested, run_id],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-6] 2025-08-11 05:04:05,245 MigrationManager.java:454 - Update table 'reaper_db/percent_repaired_by_schedule' From org.apache.cassandra.config.CFMetaData@52965fb6[cfId=9686b270-7670-11f0-bff1-ef813b7a18da,ksName=reaper_db,cfName=percent_repaired_by_schedule,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [keyspace_name percent_repaired table_name ts]],partitionKeyColumns=[cluster_name, repair_schedule_id, time_bucket],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[time_bucket, node, ts, keyspace_name, percent_repaired, repair_schedule_id, table_name, cluster_name],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@3876cab7[cfId=9686b270-7670-11f0-bff1-ef813b7a18da,ksName=reaper_db,cfName=percent_repaired_by_schedule,flags=[COMPOUND],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.0, bloom_filter_fp_chance=0.1, crc_check_chance=1.0, gc_grace_seconds=300, 
default_time_to_live=3600, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.LeveledCompactionStrategy, options={}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.UTF8Type),partitionColumns=[[] | [keyspace_name percent_repaired table_name ts]],partitionKeyColumns=[cluster_name, repair_schedule_id, time_bucket],clusteringColumns=[node],keyValidator=org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.UTF8Type),columnMetadata=[time_bucket, node, ts, keyspace_name, percent_repaired, repair_schedule_id, table_name, cluster_name],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-11 05:04:22,257 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_uuid_table INFO [Native-Transport-Requests-2] 2025-08-11 05:04:24,638 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@28c36881[cfId=a64415e0-7670-11f0-993e-6d60ca3e2f4e,ksName=svc_monitor_keyspace,cfName=service_instance_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-11 05:04:24,924 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.service_instance_table INFO [Native-Transport-Requests-5] 2025-08-11 05:04:25,780 MigrationManager.java:454 - Update table 'config_db_uuid/obj_uuid_table' From org.apache.cassandra.config.CFMetaData@3717cf7a[cfId=a470dc80-7670-11f0-8f47-2597d7221e0e,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To 
org.apache.cassandra.config.CFMetaData@73c42070[cfId=a470dc80-7670-11f0-8f47-2597d7221e0e,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-11 05:04:29,362 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_fq_name_table INFO [MigrationStage:1] 2025-08-11 05:04:30,688 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.pool_table INFO [Native-Transport-Requests-1] 2025-08-11 05:04:31,677 MigrationManager.java:454 - Update table 'config_db_uuid/obj_fq_name_table' From org.apache.cassandra.config.CFMetaData@7d1840de[cfId=a8fef020-7670-11f0-bff1-ef813b7a18da,ksName=config_db_uuid,cfName=obj_fq_name_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@7b673639[cfId=a8fef020-7670-11f0-bff1-ef813b7a18da,ksName=config_db_uuid,cfName=obj_fq_name_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [Native-Transport-Requests-1] 2025-08-11 05:04:34,234 
MigrationManager.java:454 - Update table 'svc_monitor_keyspace/pool_table' From org.apache.cassandra.config.CFMetaData@6b363405[cfId=a9a73e10-7670-11f0-bff1-ef813b7a18da,ksName=svc_monitor_keyspace,cfName=pool_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@77bd2ca1[cfId=a9a73e10-7670-11f0-bff1-ef813b7a18da,ksName=svc_monitor_keyspace,cfName=pool_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-11 05:04:34,912 ColumnFamilyStore.java:411 - Initializing config_db_uuid.obj_shared_table INFO [MigrationStage:1] 2025-08-11 05:04:35,610 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.loadbalancer_table INFO [Native-Transport-Requests-1] 2025-08-11 05:04:40,514 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@54816e8a[cfId=afba9220-7670-11f0-993e-6d60ca3e2f4e,ksName=useragent,cfName=useragent_keyval_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 
2025-08-11 05:04:40,631 ColumnFamilyStore.java:411 - Initializing useragent.useragent_keyval_table INFO [Native-Transport-Requests-2] 2025-08-11 05:04:41,685 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@7eb63b97[cfId=b06d4050-7670-11f0-993e-6d60ca3e2f4e,ksName=svc_monitor_keyspace,cfName=healthmonitor_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-11 05:04:41,853 ColumnFamilyStore.java:411 - Initializing svc_monitor_keyspace.healthmonitor_table INFO [Native-Transport-Requests-3] 2025-08-11 05:04:45,176 MigrationManager.java:454 - Update table 'config_db_uuid/obj_uuid_table' From org.apache.cassandra.config.CFMetaData@3717cf7a[cfId=a470dc80-7670-11f0-8f47-2597d7221e0e,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@59b91591[cfId=a470dc80-7670-11f0-8f47-2597d7221e0e,ksName=config_db_uuid,cfName=obj_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, 
value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-11 05:04:54,214 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.route_target_table INFO [Native-Transport-Requests-3] 2025-08-11 05:04:55,311 MigrationManager.java:454 - Update table 'to_bgp_keyspace/route_target_table' From org.apache.cassandra.config.CFMetaData@ba84af0[cfId=b5cbc350-7670-11f0-bff1-ef813b7a18da,ksName=to_bgp_keyspace,cfName=route_target_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@688bc3ec[cfId=b5cbc350-7670-11f0-bff1-ef813b7a18da,ksName=to_bgp_keyspace,cfName=route_target_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-11 05:04:58,621 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_ip_address_table INFO [Native-Transport-Requests-1] 2025-08-11 05:05:00,233 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@30e8353b[cfId=bb7b7390-7670-11f0-993e-6d60ca3e2f4e,ksName=to_bgp_keyspace,cfName=service_chain_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | 
[value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-11 05:05:00,670 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_table INFO [MigrationStage:1] 2025-08-11 05:05:01,410 ColumnFamilyStore.java:411 - Initializing to_bgp_keyspace.service_chain_uuid_table INFO [Native-Transport-Requests-3] 2025-08-11 05:05:01,699 MigrationManager.java:454 - Update table 'to_bgp_keyspace/service_chain_uuid_table' From org.apache.cassandra.config.CFMetaData@51934910[cfId=bc103980-7670-11f0-bff1-ef813b7a18da,ksName=to_bgp_keyspace,cfName=service_chain_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@49c046bc[cfId=bc103980-7670-11f0-bff1-ef813b7a18da,ksName=to_bgp_keyspace,cfName=service_chain_uuid_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-11 05:06:04,879 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pr_vn_ip_table INFO [Native-Transport-Requests-1] 2025-08-11 05:06:05,646 MigrationManager.java:454 - Update table 'dm_keyspace/dm_pr_vn_ip_table' From org.apache.cassandra.config.CFMetaData@6a12ae0f[cfId=e1ec4d60-7670-11f0-bff1-ef813b7a18da,ksName=dm_keyspace,cfName=dm_pr_vn_ip_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, 
options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@3842aedc[cfId=e1ec4d60-7670-11f0-bff1-ef813b7a18da,ksName=dm_keyspace,cfName=dm_pr_vn_ip_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-11 05:06:06,977 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pr_asn_table INFO [Native-Transport-Requests-1] 2025-08-11 05:06:11,853 MigrationManager.java:376 - Create new table: org.apache.cassandra.config.CFMetaData@72d8253b[cfId=e62bcbd0-7670-11f0-993e-6d60ca3e2f4e,ksName=dm_keyspace,cfName=dm_ni_ipv6_ll_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={min_threshold=4, max_threshold=32}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [MigrationStage:1] 2025-08-11 05:06:12,210 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_ni_ipv6_ll_table INFO [MigrationStage:1] 2025-08-11 05:06:14,590 ColumnFamilyStore.java:411 - Initializing dm_keyspace.dm_pnf_resource_table INFO [Native-Transport-Requests-1] 2025-08-11 05:06:15,388 MigrationManager.java:454 - Update table 'dm_keyspace/dm_pnf_resource_table' From org.apache.cassandra.config.CFMetaData@470159b3[cfId=e7b6ffb0-7670-11f0-bff1-ef813b7a18da,ksName=dm_keyspace,cfName=dm_pnf_resource_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, 
caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] To org.apache.cassandra.config.CFMetaData@6511f7ee[cfId=e7b6ffb0-7670-11f0-bff1-ef813b7a18da,ksName=dm_keyspace,cfName=dm_pnf_resource_table,flags=[DENSE],params=TableParams{comment=, read_repair_chance=0.0, dclocal_read_repair_chance=0.1, bloom_filter_fp_chance=0.01, crc_check_chance=1.0, gc_grace_seconds=864000, default_time_to_live=0, memtable_flush_period_in_ms=0, min_index_interval=128, max_index_interval=2048, speculative_retry=99PERCENTILE, caching={'keys' : 'ALL', 'rows_per_partition' : 'NONE'}, compaction=CompactionParams{class=org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy, options={max_threshold=32, min_threshold=4}}, compression=org.apache.cassandra.schema.CompressionParams@97d350d4, extensions={}, cdc=false},comparator=comparator(org.apache.cassandra.db.marshal.BytesType),partitionColumns=[[] | [value]],partitionKeyColumns=[key],clusteringColumns=[column1],keyValidator=org.apache.cassandra.db.marshal.BytesType,columnMetadata=[key, column1, value],droppedColumns={},triggers=[],indexes=[]] INFO [HANDSHAKE-/10.0.0.254] 2025-08-11 05:11:23,323 OutboundTcpConnection.java:561 - Handshaking version with /10.0.0.254 INFO [Repair-Task-2] 2025-08-11 05:11:23,553 RepairRunnable.java:139 - Starting repair command #1 (9ff57110-7671-11f0-993e-6d60ca3e2f4e), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 1, pull repair: false) INFO [Repair-Task-2] 2025-08-11 05:11:23,619 RepairSession.java:228 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] new session: will sync /10.0.0.48, /10.0.0.38, /10.0.0.254 on range [(-3927778243009099545,-3762931899282199458]] for reaper_db.[running_repairs, cluster, leader, repair_unit_v1, repair_run_by_cluster_v2, snapshot, repair_run_by_cluster, percent_repaired_by_schedule, schema_migration, diagnostic_event_subscription, running_reapers, repair_run, schema_migration_leader, repair_schedule_by_cluster_and_keyspace, repair_run_by_unit, repair_schedule_v1] INFO [RepairJobTask:2] 2025-08-11 05:11:23,717 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for running_repairs (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-08-11 05:11:23,718 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:23,746 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_repairs from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:23,746 RepairJob.java:270 - Validating /10.0.0.254 INFO 
[AntiEntropyStage:1] 2025-08-11 05:11:23,786 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:23,786 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:23,793 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_repairs from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:11:23,795 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:1] 2025-08-11 05:11:23,795 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:4] 2025-08-11 05:11:23,795 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:1] 2025-08-11 05:11:23,795 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] running_repairs is fully synced INFO [RepairJobTask:1] 2025-08-11 05:11:24,055 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:1] 2025-08-11 05:11:24,055 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,058 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for cluster from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,058 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,064 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,064 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,066 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for cluster from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:24,066 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:1] 2025-08-11 05:11:24,066 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:5] 2025-08-11 05:11:24,067 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:3] 2025-08-11 05:11:24,067 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] cluster is fully synced INFO [RepairJobTask:3] 2025-08-11 05:11:24,076 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-08-11 05:11:24,076 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,080 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for leader from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,080 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,085 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 
2025-08-11 05:11:24,085 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,087 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for leader from /10.0.0.48 INFO [RepairJobTask:4] 2025-08-11 05:11:24,087 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:4] 2025-08-11 05:11:24,087 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:4] 2025-08-11 05:11:24,088 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:4] 2025-08-11 05:11:24,088 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] leader is fully synced INFO [RepairJobTask:5] 2025-08-11 05:11:24,147 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:24,147 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,154 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_unit_v1 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,154 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,229 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,229 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,233 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_unit_v1 from /10.0.0.48 INFO [RepairJobTask:4] 2025-08-11 05:11:24,233 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:4] 2025-08-11 05:11:24,234 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:4] 2025-08-11 05:11:24,234 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:4] 2025-08-11 05:11:24,234 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] repair_unit_v1 is fully synced INFO [RepairJobTask:5] 2025-08-11 05:11:24,282 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:24,282 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,284 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,284 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,288 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,288 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,291 RepairSession.java:180 - 
[repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.48 INFO [RepairJobTask:4] 2025-08-11 05:11:24,292 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:3] 2025-08-11 05:11:24,292 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:6] 2025-08-11 05:11:24,292 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:2] 2025-08-11 05:11:24,292 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:6] 2025-08-11 05:11:24,295 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for snapshot (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:11:24,297 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,304 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for snapshot from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,304 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,307 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,307 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,309 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for snapshot from /10.0.0.48 INFO [RepairJobTask:4] 2025-08-11 05:11:24,312 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-11 05:11:24,312 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-11 05:11:24,312 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-11 05:11:24,312 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] snapshot is fully synced INFO [RepairJobTask:4] 2025-08-11 05:11:24,314 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-08-11 05:11:24,314 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,315 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,316 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,323 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,324 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,326 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster from /10.0.0.48 
INFO [RepairJobTask:4] 2025-08-11 05:11:24,327 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-08-11 05:11:24,327 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-08-11 05:11:24,327 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-08-11 05:11:24,327 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] repair_run_by_cluster is fully synced INFO [RepairJobTask:6] 2025-08-11 05:11:24,329 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:11:24,329 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,342 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,342 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,346 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,347 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,350 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for percent_repaired_by_schedule from /10.0.0.48 INFO [RepairJobTask:5] 2025-08-11 05:11:24,350 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:2] 2025-08-11 05:11:24,350 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:5] 2025-08-11 05:11:24,355 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for schema_migration (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:24,355 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,357 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,357 RepairJob.java:270 - Validating /10.0.0.254 INFO [RepairJobTask:7] 2025-08-11 05:11:24,358 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:4] 2025-08-11 05:11:24,358 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] percent_repaired_by_schedule is fully synced INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,366 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,366 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,367 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration 
from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:11:24,368 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:3] 2025-08-11 05:11:24,368 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:2] 2025-08-11 05:11:24,368 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:3] 2025-08-11 05:11:24,368 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] schema_migration is fully synced INFO [RepairJobTask:3] 2025-08-11 05:11:24,383 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-08-11 05:11:24,383 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,385 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for diagnostic_event_subscription from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,385 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,389 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,389 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,391 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for diagnostic_event_subscription from /10.0.0.48 INFO [RepairJobTask:6] 2025-08-11 05:11:24,391 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:4] 2025-08-11 05:11:24,391 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:7] 2025-08-11 05:11:24,392 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-11 05:11:24,392 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] diagnostic_event_subscription is fully synced INFO [RepairJobTask:3] 2025-08-11 05:11:24,641 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for running_reapers (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-08-11 05:11:24,641 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,644 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_reapers from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,644 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,648 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,648 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,651 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_reapers 
from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:24,651 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:4] 2025-08-11 05:11:24,652 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:3] 2025-08-11 05:11:24,652 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:4] 2025-08-11 05:11:24,652 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] running_reapers is fully synced INFO [RepairJobTask:4] 2025-08-11 05:11:24,933 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-08-11 05:11:24,933 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,939 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,939 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,941 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,941 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,942 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run from /10.0.0.48 INFO [RepairJobTask:4] 2025-08-11 05:11:24,943 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:7] 2025-08-11 05:11:24,943 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:2] 2025-08-11 05:11:24,943 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:3] 2025-08-11 05:11:24,943 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] repair_run is fully synced INFO [RepairJobTask:3] 2025-08-11 05:11:24,945 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for schema_migration_leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-08-11 05:11:24,945 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,951 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration_leader from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,951 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,955 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,955 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:24,956 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration_leader from /10.0.0.48 INFO [RepairJobTask:3] 2025-08-11 05:11:24,956 SyncTask.java:66 - [repair 
#9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:6] 2025-08-11 05:11:24,956 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:4] 2025-08-11 05:11:24,957 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:6] 2025-08-11 05:11:24,957 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] schema_migration_leader is fully synced INFO [RepairJobTask:6] 2025-08-11 05:11:25,212 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:11:25,213 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,217 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,217 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,224 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,224 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,225 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.48 INFO [RepairJobTask:7] 2025-08-11 05:11:25,226 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-08-11 05:11:25,226 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:2] 2025-08-11 05:11:25,226 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-08-11 05:11:25,226 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:5] 2025-08-11 05:11:25,279 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:25,280 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,291 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_unit from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,291 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,299 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,299 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,303 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle 
tree for repair_run_by_unit from /10.0.0.48 INFO [RepairJobTask:5] 2025-08-11 05:11:25,303 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:7] 2025-08-11 05:11:25,304 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:6] 2025-08-11 05:11:25,304 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-08-11 05:11:25,304 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] repair_run_by_unit is fully synced INFO [RepairJobTask:3] 2025-08-11 05:11:25,539 RepairJob.java:234 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-08-11 05:11:25,539 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,542 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_v1 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,543 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,558 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,559 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:25,563 RepairSession.java:180 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_v1 from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:25,564 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:3] 2025-08-11 05:11:25,564 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:6] 2025-08-11 05:11:25,564 SyncTask.java:66 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:3] 2025-08-11 05:11:25,564 RepairJob.java:143 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] repair_schedule_v1 is fully synced INFO [RepairJobTask:3] 2025-08-11 05:11:25,565 RepairSession.java:270 - [repair #9fff5c20-7671-11f0-993e-6d60ca3e2f4e] Session completed successfully INFO [RepairJobTask:3] 2025-08-11 05:11:25,565 RepairRunnable.java:261 - Repair session 9fff5c20-7671-11f0-993e-6d60ca3e2f4e for range [(-3927778243009099545,-3762931899282199458]] finished INFO [RepairJobTask:3] 2025-08-11 05:11:25,567 ActiveRepairService.java:452 - [repair #9ff57110-7671-11f0-993e-6d60ca3e2f4e] Not a global repair, will not do anticompaction INFO [InternalResponseStage:8] 2025-08-11 05:11:25,580 RepairRunnable.java:343 - Repair command #1 finished in 2 seconds INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,095 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,121 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 
2025-08-11 05:11:30,141 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,162 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,176 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,189 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,439 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,451 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,472 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,482 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,494 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,506 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,519 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,579 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,590 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,600 Validator.java:281 - [repair #a3d4ce20-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-11 05:11:30,612 ActiveRepairService.java:452 - [repair #a3cc1b90-7671-11f0-bff1-ef813b7a18da] Not a global repair, will not do anticompaction INFO [Repair-Task-3] 2025-08-11 05:11:33,638 RepairRunnable.java:139 - Starting repair command #2 (a5f84a60-7671-11f0-993e-6d60ca3e2f4e), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 2, pull repair: 
false) INFO [Repair-Task-3] 2025-08-11 05:11:33,669 RepairSession.java:228 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] new session: will sync /10.0.0.48, /10.0.0.38, /10.0.0.254 on range [(-2412463596240449577,-2412113944617227059], (6301498409761326094,6377522213176995371]] for reaper_db.[running_repairs, cluster, leader, repair_unit_v1, repair_run_by_cluster_v2, snapshot, repair_run_by_cluster, percent_repaired_by_schedule, schema_migration, diagnostic_event_subscription, running_reapers, repair_run, schema_migration_leader, repair_schedule_by_cluster_and_keyspace, repair_run_by_unit, repair_schedule_v1] INFO [RepairJobTask:2] 2025-08-11 05:11:33,776 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for running_repairs (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-08-11 05:11:33,776 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,779 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_repairs from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,779 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,782 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,783 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,789 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_repairs from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:33,791 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:4] 2025-08-11 05:11:33,791 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:3] 2025-08-11 05:11:33,791 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:4] 2025-08-11 05:11:33,792 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] running_repairs is fully synced INFO [RepairJobTask:4] 2025-08-11 05:11:33,795 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-08-11 05:11:33,795 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,797 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for cluster from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,798 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,804 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,804 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,806 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for cluster from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:11:33,807 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:2] 2025-08-11 05:11:33,808 
SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:6] 2025-08-11 05:11:33,809 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:5] 2025-08-11 05:11:33,810 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] cluster is fully synced INFO [RepairJobTask:5] 2025-08-11 05:11:33,820 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:33,820 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,822 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for leader from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,822 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,829 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,829 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,832 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for leader from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:33,832 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:4] 2025-08-11 05:11:33,833 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:5] 2025-08-11 05:11:33,833 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:3] 2025-08-11 05:11:33,835 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] leader is fully synced INFO [RepairJobTask:4] 2025-08-11 05:11:33,851 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-08-11 05:11:33,851 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,862 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_unit_v1 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,864 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,869 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,869 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,872 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_unit_v1 from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:33,873 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:2] 2025-08-11 05:11:33,873 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:2] 2025-08-11 05:11:33,873 SyncTask.java:66 - [repair 
#a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:2] 2025-08-11 05:11:33,873 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] repair_unit_v1 is fully synced INFO [RepairJobTask:4] 2025-08-11 05:11:33,878 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-08-11 05:11:33,878 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,881 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,881 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,893 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,893 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,895 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.48 INFO [RepairJobTask:5] 2025-08-11 05:11:33,898 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for snapshot (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-08-11 05:11:33,898 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:5] 2025-08-11 05:11:33,898 RepairJob.java:257 - Validating /10.0.0.38 INFO [RepairJobTask:2] 2025-08-11 05:11:33,898 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:5] 2025-08-11 05:11:33,899 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:2] 2025-08-11 05:11:33,899 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] repair_run_by_cluster_v2 is fully synced INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,900 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for snapshot from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,901 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,903 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,903 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,905 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for snapshot from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:33,905 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:2] 2025-08-11 05:11:33,905 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:6] 2025-08-11 05:11:33,905 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent 
for snapshot INFO [RepairJobTask:2] 2025-08-11 05:11:33,905 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] snapshot is fully synced INFO [RepairJobTask:2] 2025-08-11 05:11:33,909 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-08-11 05:11:33,909 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,912 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,912 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,915 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,915 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,917 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:33,917 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-08-11 05:11:33,917 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-08-11 05:11:33,917 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:2] 2025-08-11 05:11:33,917 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] repair_run_by_cluster is fully synced INFO [RepairJobTask:6] 2025-08-11 05:11:33,925 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:11:33,925 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,939 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,939 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,944 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,944 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,946 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for percent_repaired_by_schedule from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:33,947 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:3] 2025-08-11 05:11:33,947 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:1] 2025-08-11 05:11:33,947 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for 
percent_repaired_by_schedule INFO [RepairJobTask:1] 2025-08-11 05:11:33,947 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:1] 2025-08-11 05:11:33,949 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for schema_migration (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:1] 2025-08-11 05:11:33,949 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,951 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,951 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,953 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,953 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,954 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration from /10.0.0.48 INFO [RepairJobTask:3] 2025-08-11 05:11:33,954 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:2] 2025-08-11 05:11:33,954 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:1] 2025-08-11 05:11:33,955 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:6] 2025-08-11 05:11:33,955 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] schema_migration is fully synced INFO [RepairJobTask:7] 2025-08-11 05:11:33,959 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-08-11 05:11:33,960 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,962 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for diagnostic_event_subscription from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,963 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,967 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,967 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,969 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for diagnostic_event_subscription from /10.0.0.48 INFO [RepairJobTask:5] 2025-08-11 05:11:33,969 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:3] 2025-08-11 05:11:33,969 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:7] 2025-08-11 05:11:33,969 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for 
diagnostic_event_subscription INFO [RepairJobTask:6] 2025-08-11 05:11:33,969 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] diagnostic_event_subscription is fully synced INFO [RepairJobTask:6] 2025-08-11 05:11:33,973 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for running_reapers (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:11:33,973 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,975 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_reapers from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,975 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,979 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,979 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:33,980 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_reapers from /10.0.0.48 INFO [RepairJobTask:5] 2025-08-11 05:11:33,981 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:3] 2025-08-11 05:11:33,981 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:3] 2025-08-11 05:11:33,981 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:3] 2025-08-11 05:11:33,981 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] running_reapers is fully synced INFO [RepairJobTask:6] 2025-08-11 05:11:34,025 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:11:34,027 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,033 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,033 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,038 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,039 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,041 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run from /10.0.0.48 INFO [RepairJobTask:3] 2025-08-11 05:11:34,041 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:5] 2025-08-11 05:11:34,041 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:7] 2025-08-11 05:11:34,041 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:5] 2025-08-11 05:11:34,041 RepairJob.java:143 - [repair 
#a5fd0550-7671-11f0-993e-6d60ca3e2f4e] repair_run is fully synced INFO [RepairJobTask:5] 2025-08-11 05:11:34,048 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for schema_migration_leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:34,048 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,055 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration_leader from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,055 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,058 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,058 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,059 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration_leader from /10.0.0.48 INFO [RepairJobTask:5] 2025-08-11 05:11:34,061 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:3] 2025-08-11 05:11:34,061 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:6] 2025-08-11 05:11:34,061 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:5] 2025-08-11 05:11:34,061 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] schema_migration_leader is fully synced INFO [RepairJobTask:5] 2025-08-11 05:11:34,065 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:34,067 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,074 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,074 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,078 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,078 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,081 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:11:34,082 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:2] 2025-08-11 05:11:34,082 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:4] 2025-08-11 05:11:34,082 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for 
repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:2] 2025-08-11 05:11:34,082 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:2] 2025-08-11 05:11:34,084 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-08-11 05:11:34,085 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,087 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_unit from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,087 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,089 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,090 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,091 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_unit from /10.0.0.48 INFO [RepairJobTask:3] 2025-08-11 05:11:34,091 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:6] 2025-08-11 05:11:34,091 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:5] 2025-08-11 05:11:34,091 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:6] 2025-08-11 05:11:34,092 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] repair_run_by_unit is fully synced INFO [RepairJobTask:6] 2025-08-11 05:11:34,096 RepairJob.java:234 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:11:34,097 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,104 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_v1 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,104 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,109 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,109 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:34,113 RepairSession.java:180 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_v1 from /10.0.0.48 INFO [RepairJobTask:7] 2025-08-11 05:11:34,114 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:3] 2025-08-11 05:11:34,114 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:6] 2025-08-11 05:11:34,114 SyncTask.java:66 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_v1 INFO 
[RepairJobTask:3] 2025-08-11 05:11:34,114 RepairJob.java:143 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] repair_schedule_v1 is fully synced INFO [RepairJobTask:3] 2025-08-11 05:11:34,115 RepairSession.java:270 - [repair #a5fd0550-7671-11f0-993e-6d60ca3e2f4e] Session completed successfully INFO [RepairJobTask:3] 2025-08-11 05:11:34,115 RepairRunnable.java:261 - Repair session a5fd0550-7671-11f0-993e-6d60ca3e2f4e for range [(-2412463596240449577,-2412113944617227059], (6301498409761326094,6377522213176995371]] finished INFO [RepairJobTask:3] 2025-08-11 05:11:34,117 ActiveRepairService.java:452 - [repair #a5f84a60-7671-11f0-993e-6d60ca3e2f4e] Not a global repair, will not do anticompaction INFO [InternalResponseStage:8] 2025-08-11 05:11:34,120 RepairRunnable.java:343 - Repair command #2 finished in 0 seconds INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,283 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,309 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,347 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,389 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,408 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,422 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,479 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,500 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,512 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,522 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,535 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,588 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,600 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,673 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run 
INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,696 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,711 Validator.java:281 - [repair #a9e05a00-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-11 05:11:40,733 ActiveRepairService.java:452 - [repair #a9db02d0-7671-11f0-bff1-ef813b7a18da] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-08-11 05:11:43,788 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-11 05:11:43,821 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-08-11 05:11:43,846 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-11 05:11:43,879 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-11 05:11:43,914 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-11 05:11:43,951 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-11 05:11:44,039 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-11 05:11:44,064 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-08-11 05:11:44,091 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-11 05:11:44,124 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-11 05:11:44,136 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-08-11 05:11:44,161 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-11 05:11:44,201 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-11 05:11:44,314 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-08-11 05:11:44,333 Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-08-11 05:11:44,566 
Validator.java:281 - [repair #abfde2d0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-11 05:11:44,578 ActiveRepairService.java:452 - [repair #abfcd160-7671-11f0-bff1-ef813b7a18da] Not a global repair, will not do anticompaction INFO [AntiEntropyStage:1] 2025-08-11 05:11:50,373 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_cluster INFO [AntiEntropyStage:1] 2025-08-11 05:11:50,401 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_reapers INFO [AntiEntropyStage:1] 2025-08-11 05:11:50,431 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration_leader INFO [AntiEntropyStage:1] 2025-08-11 05:11:50,461 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_v1 INFO [AntiEntropyStage:1] 2025-08-11 05:11:50,482 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.leader INFO [AntiEntropyStage:1] 2025-08-11 05:11:50,505 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_unit INFO [AntiEntropyStage:1] 2025-08-11 05:11:50,570 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.running_repairs INFO [AntiEntropyStage:1] 2025-08-11 05:11:50,587 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.cluster INFO [AntiEntropyStage:1] 2025-08-11 05:11:50,607 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_unit_v1 INFO [AntiEntropyStage:1] 2025-08-11 05:11:50,821 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.percent_repaired_by_schedule INFO [AntiEntropyStage:1] 2025-08-11 05:11:50,839 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.diagnostic_event_subscription INFO [AntiEntropyStage:1] 2025-08-11 05:11:50,865 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_schedule_by_cluster_and_keyspace INFO [AntiEntropyStage:1] 2025-08-11 05:11:50,879 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.snapshot INFO [AntiEntropyStage:1] 2025-08-11 05:11:51,028 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run INFO [AntiEntropyStage:1] 2025-08-11 05:11:51,040 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.schema_migration INFO [AntiEntropyStage:1] 2025-08-11 05:11:51,049 Validator.java:281 - [repair #afdf3bb0-7671-11f0-bff1-ef813b7a18da] Sending completed merkle tree to /10.0.0.38 for reaper_db.repair_run_by_cluster_v2 INFO [AntiEntropyStage:1] 2025-08-11 05:11:51,058 ActiveRepairService.java:452 - [repair 
#afdacee0-7671-11f0-bff1-ef813b7a18da] Not a global repair, will not do anticompaction INFO [Repair-Task-4] 2025-08-11 05:11:53,795 RepairRunnable.java:139 - Starting repair command #3 (b1fc0130-7671-11f0-993e-6d60ca3e2f4e), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 1, pull repair: false) INFO [Repair-Task-4] 2025-08-11 05:11:53,816 RepairSession.java:228 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] new session: will sync /10.0.0.48, /10.0.0.38, /10.0.0.254 on range [(1591292242432273873,1622293110476668470]] for reaper_db.[running_repairs, cluster, leader, repair_unit_v1, repair_run_by_cluster_v2, snapshot, repair_run_by_cluster, percent_repaired_by_schedule, schema_migration, diagnostic_event_subscription, running_reapers, repair_run, schema_migration_leader, repair_schedule_by_cluster_and_keyspace, repair_run_by_unit, repair_schedule_v1] INFO [RepairJobTask:3] 2025-08-11 05:11:53,909 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for running_repairs (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-08-11 05:11:53,909 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,911 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_repairs from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,912 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,915 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,916 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,917 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_repairs from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:11:53,920 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:3] 2025-08-11 05:11:53,921 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:4] 2025-08-11 05:11:53,923 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:53,921 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:3] 2025-08-11 05:11:53,924 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] running_repairs is fully synced INFO [RepairJobTask:4] 2025-08-11 05:11:53,924 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,939 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for cluster from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,939 
RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,941 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,942 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,943 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for cluster from /10.0.0.48 INFO [RepairJobTask:3] 2025-08-11 05:11:53,943 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:1] 2025-08-11 05:11:53,944 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:2] 2025-08-11 05:11:53,944 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:1] 2025-08-11 05:11:53,944 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] cluster is fully synced INFO [RepairJobTask:5] 2025-08-11 05:11:53,947 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:53,947 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,949 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for leader from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,951 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,955 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,955 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,961 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for leader from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:11:53,962 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:1] 2025-08-11 05:11:53,962 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:1] 2025-08-11 05:11:53,963 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:1] 2025-08-11 05:11:53,963 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] leader is fully synced INFO [RepairJobTask:2] 2025-08-11 05:11:53,965 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-08-11 05:11:53,965 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,967 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_unit_v1 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,967 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,970 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO 
[AntiEntropyStage:1] 2025-08-11 05:11:53,971 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,976 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_unit_v1 from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:11:53,977 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-11 05:11:53,977 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-11 05:11:53,977 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:1] 2025-08-11 05:11:53,977 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] repair_unit_v1 is fully synced INFO [RepairJobTask:5] 2025-08-11 05:11:53,980 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:53,980 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,983 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,983 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,986 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,987 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:53,989 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:11:53,989 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:3] 2025-08-11 05:11:53,989 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:6] 2025-08-11 05:11:53,994 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:5] 2025-08-11 05:11:53,994 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:5] 2025-08-11 05:11:54,000 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for snapshot (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:54,000 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,002 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for snapshot from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,002 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,005 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,005 RepairJob.java:270 - 
Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,006 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for snapshot from /10.0.0.48 INFO [RepairJobTask:3] 2025-08-11 05:11:54,007 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:3] 2025-08-11 05:11:54,007 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:3] 2025-08-11 05:11:54,007 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:3] 2025-08-11 05:11:54,007 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] snapshot is fully synced INFO [RepairJobTask:6] 2025-08-11 05:11:54,010 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:11:54,011 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,012 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,013 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,015 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,015 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,016 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:54,016 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:3] 2025-08-11 05:11:54,016 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:6] 2025-08-11 05:11:54,016 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster INFO [RepairJobTask:5] 2025-08-11 05:11:54,016 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] repair_run_by_cluster is fully synced INFO [RepairJobTask:7] 2025-08-11 05:11:54,023 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-08-11 05:11:54,023 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,028 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,028 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,031 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,031 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 
05:11:54,033 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for percent_repaired_by_schedule from /10.0.0.48 INFO [RepairJobTask:5] 2025-08-11 05:11:54,033 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:4] 2025-08-11 05:11:54,034 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:6] 2025-08-11 05:11:54,034 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:4] 2025-08-11 05:11:54,034 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:6] 2025-08-11 05:11:54,037 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for schema_migration (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:11:54,038 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,041 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,041 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,045 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,045 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,046 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration from /10.0.0.48 INFO [RepairJobTask:4] 2025-08-11 05:11:54,046 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration INFO [RepairJobTask:5] 2025-08-11 05:11:54,046 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:7] 2025-08-11 05:11:54,046 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:5] 2025-08-11 05:11:54,047 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] schema_migration is fully synced INFO [RepairJobTask:5] 2025-08-11 05:11:54,049 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:54,049 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,052 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for diagnostic_event_subscription from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,052 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,061 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,061 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 
05:11:54,066 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for diagnostic_event_subscription from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:54,066 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:7] 2025-08-11 05:11:54,067 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:1] 2025-08-11 05:11:54,066 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:7] 2025-08-11 05:11:54,067 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] diagnostic_event_subscription is fully synced INFO [RepairJobTask:7] 2025-08-11 05:11:54,077 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for running_reapers (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-08-11 05:11:54,077 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,080 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_reapers from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,081 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,084 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,084 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,088 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_reapers from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:54,088 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:6] 2025-08-11 05:11:54,088 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:5] 2025-08-11 05:11:54,088 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:6] 2025-08-11 05:11:54,088 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] running_reapers is fully synced INFO [RepairJobTask:6] 2025-08-11 05:11:54,295 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:11:54,295 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,297 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,303 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,306 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,306 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,307 RepairSession.java:180 - [repair 
#b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:54,308 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:6] 2025-08-11 05:11:54,308 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:3] 2025-08-11 05:11:54,308 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:6] 2025-08-11 05:11:54,309 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] repair_run is fully synced INFO [RepairJobTask:6] 2025-08-11 05:11:54,311 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for schema_migration_leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:11:54,313 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,315 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration_leader from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,315 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,317 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,317 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,318 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration_leader from /10.0.0.48 INFO [RepairJobTask:5] 2025-08-11 05:11:54,319 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:2] 2025-08-11 05:11:54,319 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:3] 2025-08-11 05:11:54,319 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:2] 2025-08-11 05:11:54,319 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] schema_migration_leader is fully synced INFO [RepairJobTask:2] 2025-08-11 05:11:54,322 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-08-11 05:11:54,323 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,325 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,325 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,327 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,327 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,329 RepairSession.java:180 - [repair 
#b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:11:54,329 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-08-11 05:11:54,329 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:6] 2025-08-11 05:11:54,329 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:5] 2025-08-11 05:11:54,330 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:5] 2025-08-11 05:11:54,332 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:54,332 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,336 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_unit from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,336 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,338 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,338 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,340 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_unit from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:11:54,341 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:7] 2025-08-11 05:11:54,341 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:3] 2025-08-11 05:11:54,341 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:5] 2025-08-11 05:11:54,341 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] repair_run_by_unit is fully synced INFO [RepairJobTask:5] 2025-08-11 05:11:54,345 RepairJob.java:234 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:11:54,347 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,350 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_v1 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,350 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,355 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:11:54,355 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 
05:11:54,357 RepairSession.java:180 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_v1 from /10.0.0.48 INFO [RepairJobTask:7] 2025-08-11 05:11:54,358 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:2] 2025-08-11 05:11:54,358 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:5] 2025-08-11 05:11:54,359 SyncTask.java:66 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:2] 2025-08-11 05:11:54,359 RepairJob.java:143 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] repair_schedule_v1 is fully synced INFO [RepairJobTask:2] 2025-08-11 05:11:54,360 RepairSession.java:270 - [repair #b1ff3580-7671-11f0-993e-6d60ca3e2f4e] Session completed successfully INFO [RepairJobTask:2] 2025-08-11 05:11:54,360 RepairRunnable.java:261 - Repair session b1ff3580-7671-11f0-993e-6d60ca3e2f4e for range [(1591292242432273873,1622293110476668470]] finished INFO [RepairJobTask:2] 2025-08-11 05:11:54,362 ActiveRepairService.java:452 - [repair #b1fc0130-7671-11f0-993e-6d60ca3e2f4e] Not a global repair, will not do anticompaction INFO [InternalResponseStage:8] 2025-08-11 05:11:54,365 RepairRunnable.java:343 - Repair command #3 finished in 0 seconds INFO [Repair-Task-5] 2025-08-11 05:12:00,263 RepairRunnable.java:139 - Starting repair command #4 (b5d6f170-7671-11f0-993e-6d60ca3e2f4e), repairing keyspace reaper_db with repair options (parallelism: dc_parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [cluster, leader, diagnostic_event_subscription, schema_migration_leader, running_repairs, repair_run, repair_unit_v1, repair_run_by_cluster_v2, repair_schedule_by_cluster_and_keyspace, repair_run_by_cluster, running_reapers, repair_schedule_v1, percent_repaired_by_schedule, repair_run_by_unit, schema_migration, snapshot], dataCenters: [], hosts: [], # of ranges: 2, pull repair: false) INFO [Repair-Task-5] 2025-08-11 05:12:00,275 RepairSession.java:228 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] new session: will sync /10.0.0.48, /10.0.0.38, /10.0.0.254 on range [(-1098098127341220938,-1087882488792597341], (3869134068301015860,3948129839353478991]] for reaper_db.[running_repairs, cluster, leader, repair_unit_v1, repair_run_by_cluster_v2, snapshot, repair_run_by_cluster, percent_repaired_by_schedule, schema_migration, diagnostic_event_subscription, running_reapers, repair_run, schema_migration_leader, repair_schedule_by_cluster_and_keyspace, repair_run_by_unit, repair_schedule_v1] INFO [RepairJobTask:3] 2025-08-11 05:12:00,375 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for running_repairs (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-08-11 05:12:00,376 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,381 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_repairs from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,381 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,384 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_repairs from /10.0.0.254 INFO [AntiEntropyStage:1] 
2025-08-11 05:12:00,384 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,385 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_repairs from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:12:00,386 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_repairs INFO [RepairJobTask:1] 2025-08-11 05:12:00,386 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:5] 2025-08-11 05:12:00,392 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_repairs INFO [RepairJobTask:3] 2025-08-11 05:12:00,393 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] running_repairs is fully synced INFO [RepairJobTask:3] 2025-08-11 05:12:00,394 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:3] 2025-08-11 05:12:00,394 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,399 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for cluster from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,399 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,407 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,407 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,410 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for cluster from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:12:00,411 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:4] 2025-08-11 05:12:00,412 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for cluster INFO [RepairJobTask:3] 2025-08-11 05:12:00,412 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for cluster INFO [RepairJobTask:4] 2025-08-11 05:12:00,412 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] cluster is fully synced INFO [RepairJobTask:6] 2025-08-11 05:12:00,419 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:12:00,419 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,421 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for leader from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,424 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,432 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,432 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,434 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree 
for leader from /10.0.0.48 INFO [RepairJobTask:3] 2025-08-11 05:12:00,435 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:6] 2025-08-11 05:12:00,435 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for leader INFO [RepairJobTask:2] 2025-08-11 05:12:00,435 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for leader INFO [RepairJobTask:6] 2025-08-11 05:12:00,436 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] leader is fully synced INFO [RepairJobTask:6] 2025-08-11 05:12:00,441 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_unit_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:12:00,443 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,449 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_unit_v1 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,450 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,453 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_unit_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,453 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,489 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_unit_v1 from /10.0.0.48 INFO [RepairJobTask:3] 2025-08-11 05:12:00,489 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:4] 2025-08-11 05:12:00,489 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_unit_v1 INFO [RepairJobTask:6] 2025-08-11 05:12:00,495 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_unit_v1 INFO [RepairJobTask:2] 2025-08-11 05:12:00,497 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] repair_unit_v1 is fully synced INFO [RepairJobTask:4] 2025-08-11 05:12:00,499 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run_by_cluster_v2 (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-08-11 05:12:00,501 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,509 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,509 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,511 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,511 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,518 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster_v2 from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:12:00,518 SyncTask.java:66 - [repair 
#b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:3] 2025-08-11 05:12:00,518 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:4] 2025-08-11 05:12:00,519 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_cluster_v2 INFO [RepairJobTask:2] 2025-08-11 05:12:00,519 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] repair_run_by_cluster_v2 is fully synced INFO [RepairJobTask:2] 2025-08-11 05:12:00,523 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for snapshot (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-08-11 05:12:00,524 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,526 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for snapshot from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,526 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,529 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for snapshot from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,529 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,535 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for snapshot from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:12:00,535 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for snapshot INFO [RepairJobTask:5] 2025-08-11 05:12:00,535 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:7] 2025-08-11 05:12:00,537 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for snapshot INFO [RepairJobTask:4] 2025-08-11 05:12:00,537 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] snapshot is fully synced INFO [RepairJobTask:7] 2025-08-11 05:12:00,540 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run_by_cluster (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-08-11 05:12:00,540 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,543 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,543 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,548 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,548 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,549 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_cluster from /10.0.0.48 INFO [RepairJobTask:4] 2025-08-11 05:12:00,551 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for 
repair_run_by_cluster INFO [RepairJobTask:4] 2025-08-11 05:12:00,551 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-08-11 05:12:00,551 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_cluster INFO [RepairJobTask:4] 2025-08-11 05:12:00,551 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] repair_run_by_cluster is fully synced INFO [RepairJobTask:4] 2025-08-11 05:12:00,553 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for percent_repaired_by_schedule (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-08-11 05:12:00,553 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,554 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for percent_repaired_by_schedule from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,554 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,557 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for percent_repaired_by_schedule from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,557 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,558 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for percent_repaired_by_schedule from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:12:00,558 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:2] 2025-08-11 05:12:00,558 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:5] 2025-08-11 05:12:00,558 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for percent_repaired_by_schedule INFO [RepairJobTask:2] 2025-08-11 05:12:00,558 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] percent_repaired_by_schedule is fully synced INFO [RepairJobTask:7] 2025-08-11 05:12:00,565 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for schema_migration (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-08-11 05:12:00,565 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,574 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,574 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,579 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,579 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,581 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:12:00,582 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are 
consistent for schema_migration INFO [RepairJobTask:1] 2025-08-11 05:12:00,583 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:1] 2025-08-11 05:12:00,583 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration INFO [RepairJobTask:1] 2025-08-11 05:12:00,583 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] schema_migration is fully synced INFO [RepairJobTask:1] 2025-08-11 05:12:00,587 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for diagnostic_event_subscription (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:1] 2025-08-11 05:12:00,589 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,598 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for diagnostic_event_subscription from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,598 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,600 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for diagnostic_event_subscription from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,600 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,601 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for diagnostic_event_subscription from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:12:00,604 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for running_reapers (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-08-11 05:12:00,604 RepairJob.java:257 - Validating /10.0.0.38 INFO [RepairJobTask:1] 2025-08-11 05:12:00,606 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for diagnostic_event_subscription INFO [RepairJobTask:1] 2025-08-11 05:12:00,606 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:1] 2025-08-11 05:12:00,606 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for diagnostic_event_subscription INFO [RepairJobTask:1] 2025-08-11 05:12:00,606 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] diagnostic_event_subscription is fully synced INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,607 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_reapers from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,607 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,613 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_reapers from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,613 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,615 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for running_reapers from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:12:00,615 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are 
consistent for running_reapers INFO [RepairJobTask:5] 2025-08-11 05:12:00,615 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for running_reapers INFO [RepairJobTask:1] 2025-08-11 05:12:00,615 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for running_reapers INFO [RepairJobTask:5] 2025-08-11 05:12:00,616 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] running_reapers is fully synced INFO [RepairJobTask:5] 2025-08-11 05:12:00,673 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:5] 2025-08-11 05:12:00,674 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,675 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,677 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,680 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,680 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,681 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run from /10.0.0.48 INFO [RepairJobTask:2] 2025-08-11 05:12:00,683 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:7] 2025-08-11 05:12:00,683 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run INFO [RepairJobTask:5] 2025-08-11 05:12:00,683 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run INFO [RepairJobTask:7] 2025-08-11 05:12:00,684 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] repair_run is fully synced INFO [RepairJobTask:7] 2025-08-11 05:12:00,686 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for schema_migration_leader (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:7] 2025-08-11 05:12:00,687 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,691 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration_leader from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,692 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,694 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration_leader from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,694 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,695 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for schema_migration_leader from /10.0.0.48 INFO [RepairJobTask:4] 2025-08-11 05:12:00,695 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:1] 2025-08-11 05:12:00,695 SyncTask.java:66 - [repair 
#b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for schema_migration_leader INFO [RepairJobTask:2] 2025-08-11 05:12:00,695 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for schema_migration_leader INFO [RepairJobTask:2] 2025-08-11 05:12:00,699 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] schema_migration_leader is fully synced INFO [RepairJobTask:2] 2025-08-11 05:12:00,700 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_schedule_by_cluster_and_keyspace (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:2] 2025-08-11 05:12:00,700 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,707 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,708 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,710 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,712 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,714 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_by_cluster_and_keyspace from /10.0.0.48 INFO [RepairJobTask:1] 2025-08-11 05:12:00,714 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:4] 2025-08-11 05:12:00,714 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:7] 2025-08-11 05:12:00,714 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_by_cluster_and_keyspace INFO [RepairJobTask:4] 2025-08-11 05:12:00,715 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] repair_schedule_by_cluster_and_keyspace is fully synced INFO [RepairJobTask:4] 2025-08-11 05:12:00,721 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_run_by_unit (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:4] 2025-08-11 05:12:00,721 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,724 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_unit from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,729 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,732 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_unit from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,732 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,735 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_run_by_unit from /10.0.0.48 INFO [RepairJobTask:5] 2025-08-11 05:12:00,736 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 
are consistent for repair_run_by_unit INFO [RepairJobTask:6] 2025-08-11 05:12:00,736 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_run_by_unit INFO [RepairJobTask:1] 2025-08-11 05:12:00,736 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_run_by_unit INFO [RepairJobTask:6] 2025-08-11 05:12:00,736 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] repair_run_by_unit is fully synced INFO [RepairJobTask:6] 2025-08-11 05:12:00,742 RepairJob.java:234 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Requesting merkle trees for repair_schedule_v1 (to [/10.0.0.38, /10.0.0.254, /10.0.0.48]) INFO [RepairJobTask:6] 2025-08-11 05:12:00,745 RepairJob.java:257 - Validating /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,750 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_v1 from /10.0.0.38 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,752 RepairJob.java:270 - Validating /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,756 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_v1 from /10.0.0.254 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,756 RepairJob.java:270 - Validating /10.0.0.48 INFO [AntiEntropyStage:1] 2025-08-11 05:12:00,758 RepairSession.java:180 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Received merkle tree for repair_schedule_v1 from /10.0.0.48 INFO [RepairJobTask:6] 2025-08-11 05:12:00,759 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.254 are consistent for repair_schedule_v1 INFO [RepairJobTask:4] 2025-08-11 05:12:00,759 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.38 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:1] 2025-08-11 05:12:00,759 SyncTask.java:66 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Endpoints /10.0.0.254 and /10.0.0.48 are consistent for repair_schedule_v1 INFO [RepairJobTask:4] 2025-08-11 05:12:00,759 RepairJob.java:143 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] repair_schedule_v1 is fully synced INFO [RepairJobTask:4] 2025-08-11 05:12:00,763 RepairSession.java:270 - [repair #b5d8c630-7671-11f0-993e-6d60ca3e2f4e] Session completed successfully INFO [RepairJobTask:4] 2025-08-11 05:12:00,765 RepairRunnable.java:261 - Repair session b5d8c630-7671-11f0-993e-6d60ca3e2f4e for range [(-1098098127341220938,-1087882488792597341], (3869134068301015860,3948129839353478991]] finished INFO [RepairJobTask:4] 2025-08-11 05:12:00,768 ActiveRepairService.java:452 - [repair #b5d6f170-7671-11f0-993e-6d60ca3e2f4e] Not a global repair, will not do anticompaction INFO [InternalResponseStage:7] 2025-08-11 05:12:00,773 RepairRunnable.java:343 - Repair command #4 finished in 0 seconds
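The two repair commands captured above follow the same per-table cycle: request merkle trees from /10.0.0.38, /10.0.0.254 and /10.0.0.48, validate each replica, compare the trees pairwise, mark the table fully synced, and finally complete the session, skipping anticompaction because the repair is not global. As an illustrative aid only (this script is not part of Cassandra or Reaper, and it assumes the exact line format shown in this capture), the Python sketch below condenses such a log by collecting the "is fully synced" and "Session completed successfully" markers per repair session UUID; it also copes with several log entries sharing one physical line, as they do here.

#!/usr/bin/env python3
"""Illustrative sketch: summarise a Reaper/Cassandra repair log capture by
listing, per repair session UUID, the tables reported "fully synced".
Assumes the system.log format shown above; adjust the regexes if your
logback pattern differs."""
import re
import sys
from collections import defaultdict

# e.g. "... RepairJob.java:143 - [repair #b5d8c630-...] repair_run is fully synced"
SYNCED = re.compile(r"\[repair #([0-9a-f-]{36})\] (\S+) is fully synced")
# e.g. "... RepairSession.java:270 - [repair #b5d8c630-...] Session completed successfully"
COMPLETED = re.compile(r"\[repair #([0-9a-f-]{36})\] Session completed successfully")

def summarise(lines):
    synced = defaultdict(set)   # session UUID -> tables reported fully synced
    completed = set()           # session UUIDs that completed successfully
    for line in lines:
        # findall handles multiple log entries concatenated on one line
        for uuid, table in SYNCED.findall(line):
            synced[uuid].add(table)
        completed.update(COMPLETED.findall(line))
    return synced, completed

if __name__ == "__main__":
    synced, completed = summarise(sys.stdin)
    for uuid, tables in sorted(synced.items()):
        status = "completed" if uuid in completed else "no completion record"
        print(f"repair #{uuid}: {len(tables)} tables fully synced ({status})")

Fed this capture on stdin, it would, for example, report session b5d8c630-7671-11f0-993e-6d60ca3e2f4e as completed with all sixteen reaper_db tables fully synced.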