Adding a member to the replica set fails and an incorrect configuration is propagated to the nodes

In MongoDB 6.0.15, I added a new node to the replica set that had a different mongodb.key.
The add failed and the state of the new node became UNKNOWN.
However, a connection failure also occurred, because an incorrect configuration was propagated to the client saying that the primary had changed to the new member and that there were no other members.
Could this be a bug?

{"t":{"$date":"2024-05-27T12:19:52.579+09:00"},"s":"I",  "c":"REPL",     "id":21352,   "ctx":"conn101","msg":"replSetReconfig admin command received from client","attr":{"newConfig":{"_id":"replicaset1","version":3,"term":5,"members":[
  {"_id":0,"host":"192.168.0.43:26310","arbiterOnly":false,"buildIndexes":true,"hidden":false,"priority":2,"tags":{},"secondaryDelaySecs":0,"votes":1}
 ,{"_id":1,"host":"192.168.0.44:26310","arbiterOnly":false,"buildIndexes":true,"hidden":false,"priority":2,"tags":{},"secondaryDelaySecs":0,"votes":1}
 ,{"_id":2,"host":"192.168.0.83:26310","arbiterOnly":false,"buildIndexes":true,"hidden":false,"priority":1,"tags":{},"secondaryDelaySecs":0,"votes":1}
 ,{"_id":3,"host":"192.168.0.111:26310"}]
 ,"protocolVersion":1,"writeConcernMajorityJournalDefault":true,"settings":{"chainingAllowed":true,"heartbeatIntervalMillis":2000,"heartbeatTimeoutSecs":10,"electionTimeoutMillis":10000,"catchUpTimeoutMillis":-1,"catchUpTakeoverDelayMillis":30000,"getLastErrorModes":{},"getLastErrorDefaults":{"w":1,"wtimeout":0},"replicaSetId":{"$oid":"627c965e70066a968588ea9c"}}}}}
...
{"t":{"$date":"2024-05-27T12:19:52.580+09:00"},"s":"I",  "c":"CONNPOOL", "id":22576,   "ctx":"ReplNetwork","msg":"Connecting","attr":{"hostAndPort":"192.168.0.111:26310"}}
{"t":{"$date":"2024-05-27T12:19:52.644+09:00"},"s":"W",  "c":"REPL",     "id":23722,   "ctx":"ReplCoord-2","msg":"Failed to complete heartbeat request to target","attr":{"requestTarget":"192.168.0.111:26310","responseStatus":{"code":18,"codeName":"AuthenticationFailed","errmsg":"Authentication failed."}}}
{"t":{"$date":"2024-05-27T12:19:52.646+09:00"},"s":"I",  "c":"NETWORK",  "id":4333213, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM Topology Change"
,"attr":{"replicaSet":"replicaset1","newTopologyDescription":{"id":"3b96a35d-3029-4557-814d-6f8aded2f624"
 ,"topologyType":"ReplicaSetWithPrimary","servers":
  {"192.168.0.43:26310":{"address":"192.168.0.43:26310","topologyVersion":{"processId":{"$oid":"6653f4c79ac4c39c6c516b9e"},"counter":8},"roundTripTime":162,"lastWriteDate":{"$date":"2024-05-27T12:19:52.000+09:00"},"opTime":{"ts":{"$timestamp":{"t":1716779992,"i":1}},"t":5},"type":"RSPrimary","minWireVersion":17,"maxWireVersion":17,"me":"192.168.0.43:26310","setName":"replicaset1","setVersion":3,"electionId":{"$oid":"7fffffff0000000000010005"},"primary":"192.168.0.43:26310","lastUpdateTime":{"$date":"2024-05-27T12:19:52.646+09:00"},"logicalSessionTimeoutMinutes":30,"hosts":{"0":"192.168.0.43:26310","1":"192.168.0.44:26310","2":"192.168.0.83:26310"},"arbiters":{}  ,"passives":{"0":"192.168.0.111:26310"}}
  ,"192.168.0.44:26310":{"address":"192.168.0.44:26310","topologyVersion":{"processId":{"$oid":"6653f465b324420413b18d02"},"counter":10},"roundTripTime":347,"lastWriteDate":{"$date":"2024-05-27T12:19:44.000+09:00"},"opTime":{"ts":{"$timestamp":{"t":1716779984,"i":16}},"t":5},"type":"RSSecondary","minWireVersion":17,"maxWireVersion":17,"me":"192.168.0.44:26310","setName":"replicaset1","setVersion":2,"primary":"192.168.0.43:26310","lastUpdateTime":{"$date":"2024-05-27T12:19:51.844+09:00"},"logicalSessionTimeoutMinutes":30,"hosts":{"0":"192.168.0.43:26310","1":"192.168.0.44:26310","2":"192.168.0.83:26310"},"arbiters":{},"passives":{}}
  ,"192.168.0.83:26310":{"address":"192.168.0.83:26310","topologyVersion":{"processId":{"$oid":"6653f3e7f107dd250ba545d8"},"counter":6},"roundTripTime":378,"lastWriteDate":{"$date":"2024-05-27T12:19:44.000+09:00"},"opTime":{"ts":{"$timestamp":{"t":1716779984,"i":16}},"t":5},"type":"RSSecondary","minWireVersion":17,"maxWireVersion":17,"me":"192.168.0.83:26310","setName":"replicaset1","setVersion":2,"primary":"192.168.0.43:26310","lastUpdateTime":{"$date":"2024-05-27T12:19:51.845+09:00"},"logicalSessionTimeoutMinutes":30,"hosts":{"0":"192.168.0.43:26310","1":"192.168.0.44:26310","2":"192.168.0.83:26310"},"arbiters":{},"passives":{}}
 ,"192.168.0.111:26310":{"address":"192.168.0.111:26310","type":"Unknown","minWireVersion":0,"maxWireVersion":0,"lastUpdateTime":{"$date":{"$numberLong":"-9223372036854775808"}},"hosts":{},"arbiters":{},"passives":{}}
 }
 ,"logicalSessionTimeoutMinutes":30,"setName":"replicaset1","compatible":true,"maxElectionIdSetVersion":{"electionId":{"$oid":"7fffffff0000000000010005"},"setVersion":3}},"previousTopologyDescription":{"id":"aa273e45-6bd6-46b0-b419-78d7ab8968ac","topologyType":"ReplicaSetWithPrimary","servers":{"192.168.0.43:26310":{"address":"192.168.0.43:26310","topologyVersion":{"processId":{"$oid":"6653f4c79ac4c39c6c516b9e"},"counter":7},"roundTripTime":162,"lastWriteDate":{"$date":"2024-05-27T12:19:44.000+09:00"},"opTime":{"ts":{"$timestamp":{"t":1716779984,"i":16}},"t":5},"type":"RSPrimary","minWireVersion":17,"maxWireVersion":17,"me":"192.168.0.43:26310","setName":"replicaset1","setVersion":2,"electionId":{"$oid":"7fffffff0000000000010005"},"primary":"192.168.0.43:26310","lastUpdateTime":{"$date":"2024-05-27T12:19:51.840+09:00"},"logicalSessionTimeoutMinutes":30,"hosts":{"0":"192.168.0.43:26310","1":"192.168.0.44:26310","2":"192.168.0.83:26310"},"arbiters":{},"passives":{}},"192.168.0.44:26310":{"address":"192.168.0.44:26310","topologyVersion":{"processId":{"$oid":"6653f465b324420413b18d02"},"counter":10},"roundTripTime":347,"lastWriteDate":{"$date":"2024-05-27T12:19:44.000+09:00"},"opTime":{"ts":{"$timestamp":{"t":1716779984,"i":16}},"t":5},"type":"RSSecondary","minWireVersion":17,"maxWireVersion":17,"me":"192.168.0.44:26310","setName":"replicaset1","setVersion":2,"primary":"192.168.0.43:26310","lastUpdateTime":{"$date":"2024-05-27T12:19:51.844+09:00"},"logicalSessionTimeoutMinutes":30,"hosts":{"0":"192.168.0.43:26310","1":"192.168.0.44:26310","2":"192.168.0.83:26310"},"arbiters":{},"passives":{}},"192.168.0.83:26310":{"address":"192.168.0.83:26310","topologyVersion":{"processId":{"$oid":"6653f3e7f107dd250ba545d8"},"counter":6},"roundTripTime":378,"lastWriteDate":{"$date":"2024-05-27T12:19:44.000+09:00"},"opTime":{"ts":{"$timestamp":{"t":1716779984,"i":16}},"t":5},"type":"RSSecondary","minWireVersion":17,"maxWireVersion":17,"me":"192.168.0.83:26310","setName":"replicaset1","setVersion":2,"primary":"192.168.0.43:26310","lastUpdateTime":{"$date":"2024-05-27T12:19:51.845+09:00"},"logicalSessionTimeoutMinutes":30,"hosts":{"0":"192.168.0.43:26310","1":"192.168.0.44:26310","2":"192.168.0.83:26310"},"arbiters":{},"passives":{}}},"logicalSessionTimeoutMinutes":30,"setName":"replicaset1","compatible":true,"maxElectionIdSetVersion":{"electionId":{"$oid":"7fffffff0000000000010005"},"setVersion":2}}}}
...
{"t":{"$date":"2024-05-27T12:19:52.646+09:00"},"s":"I",  "c":"-",        "id":4333226, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM host was added to the topology","attr":{"replicaSet":"replicaset1","host":"192.168.0.111:26310"}}
{"t":{"$date":"2024-05-27T12:19:52.648+09:00"},"s":"I",  "c":"NETWORK",  "id":5940901, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"Stale primary detected, marking its state asunknown","attr":{"primary":"192.168.0.111:26310","incomingElectionIdSetVersion":{"electionId":{"$oid":"7fffffff0000000000000002"},"setVersion":1},"currentMaxElectionIdSetVersion":{"electionId":{"$oid":"7fffffff0000000000010005"},"setVersion":3}}}
{"t":{"$date":"2024-05-27T12:19:52.647+09:00"},"s":"I",  "c":"REPL",     "id":4508702, "ctx":"conn101","msg":"Waiting for the current config to propagate to a majority of nodes"}

Hi @nwind7 and welcome to the community forum.

The issue you are experiencing could be due to a configuration error rather than a bug in MongoDB itself. When adding a new node to a MongoDB replica set, it is crucial that all nodes use the same mongodb.key file.
The mongodb.key file is used for internal authentication between the members of the replica set, and any discrepancy in this file causes authentication failures, which prevent the node from joining the replica set properly.
Also, as mentioned in the documentation for Deploy New Replica Set with Keyfile Access Control, the same keyfile must be distributed to all replica set members, and the user running the mongod instances must own the keyfile and be able to read it.
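
For reference, here is a minimal sketch of a keyfile setup; the path /etc/mongodb.key and the mongod service user are only examples, so adjust them to your deployment:

# Generate one keyfile and copy the SAME file to every member host
openssl rand -base64 756 > /etc/mongodb.key

# Only the user running mongod should own and be able to read the keyfile
chown mongod:mongod /etc/mongodb.key
chmod 400 /etc/mongodb.key

# Point every mongod at that keyfile, e.g. in /etc/mongod.conf:
#   security:
#     keyFile: /etc/mongodb.key
#   replication:
#     replSetName: replicaset1

Once every member, including the new one, starts with an identical keyfile, the heartbeat AuthenticationFailed errors in your log should go away.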

Could you please follow the documentation suggested above to deploy the replica set? Please reach out if you are still facing the issue.

Regards
Aasawari

After setting the correct mongodb.key, the member was added to the replica set normally.
However, it does not seem like normal behavior for incorrect information to be delivered to the client when adding a member to the replica set fails because of a mongodb.key problem.
Is this behavior intended? If not, when can it be fixed so that incorrect information is not spread to the client even when adding a member fails due to a keyfile error?
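
In case it helps anyone hitting the same log messages, this is roughly what the fix can look like from a shell (the addresses are the ones from the logs above; adjust the connection string and credentials to your setup):

# After distributing the correct keyfile to 192.168.0.111 and restarting its mongod,
# run against the current primary:
mongosh "mongodb://192.168.0.43:26310" --eval 'rs.remove("192.168.0.111:26310")'   # drop the member stuck in UNKNOWN, if present
mongosh "mongodb://192.168.0.43:26310" --eval 'rs.add("192.168.0.111:26310")'
mongosh "mongodb://192.168.0.43:26310" --eval 'rs.status().members.forEach(m => print(m.name, m.stateStr))'   # new member should reach SECONDARY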