Failed to refresh session cache: ShardingStateNotInitialized: Cannot accept sharding commands if sharding state has not been initialized with a shardIdentity document

Hello,

I followed the instructions from here: https://www.mongodb.com/docs/manual/tutorial/deploy-shard-cluster/#start-each-member-of-the-shard-replica-set to set up a shard.

  1. Here is the rs.status() output for the config server replica set:
{
	"set" : "rs0",
	"date" : ISODate("2022-08-08T06:36:33.451Z"),
	"myState" : 1,
	"term" : NumberLong(133),
	"syncingTo" : "",
	"syncSourceHost" : "",
	"syncSourceId" : -1,
	"configsvr" : true,
	"heartbeatIntervalMillis" : NumberLong(2000),
	"majorityVoteCount" : 2,
	"writeMajorityCount" : 2,
	"optimes" : {
		"lastCommittedOpTime" : {
			"ts" : Timestamp(1659940591, 1),
			"t" : NumberLong(133)
		},
		"lastCommittedWallTime" : ISODate("2022-08-08T06:36:31.072Z"),
		"readConcernMajorityOpTime" : {
			"ts" : Timestamp(1659940591, 1),
			"t" : NumberLong(133)
		},
		"readConcernMajorityWallTime" : ISODate("2022-08-08T06:36:31.072Z"),
		"appliedOpTime" : {
			"ts" : Timestamp(1659940591, 1),
			"t" : NumberLong(133)
		},
		"durableOpTime" : {
			"ts" : Timestamp(1659940591, 1),
			"t" : NumberLong(133)
		},
		"lastAppliedWallTime" : ISODate("2022-08-08T06:36:31.072Z"),
		"lastDurableWallTime" : ISODate("2022-08-08T06:36:31.072Z")
	},
	"lastStableRecoveryTimestamp" : Timestamp(1659940572, 1),
	"lastStableCheckpointTimestamp" : Timestamp(1659940572, 1),
	"electionCandidateMetrics" : {
		"lastElectionReason" : "stepUpRequestSkipDryRun",
		"lastElectionDate" : ISODate("2022-08-07T18:00:50.758Z"),
		"electionTerm" : NumberLong(133),
		"lastCommittedOpTimeAtElection" : {
			"ts" : Timestamp(1659895248, 2),
			"t" : NumberLong(132)
		},
		"lastSeenOpTimeAtElection" : {
			"ts" : Timestamp(1659895248, 2),
			"t" : NumberLong(132)
		},
		"numVotesNeeded" : 2,
		"priorityAtElection" : 1,
		"electionTimeoutMillis" : NumberLong(10000),
		"priorPrimaryMemberId" : 2,
		"numCatchUpOps" : NumberLong(0),
		"newTermStartDate" : ISODate("2022-08-07T18:00:51.320Z"),
		"wMajorityWriteAvailabilityDate" : ISODate("2022-08-07T18:00:52.676Z")
	},
	"electionParticipantMetrics" : {
		"votedForCandidate" : true,
		"electionTerm" : NumberLong(132),
		"lastVoteDate" : ISODate("2022-08-07T18:00:48.115Z"),
		"electionCandidateMemberId" : 2,
		"voteReason" : "",
		"lastAppliedOpTimeAtElection" : {
			"ts" : Timestamp(1659895244, 1),
			"t" : NumberLong(130)
		},
		"maxAppliedOpTimeInSet" : {
			"ts" : Timestamp(1659895246, 3),
			"t" : NumberLong(131)
		},
		"priorityAtElection" : 1
	},
	"members" : [
		{
			"_id" : 0,
			"name" : "mongodb-conf1:27017",
			"health" : 1,
			"state" : 1,
			"stateStr" : "PRIMARY",
			"uptime" : 45347,
			"optime" : {
				"ts" : Timestamp(1659940591, 1),
				"t" : NumberLong(133)
			},
			"optimeDate" : ISODate("2022-08-08T06:36:31Z"),
			"syncingTo" : "",
			"syncSourceHost" : "",
			"syncSourceId" : -1,
			"infoMessage" : "",
			"electionTime" : Timestamp(1659895250, 1),
			"electionDate" : ISODate("2022-08-07T18:00:50Z"),
			"configVersion" : 1,
			"self" : true,
			"lastHeartbeatMessage" : ""
		},
		{
			"_id" : 1,
			"name" : "mongodb-conf2:27017",
			"health" : 1,
			"state" : 2,
			"stateStr" : "SECONDARY",
			"uptime" : 45345,
			"optime" : {
				"ts" : Timestamp(1659940591, 1),
				"t" : NumberLong(133)
			},
			"optimeDurable" : {
				"ts" : Timestamp(1659940591, 1),
				"t" : NumberLong(133)
			},
			"optimeDate" : ISODate("2022-08-08T06:36:31Z"),
			"optimeDurableDate" : ISODate("2022-08-08T06:36:31Z"),
			"lastHeartbeat" : ISODate("2022-08-08T06:36:31.539Z"),
			"lastHeartbeatRecv" : ISODate("2022-08-08T06:36:32.860Z"),
			"pingMs" : NumberLong(0),
			"lastHeartbeatMessage" : "",
			"syncingTo" : "mongodb-conf1:27017",
			"syncSourceHost" : "mongodb-conf1:27017",
			"syncSourceId" : 0,
			"infoMessage" : "",
			"configVersion" : 1
		},
		{
			"_id" : 2,
			"name" : "mongodb-conf3:27017",
			"health" : 1,
			"state" : 2,
			"stateStr" : "SECONDARY",
			"uptime" : 45338,
			"optime" : {
				"ts" : Timestamp(1659940591, 1),
				"t" : NumberLong(133)
			},
			"optimeDurable" : {
				"ts" : Timestamp(1659940591, 1),
				"t" : NumberLong(133)
			},
			"optimeDate" : ISODate("2022-08-08T06:36:31Z"),
			"optimeDurableDate" : ISODate("2022-08-08T06:36:31Z"),
			"lastHeartbeat" : ISODate("2022-08-08T06:36:31.519Z"),
			"lastHeartbeatRecv" : ISODate("2022-08-08T06:36:33.304Z"),
			"pingMs" : NumberLong(0),
			"lastHeartbeatMessage" : "",
			"syncingTo" : "mongodb-conf2:27017",
			"syncSourceHost" : "mongodb-conf2:27017",
			"syncSourceId" : 1,
			"infoMessage" : "",
			"configVersion" : 1
		}
	],
	"ok" : 1,
	"$gleStats" : {
		"lastOpTime" : Timestamp(0, 0),
		"electionId" : ObjectId("7fffffff0000000000000085")
	},
	"lastCommittedOpTime" : Timestamp(1659940591, 1),
	"$clusterTime" : {
		"clusterTime" : Timestamp(1659940591, 1),
		"signature" : {
			"hash" : BinData(0,"xdvd4H1nUSIA2sIqQ8vxQHoaO+o="),
			"keyId" : NumberLong("7127557678649311233")
		}
	},
	"operationTime" : Timestamp(1659940591, 1)
}

2. Here is the rs.config() output for the shard replica set:

{
	"_id" : "rs1",
	"version" : 1,
	"protocolVersion" : NumberLong(1),
	"writeConcernMajorityJournalDefault" : true,
	"members" : [
		{
			"_id" : 0,
			"host" : "mongodb-shard1-01:27017",
			"arbiterOnly" : false,
			"buildIndexes" : true,
			"hidden" : false,
			"priority" : 1,
			"tags" : {

			},
			"slaveDelay" : NumberLong(0),
			"votes" : 1
		},
		{
			"_id" : 1,
			"host" : "mongodb-shard1-02:27017",
			"arbiterOnly" : false,
			"buildIndexes" : true,
			"hidden" : false,
			"priority" : 1,
			"tags" : {

			},
			"slaveDelay" : NumberLong(0),
			"votes" : 1
		},
		{
			"_id" : 2,
			"host" : "mongodb-shard1-03:27017",
			"arbiterOnly" : false,
			"buildIndexes" : true,
			"hidden" : false,
			"priority" : 1,
			"tags" : {

			},
			"slaveDelay" : NumberLong(0),
			"votes" : 1
		}
	],
	"settings" : {
		"chainingAllowed" : true,
		"heartbeatIntervalMillis" : 2000,
		"heartbeatTimeoutSecs" : 10,
		"electionTimeoutMillis" : 10000,
		"catchUpTimeoutMillis" : -1,
		"catchUpTakeoverDelayMillis" : 30000,
		"getLastErrorModes" : {

		},
		"getLastErrorDefaults" : {
			"w" : 1,
			"wtimeout" : 0
		},
		"replicaSetId" : ObjectId("62effe83cfa06cab70c2d2c0")
	}
}



3. Here is the mongos.log output when trying to connect:

2022-08-08T08:48:55.683+0200 I CONNPOOL [ShardRegistry] Connecting to mongodb-conf1:27017
2022-08-08T08:48:55.690+0200 I CONTROL [LogicalSessionCacheRefresh] Failed to refresh session cache: ShardingStateNotInitialized: Cannot accept sharding commands if sharding state has not been initialized with a shardIdentity document
2022-08-08T08:49:00.369+0200 I NETWORK [conn21] received client metadata from 10.135.169.16:33198 conn21: { driver: { name: "mongoc / ext-mongodb:PHP", version: "1.16.2 / 1.7.4" }, os: { type: "Linux", name: "Debian GNU/Linux", version: "9", architecture: "x86_64" }, platform: "PHP 7.4.4cfg=0x015156a8e9 posix=200809 stdc=201112 CC=GCC 6.3.0 20170516 CFLAGS="" LDFLAGS=""" }
2022-08-08T08:49:20.617+0200 I COMMAND [conn21] command feeder.logs command: create { create: "logs", capped: false, $db: "feeder", lsid: { id: UUID("9ac401d6-0352-423a-9c8e-4557be32ecc3") }, $clusterTime: { clusterTime: Timestamp(1659941336, 1), signature: { hash: BinData(0, 26334C49AAEA28A44B3DFF3A9911AFBD09350A29), keyId: 7127557678649311233 } } } numYields:0 ok:0 errMsg:"Could not find host matching read preference { mode: \"primary\" } for set rs1" errName:FailedToSatisfyReadPreference errCode:133 reslen:306 protocol:op_msg 20220ms
2022-08-08T08:50:16.066+0200 I CONNPOOL [ShardRegistry] Ending idle connection to host mongodb-conf1:27017 because the pool meets constraints; 1 connections to that host remain open
2022-08-08T08:53:55.686+0200 I CONTROL [LogicalSessionCacheRefresh] Failed to refresh session cache: ShardingStateNotInitialized: Cannot accept sharding commands if sharding state has not been initialized with a shardIdentity document


Can you please help me with some suggestions?

Hello,

Have you started your shard replica set using the correct configuration?

The guide that you linked shows that the sharding.clusterRole option should be set to shardsvr, either by using the correct configuration file:

sharding:
    clusterRole: shardsvr
replication:
    replSetName: <replSetName>
net:
    bindIp: localhost,<ip address>

Or by starting mongod from the command line (also with the correct options):
mongod --shardsvr --replSet <replSetname> --dbpath <path> --bind_ip localhost,<hostname(s)|ip address(es)>
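
If the mongod processes on the shard hosts are already running, one quick way to double-check that they actually picked up that option is to look at the parsed startup options from the mongo shell. This is only a sketch, run against one of the shard members:

// run against a shard member, e.g. mongodb-shard1-01:27017
db.adminCommand({ getCmdLineOpts: 1 }).parsed.sharding
// expected result if the role was applied: { "clusterRole" : "shardsvr" }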

Kind regards, Tin.

Hello,

Yes. Here is the mongod.conf file for the shard replica set:

# Where and how to store data.
storage:
  dbPath: /var/data/mongodb
  journal:
    enabled: true
#  engine:
#  mmapv1:
#  wiredTiger:

# where to write logging data.
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod.log

# network interfaces
net:
  port: 27017
#  bindIp: 127.0.0.1
  bindIpAll: true

# how the process runs
processManagement:
  timeZoneInfo: /usr/share/zoneinfo

security:
  keyFile: /etc/mongodb_key

replication:
  replSetName: "rs1"

sharding:
  clusterRole: shardsvr
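
(For reference, the shardIdentity document that the error message refers to is stored in admin.system.version on each shard member. A quick check against the rs1 primary, using the hostnames shown earlier, would look roughly like this:)

// run on the PRIMARY of rs1, e.g. mongodb-shard1-01:27017
db.getSiblingDB("admin").system.version.find({ _id: "shardIdentity" })
// an empty result means the member has never been initialized as a shard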

Here is the sh.status() output returned by mongos:

--- Sharding Status ---
  sharding version: {
  	"_id" : 1,
  	"minCompatibleVersion" : 5,
  	"currentVersion" : 6,
  	"clusterId" : ObjectId("5e4539795396520a99f4c822")
  }
  shards:
        {  "_id" : "rs1",  "host" : "rs1/mongodb-shard1-01:27017,mongodb-shard1-02:27017,mongodb-shard1-03:27017",  "state" : 1 }
  active mongoses:
        "4.2.8" : 1
  autosplit:
        Currently enabled: yes
  balancer:
        Currently enabled:  yes
        Currently running:  no
        Failed balancer rounds in last 5 attempts:  5
        Last reported error:  Could not find host matching read preference { mode: "primary" } for set rs1
        Time of Reported error:  Mon Aug 08 2022 10:36:17 GMT+0200 (CEST)
        Migration Results for the last 24 hours:
                No recent migrations
  databases:
        {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                config.system.sessions
                        shard key: { "_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                rs1	1
                        { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : rs1 Timestamp(1, 0)
        {  "_id" : "feeder",  "primary" : "rs1",  "partitioned" : true,  "version" : {  "uuid" : UUID("4062a851-c0f4-4cad-a63d-c07ee0888f8c"),  "lastMod" : 1 } }

It looks like the balancer is not running, and even though I tried:

sh.startBalancer();

it did not start.
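
(For completeness, the balancer state can also be inspected from mongos with the standard shell helpers, for example:)

// run from mongos
sh.getBalancerState()    // true  -> the balancer is enabled
sh.isBalancerRunning()   // false -> no balancing round is currently in progress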

Here is the config for the config servers:


# Where and how to store data.
storage:
  dbPath: /var/data/mongodb
  journal:
    enabled: true
#  engine:
#  mmapv1:
#  wiredTiger:

# where to write logging data.
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod.log

# network interfaces
net:
  port: 27017
#  bindIp: 127.0.0.1
  bindIpAll: true

# how the process runs
processManagement:
  timeZoneInfo: /usr/share/zoneinfo

security:
  keyFile: /etc/mongodb_key

replication:
  replSetName: "rs0"

sharding:
  clusterRole: configsvr

Have you correctly executed the sh.addShard() command? Do you get any errors doing so?
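
For reference, for the rs1 set shown earlier that command would look roughly like this (hostnames assumed from the rs.config() above):

// run from mongos, using the replica set connection string for rs1
sh.addShard("rs1/mongodb-shard1-01:27017,mongodb-shard1-02:27017,mongodb-shard1-03:27017")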

Hello, no errors.

Please check here:

Screen Shot on 2022-08-08 at 13-36-11

Basic question in case you forgot: did you execute sh.enableSharding(dbname);?
Can you try not using the same names for your replica sets? I am not that experienced with sharding techniques, and please understand that I am just trying to point out some mistakes I have made myself to help you out :)
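
Regarding sh.enableSharding(): for the feeder database and the logs collection from the mongos log above, those steps would look roughly like this (the shard key here is only a placeholder):

// run from mongos; { _id: 1 } is just an illustrative shard key choice
sh.enableSharding("feeder")
sh.shardCollection("feeder.logs", { _id: 1 })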

Edit: You should provide information like the MongoDB version and the OS that you are using.

Are the config files you have shared complete?
I don't see the clusterRole, replSetName, and configDB parameters.
Please show the complete config files of the data replica set, the config server replica set, and mongos.
Also share the rs.status() output of both the data replica set and the config server replica set.

Thank you so much for your help.

  1. sh.enableSharding(dbname); was run, of course, and everything was OK.
  2. I am running Debian and MongoDB version 4.2 (this is what was installed).
  3. In the meantime I found the solution: