Different Replica Set status via Docker Compose and Mongo Shell

I am setting up a replica set (1 primary, 2 secondaries) via a Docker Compose file. However, when I try to create indexes on the primary, I get the error "codeName : NotWritablePrimary". To investigate, I print rs.status() before the index-creation step, and I see that all the nodes in the replica set are SECONDARY. However, when I connect to each container via the mongo shell afterwards, I see that the replica set is correctly set up (1 primary, 2 secondaries). Can anyone please help me out here? I have provided the shell script used to create the replica set and the indexes below, along with the rs.status() output.

setup.sh :

#!/bin/bash

#MONGODB1=`ping -c 1 mongo1 | head -1  | cut -d "(" -f 2 | cut -d ")" -f 1`
#MONGODB2=`ping -c 1 mongo2 | head -1  | cut -d "(" -f 2 | cut -d ")" -f 1`
#MONGODB3=`ping -c 1 mongo3 | head -1  | cut -d "(" -f 2 | cut -d ")" -f 1`

MONGODB1=mongodb1
MONGODB2=mongodb2
MONGODB3=mongodb3

echo "**********************************************" ${MONGODB1}
echo "Waiting for startup.."
until curl http://${MONGODB1}:27017/serverStatus\?text\=1 2>&1 | grep uptime | head -1; do
  printf '.'
  sleep 1
done

# echo curl http://${MONGODB1}:27017/serverStatus\?text\=1 2>&1 | grep uptime | head -1
echo ${MONGODB1} " Started.."

echo SETUP.sh time now: `date +"%T" `
mongo --host ${MONGODB1}:27017 <<EOF
var cfg = {
    "_id": "rs0",
    "protocolVersion": 1,
    "version": 1,
    "members": [
        {
            "_id": 0,
            "host": "${MONGODB1}:27017",
            "priority": 2
        },
        {
            "_id": 1,
            "host": "${MONGODB2}:27017",
            "priority": 0
        },
        {
            "_id": 2,
            "host": "${MONGODB3}:27017",
            "priority": 0
        }
    ],
    "settings": { "chainingAllowed": true }
};

rs.initiate(cfg, { force: true });
rs.reconfig(cfg, { force: true });

echo "TEST........"
rs.status();
echo "TEST........"

use orion;
db.createCollection("entities");
db.entities.createIndex({"_id.servicePath": 1, "_id.id": 1, "_id.type": 1}, {unique: true});
db.entities.createIndex({"_id.type": 1}); db.entities.createIndex({"_id.id": 1});
use orion-iotteststand;
db.createCollection("entities");
db.entities.createIndex({"_id.servicePath": 1, "_id.id": 1, "_id.type": 1}, {unique: true});
db.entities.createIndex({"_id.type": 1}); db.entities.createIndex({"_id.id": 1});
use iotagentul;
db.createCollection("devices");
db.devices.createIndex({"_id.service": 1, "_id.id": 1, "_id.type": 1});


rs.slaveOk();
db.getMongo().setReadPref('secondary');
db.getMongo().setSlaveOk();


EOF

rs.status():

{
    "set" : "rs0",
    "date" : ISODate("2021-09-22T07:03:37.742Z"),
    "myState" : 2,
    "term" : NumberLong(12),
    "syncSourceHost" : "",
    "syncSourceId" : -1,
    "heartbeatIntervalMillis" : NumberLong(2000),
    "majorityVoteCount" : 2,
    "writeMajorityCount" : 2,
    "votingMembersCount" : 3,
    "writableVotingMembersCount" : 3,
    "optimes" : {
        "lastCommittedOpTime" : {
            "ts" : Timestamp(0, 0),
            "t" : NumberLong(-1)
        },
        "lastCommittedWallTime" : ISODate("1970-01-01T00:00:00Z"),
        "appliedOpTime" : {
            "ts" : Timestamp(1632294166, 1),
            "t" : NumberLong(12)
        },
        "durableOpTime" : {
            "ts" : Timestamp(1632294166, 1),
            "t" : NumberLong(12)
        },
        "lastAppliedWallTime" : ISODate("2021-09-22T07:02:46.624Z"),
        "lastDurableWallTime" : ISODate("2021-09-22T07:02:46.624Z")
    },
    "lastStableRecoveryTimestamp" : Timestamp(1632294146, 1),
    "members" : [
        {
            "_id" : 0,
            "name" : "mongodb1:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 6,
            "optime" : {
                "ts" : Timestamp(1632294166, 1),
                "t" : NumberLong(12)
            },
            "optimeDate" : ISODate("2021-09-22T07:02:46Z"),
            "syncSourceHost" : "",
            "syncSourceId" : -1,
            "infoMessage" : "",
            "configVersion" : 583093,
            "configTerm" : -1,
            "self" : true,
            "lastHeartbeatMessage" : ""
        },
        {
            "_id" : 1,
            "name" : "mongodb2:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 4,
            "optime" : {
                "ts" : Timestamp(1632294166, 1),
                "t" : NumberLong(12)
            },
            "optimeDurable" : {
                "ts" : Timestamp(1632294166, 1),
                "t" : NumberLong(12)
            },
            "optimeDate" : ISODate("2021-09-22T07:02:46Z"),
            "optimeDurableDate" : ISODate("2021-09-22T07:02:46Z"),
            "lastHeartbeat" : ISODate("2021-09-22T07:03:37.740Z"),
            "lastHeartbeatRecv" : ISODate("2021-09-22T07:03:37.514Z"),
            "pingMs" : NumberLong(0),
            "lastHeartbeatMessage" : "",
            "syncSourceHost" : "",
            "syncSourceId" : -1,
            "infoMessage" : "",
            "configVersion" : 486940,
            "configTerm" : -1
        },
        {
            "_id" : 2,
            "name" : "mongodb3:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 4,
            "optime" : {
                "ts" : Timestamp(1632294166, 1),
                "t" : NumberLong(12)
            },
            "optimeDurable" : {
                "ts" : Timestamp(1632294166, 1),
                "t" : NumberLong(12)
            },
            "optimeDate" : ISODate("2021-09-22T07:02:46Z"),
            "optimeDurableDate" : ISODate("2021-09-22T07:02:46Z"),
            "lastHeartbeat" : ISODate("2021-09-22T07:03:37.740Z"),
            "lastHeartbeatRecv" : ISODate("2021-09-22T07:03:37.381Z"),
            "pingMs" : NumberLong(0),
            "lastHeartbeatMessage" : "",
            "syncSourceHost" : "",
            "syncSourceId" : -1,
            "infoMessage" : "",
            "configVersion" : 486940,
            "configTerm" : -1
        }
    ],
    "ok" : 1,
    "$clusterTime" : {
        "clusterTime" : Timestamp(1632294166, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    },
    "operationTime" : Timestamp(1632294166, 1)
}

When you initialize the replica set, an election will occur to select the primary.

The one I just looked at took ~11s between rs.initiate() and a member transitioning to PRIMARY.

You can poll db.hello(), or db.isMaster() given that you appear to be using an outdated version (based on the HTTP interface use), until a primary is present, and then proceed with the remainder of your setup.

db.isMaster().primary is going to be an empty string until a primary exists.
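
As a minimal sketch of that wait loop (assuming the legacy mongo shell and the ${MONGODB1} variable from your script; adjust host/port as needed):

#!/bin/bash
# Poll until the replica set reports a primary.
PRIMARY=""
until [ -n "$PRIMARY" ]; do
  # --quiet prints only the eval result; "|| ''" maps an absent primary to ""
  PRIMARY=$(mongo --quiet --host "${MONGODB1}:27017" --eval 'db.isMaster().primary || ""')
  [ -z "$PRIMARY" ] && sleep 1
done
echo "Primary elected: ${PRIMARY}"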

A comment on your replica set configuration: members with priority 0 can never be elected primary, so if your ${MONGODB1} stops or fails, your replica set will remain read-only until it returns or you update the replica set configuration.
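
If that ever happens, a force reconfig from one of the surviving members is one way out. A sketch, assuming the legacy mongo shell and the member order from your config:

mongo --host ${MONGODB2}:27017 <<EOF
// Give the surviving members a non-zero priority so one can be elected primary.
// Sketch only: adjust member indexes to match your actual configuration.
var cfg = rs.conf();
cfg.members[1].priority = 1;
cfg.members[2].priority = 1;
rs.reconfig(cfg, { force: true });  // force is required while no primary exists
EOF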

Thanks for the suggestion. I changed my setup.sh as follows and it worked.

#!/bin/bash

#MONGODB1=`ping -c 1 mongo1 | head -1  | cut -d "(" -f 2 | cut -d ")" -f 1`
#MONGODB2=`ping -c 1 mongo2 | head -1  | cut -d "(" -f 2 | cut -d ")" -f 1`
#MONGODB3=`ping -c 1 mongo3 | head -1  | cut -d "(" -f 2 | cut -d ")" -f 1`

MONGODB1=mongodb1
MONGODB2=mongodb2
MONGODB3=mongodb3

echo "**********************************************" ${MONGODB1}
echo "Waiting for startup.."
until curl http://${MONGODB1}:27017/serverStatus\?text\=1 2>&1 | grep uptime | head -1; do
  printf '.'
  sleep 1
done

# echo curl http://${MONGODB1}:27017/serverStatus\?text\=1 2>&1 | grep uptime | head -1
echo ${MONGODB1} " Started.."
echo "**************Replica Set configuration Begin*********"


echo SETUP.sh time now: `date +"%T" `
mongo --host ${MONGODB1}:27017 <<EOF
var cfg = {
    "_id": "rs0",
    "protocolVersion": 1,
    "version": 1,
    "members": [
        {
            "_id": 0,
            "host": "${MONGODB1}:27017",
            "priority": 2
        },
        {
            "_id": 1,
            "host": "${MONGODB2}:27017",
            "priority": 0
        },
        {
            "_id": 2,
            "host": "${MONGODB3}:27017",
            "priority": 0
        }
    ],
    "settings": { "chainingAllowed": true }
};

rs.initiate(cfg, { force: true });
rs.reconfig(cfg, { force: true });

rs.slaveOk();
db.getMongo().setReadPref('secondary');
db.getMongo().setSlaveOk();

echo "**************Replica Set configuration Complete*********"

EOF


until mongo --host ${MONGODB1}:27017 --eval 'db.isMaster().primary' | tail -1 | grep ${MONGODB1}; do
  printf 'Waiting for Replica Set to initialize Primary.....'
  sleep 1
done

echo "Primary intialized...."
echo "**************Index Creation Begin*********"

mongo --host ${MONGODB1}:27017 <<EOF

use orion;
db.createCollection("entities");
db.entities.createIndex({"_id.servicePath": 1, "_id.id": 1, "_id.type": 1}, {unique: true});
db.entities.createIndex({"_id.type": 1}); db.entities.createIndex({"_id.id": 1});
use orion-iotteststand;
db.createCollection("entities");
db.entities.createIndex({"_id.servicePath": 1, "_id.id": 1, "_id.type": 1}, {unique: true});
db.entities.createIndex({"_id.type": 1}); db.entities.createIndex({"_id.id": 1});
use iotagentul;
db.createCollection("devices");
db.devices.createIndex({"_id.service": 1, "_id.id": 1, "_id.type": 1});

echo "**************Index Creation Complete*********"

EOF

About the replica set configuration: how do you suggest setting it up so that it handles the failure of the primary node and still allows writes? Can one of the secondary nodes be set up to become primary in that case?

I would suggest not setting the priority at all. They will all get the default priority of 1, so any healthy member can be elected primary if the current primary fails.
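
For example, a sketch of such a configuration, using the same host variables as in your script and leaving every priority at its default of 1:

mongo --host ${MONGODB1}:27017 <<EOF
// All members get the default priority of 1, so any healthy member
// can be elected primary if the current one fails.
var cfg = {
    "_id": "rs0",
    "protocolVersion": 1,
    "version": 1,
    "members": [
        { "_id": 0, "host": "${MONGODB1}:27017" },
        { "_id": 1, "host": "${MONGODB2}:27017" },
        { "_id": 2, "host": "${MONGODB3}:27017" }
    ],
    "settings": { "chainingAllowed": true }
};
rs.initiate(cfg);
EOF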