Function: Multi Collection Eventing


      Access the Data Service when Eventing is listening to multiple collections.

      The multiCollectionEventing function:

      • Demonstrates how to access the Data Service when using * wildcard bindings

      • Requires 2 bindings of type bucket alias

      • Requires the following 4 keyspaces in two buckets, rr100 and source:

        • rr100.eventing.metadata

        • source._default._default

        • source.myscope.mycol1

        • source.myscope.mycol2

      • Operates on 3 test documents

      • Highlights the use of meta.keyspace

      For more information, see Eventing Functions that Listen to Multiple Collections.
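      The function reads and writes through its bucket aliases by supplying a keyspace with each Data Service call. As a minimal sketch (assuming the two bindings described below, alias_ro and alias_rw), the meta.keyspace object passed to OnUpdate can be reused directly, or a target keyspace can be spelled out by hand:

      // Sketch only: illustrates the two addressing styles used in the full function below.
      function OnUpdate(doc, meta) {
          // meta.keyspace identifies where the mutation came from, for example:
          // {"bucket_name":"source","scope_name":"myscope","collection_name":"mycol1"}
          log('mutation from', meta.keyspace);

          // Option 1: reuse the passed meta (id plus keyspace) for the lookup
          var sameDoc = couchbase.get(alias_ro, meta);

          // Option 2: spell out the target keyspace by hand
          var otherDoc = couchbase.get(alias_ro, {"id": "doc2", "keyspace":
              {"bucket_name": "source", "scope_name": "myscope", "collection_name": "mycol2"}});
      }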

      • multiCollectionEventing

      • Input data

      • Output log in reverse order

      // Configure the settings for the multiCollectionEventing function as follows:
      //
      // Set up four (4) required keyspaces in two buckets "rr100" and "source"
      //   rr100.eventing.metadata
      //   source._default._default
      //   source.myscope.mycol1
      //   source.myscope.mycol2
      //
      // Version 7.1.1+
      //   "Function Scope"
      //     *.* (or try source.* if non-privileged)
      //   "Listen to Location"
      //     source.*.*
      //   "Eventing Storage"
      //     rr100.eventing.metadata
      //   Create two (2) Bindings
      //       "binding type", "alias name...", "bucket.scope.collection",     "Access"
      //       ---------------------------------------------------------------------------
      //       "bucket alias", "alias_ro",      "source.myscope.*",            "read only"
      //       "bucket alias", "alias_rw",      "source.myscope.*",            "read and write"
      //   Deploy the Function
      //   Create the following three (3) documents one at a time and inspect the Application log each time
      //       "bucket.scope.collection"   KEY     DATA
      //       ---------------------------------------------------------------------------
      //       source._default._default    doc0    {"data": "doc0"}
      //       source.myscope.mycol1       doc1    {"data": "doc1"}
      //       source.myscope.mycol2       doc2    {"data": "doc2"}
      
      function OnUpdate(doc, meta) {
          log('>>>A IN',doc, meta);
      
          // TEST GET with hardcode keyspace
          var res1 = couchbase.get(alias_ro,{"id": "doc2", "keyspace":
              {"bucket_name": "source","scope_name": "myscope","collection_name": "mycol2"}});
          log('>>>B fixed read',"res1", res1);
      
          // Protect against reading from something outside the alias
          if (meta.keyspace.scope_name == "myscope") {
              // TEST GET with keyspace from meta
              var res2 = couchbase.get(alias_ro,meta);
              log('>>>C read using passed meta (must be myscope)',"res2", res2);
      
              if (meta.keyspace.collection_name == "mycol2") {
                  // TEST UPSERT with hardcode keyspace
                  // Add a field to the document read in res1
                  res1.doc.random1 = Math.random();
                  var res3 = couchbase.upsert(alias_rw,{"id": "doc2", "keyspace":
                      {"bucket_name": "source","scope_name": "myscope","collection_name": "mycol2"}}, res1.doc)
                  log('>>>D upsert',"res3", res3);
      
                  // TEST REPLACE with hardcode keyspace
                  res1.doc.random2 = Math.random();
                  var res4 = couchbase.replace(alias_rw,{"id": "doc2", "keyspace":
                      {"bucket_name": "source","scope_name": "myscope","collection_name": "mycol2"}}, res1.doc)
                  log('>>>E replace',"res4", res4);
      
                  // TEST GET using the passed meta (shows the fields added above)
                  var res5 = couchbase.get(alias_rw,meta);
                  log('>>>F get (show added fields)',"res5", res5);
      
                  // TEST DELETE with hardcode keyspace (so the insert can be tested)
                  var res6 = couchbase.delete(alias_rw,{"id": "doc2", "keyspace":
                      {"bucket_name": "source","scope_name": "myscope","collection_name": "mycol2"}})
                  log('>>>G delete',"res6", res6);
      
                  // TEST INSERT with hardcode keyspace
                  // Remove the added items
                  delete res1.doc.random1;
                  delete res1.doc.random2;
                  var res7 = couchbase.insert(alias_rw,{"id": "doc2", "keyspace":
                      {"bucket_name": "source","scope_name": "myscope","collection_name": "mycol2"}}, res1.doc)
                  log('>>>H upsert',"res7", res7);
              }
          }
      }

      Create the set of three test documents by using the Query Editor to insert the data items. You do not need an index.

      Add one test document at a time.

        UPSERT INTO source._default._default (KEY,VALUE) VALUES ( "doc0",  {"data": "doc0"} );
      
        UPSERT INTO source.myscope.mycol1 (KEY,VALUE) VALUES ( "doc1",  {"data": "doc1"} );
      
        UPSERT INTO source.myscope.mycol2 (KEY,VALUE) VALUES ( "doc2",  {"data": "doc2"} );

      LOG/OUTPUT: KEY doc0
      
      2022-07-28T17:02:04.599-07:00 [INFO] ">>>B fixed read" "res1" {"error":{"code":1,"name":"LCB_KEY_ENOENT","desc":"The document key does not exist on the server","key_not_found":true},"success":false}
      
      2022-07-28T17:02:04.597-07:00 [INFO] ">>>A IN" {"data":"doc0"} {"cas":"1659052924529868800","id":"doc0","expiration":0,"flags":0,"vb":642,"seq":6,"datatype":"json","keyspace":{"bucket_name":"source","scope_name":"_default","collection_name":"_default"},"cid":0}
      
      LOG/OUTPUT: KEY doc1
      
      2022-07-28T17:03:52.902-07:00 [INFO] ">>>C read using passed meta (must be myscope)" "res2" {"doc":{"data":"doc1"},"meta":{"id":"doc1","cas":"1659053032885387264","datatype":"json"},"success":true}
      
      2022-07-28T17:03:52.901-07:00 [INFO] ">>>B fixed read" "res1" {"error":{"code":1,"name":"LCB_KEY_ENOENT","desc":"The document key does not exist on the server","key_not_found":true},"success":false}
      
      2022-07-28T17:03:52.898-07:00 [INFO] ">>>A IN" {"data":"doc1"} {"cas":"1659053032885387264","id":"doc1","expiration":0,"flags":0,"vb":389,"seq":9,"datatype":"json","keyspace":{"bucket_name":"source","scope_name":"myscope","collection_name":"mycol1"},"cid":8}
      
      LOG/OUTPUT: KEY doc2
      
      2022-07-28T17:04:37.807-07:00 [INFO] ">>>H upsert" "res7" {"meta":{"id":"doc2","cas":"1659053077807104000"},"success":true}
      
      2022-07-28T17:04:37.806-07:00 [INFO] ">>>G delete" "res6" {"meta":{"id":"doc2","cas":"1659053077806055424"},"success":true}
      
      2022-07-28T17:04:37.805-07:00 [INFO] ">>>F get (show added fields)" "res5" {"doc":{"data":"doc2","random1":0.7875783578859457,"random2":0.47914947531399843},"meta":{"id":"doc2","cas":"1659053077803827200","datatype":"json"},"success":true}
      
      2022-07-28T17:04:37.804-07:00 [INFO] ">>>E replace" "res4" {"meta":{"id":"doc2","cas":"1659053077803827200"},"success":true}
      
      2022-07-28T17:04:37.803-07:00 [INFO] ">>>D upsert" "res3" {"meta":{"id":"doc2","cas":"1659053077802516480"},"success":true}
      
      2022-07-28T17:04:37.800-07:00 [INFO] ">>>C read using passed meta (must be myscope)" "res2" {"doc":{"data":"doc2"},"meta":{"id":"doc2","cas":"1659053077704474624","datatype":"json"},"success":true}
      
      2022-07-28T17:04:37.799-07:00 [INFO] ">>>B fixed read" "res1" {"doc":{"data":"doc2"},"meta":{"id":"doc2","cas":"1659053077704474624","datatype":"json"},"success":true}
      
      2022-07-28T17:04:37.797-07:00 [INFO] ">>>A IN" {"data":"doc2"} {"cas":"1659053077704474624","id":"doc2","expiration":0,"flags":0,"vb":140,"seq":38,"datatype":"json","keyspace":{"bucket_name":"source","scope_name":"myscope","collection_name":"mycol2"},"cid":9}
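
      In the doc0 and doc1 output above, the fixed read of doc2 fails with LCB_KEY_ENOENT because doc2 has not been created yet. The function only dereferences res1.doc on the doc2 mutation, where the read is known to succeed; a minimal sketch of a more defensive variant (not part of the scriptlet, assuming the same alias_ro and alias_rw bindings) checks the result before using it:

      function OnUpdate(doc, meta) {
          // Sketch only: guard the read before touching res1.doc, mirroring the
          // "success":false / "key_not_found":true result shape shown in the log above.
          var res1 = couchbase.get(alias_ro, {"id": "doc2", "keyspace":
              {"bucket_name": "source", "scope_name": "myscope", "collection_name": "mycol2"}});
          if (!res1.success) {
              // A missing key surfaces as key_not_found (LCB_KEY_ENOENT).
              log('doc2 not readable yet', res1.error);
              return;
          }
          // Safe to modify and write back res1.doc from here on.
          res1.doc.random1 = Math.random();
          var res3 = couchbase.upsert(alias_rw, {"id": "doc2", "keyspace":
              {"bucket_name": "source", "scope_name": "myscope", "collection_name": "mycol2"}}, res1.doc);
          log('guarded upsert', res3);
      }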