diff --git a/README.md b/README.md
index dcbe4ae84532c16b4bbcc328fbbe0a0dca5a1151..4f174ae7225af65038f10559ce33981b812f45a9 100644
--- a/README.md
+++ b/README.md
@@ -297,3 +297,87 @@ SHELL_PLUS_POST_IMPORTS = (
     ('comments.factories', ('CommentFactory')),
 )
 ```
+
+## Metacore (still in development)
+The Metacore app for citables, currently sourced from Crossref only, is available at `/metacore`.
+To get it running on the server (at the moment deployed on staging), the following services need to be started:
+
+First, the Mongo daemon:
+```bash
+/home/scipoststg/webapps/mongo/mongodb-linux-x86_64-amazon-3.6.3/bin/mongod --auth --dbpath /home/scipoststg/webapps/mongo/data --port 21145 --logpath /home/scipoststg/webapps/scipost/logs/mongod.log --fork
+```
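+
+Django talks to this Mongo instance through mongoengine. A minimal connection sketch (the database name and credentials below are placeholders, not the actual ones):
+```python
+# settings.py -- illustrative values only
+import mongoengine
+
+mongoengine.connect(
+    db='scipost_metacore',     # placeholder database name
+    host='localhost',
+    port=21145,                # must match the --port given to mongod
+    username='metacore_user',  # placeholder; mongod runs with --auth
+    password='secret',         # placeholder
+)
+```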
+
+Tasks that involve large requests to Crossref run in the background; this requires Celery. The following commands assume that you are in the `scipost_v1` main folder, inside the right virtual environment.
+
+Celery depends on a broker, for which we use RabbitMQ. Start it with:
+```bash
+nohup rabbitmq-server > ../logs/rabbitmq.log 2>&1 &
+```
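+
+For the worker to find the broker, the Django settings need a broker URL pointing at this RabbitMQ instance. A minimal sketch, assuming RabbitMQ's default guest account on localhost:
+```python
+# settings.py -- default local RabbitMQ; adjust credentials/vhost as needed
+CELERY_BROKER_URL = 'amqp://guest:guest@localhost:5672//'
+```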
+
+Then the Celery worker itself:
+```bash
+nohup celery -A SciPost_v1 worker --loglevel=info -E > ../logs/celery_worker.log 2>&1 &
+```
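+
+With the worker running, a large import can be queued without blocking the web process. A minimal sketch, assuming a task wrapper around `metacore.services.import_journal` (the wrapper name is illustrative, not necessarily the actual task):
+```python
+# Hypothetical task wrapper around metacore.services.import_journal
+from celery import shared_task
+
+from metacore.services import import_journal
+
+@shared_task
+def import_journal_task(issn, cursor='*', from_index_date=None):
+    # Runs inside the Celery worker, so long Crossref requests
+    # do not tie up a web request.
+    import_journal(issn, cursor=cursor, from_index_date=from_index_date)
+
+# Queue it from a shell or a view, without waiting for the result:
+# import_journal_task.delay('2542-4653')  # placeholder ISSN
+```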
+
+And finally `beat`, which enables setting up periodic tasks:
+```bash
+nohup celery -A SciPost_v1 beat --loglevel=info --scheduler django_celery_beat.schedulers:DatabaseScheduler > ../logs/celery_beat.log 2>&1 &
+```
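+
+With the `DatabaseScheduler`, periodic tasks are stored in the database and can be managed from the Django admin or from code. A minimal sketch, reusing the hypothetical task above with an arbitrary 24-hour interval:
+```python
+# Hypothetical periodic-task registration via django_celery_beat
+from django_celery_beat.models import IntervalSchedule, PeriodicTask
+
+schedule, _ = IntervalSchedule.objects.get_or_create(
+    every=24, period=IntervalSchedule.HOURS)
+PeriodicTask.objects.get_or_create(
+    name='Periodic Crossref import',
+    task='metacore.tasks.import_journal_task',  # hypothetical task path
+    interval=schedule,
+    kwargs='{"issn": "2542-4653"}',             # placeholder ISSN
+)
+```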
+
+Note: on the staging server, these commands are contained in two shell scripts in the `scipoststg` home folder. Just run
+```bash
+./start_mongod.sh
+./start_celery.sh
+```
diff --git a/metacore/services.py b/metacore/services.py
index 578c4fb13eeaa9e7734162d7357a497d78f15bb8..77186d22bad946eb42649ac4f7897778f6695c76 100644
--- a/metacore/services.py
+++ b/metacore/services.py
@@ -82,12 +82,9 @@ def import_journal(issn, cursor='*', from_index_date=None):
-        # Mass insert in database (will fail on encountering existing documents
-        # with same DOI
+        # Bulk write to the database: each citable becomes an update operation
+        # keyed on DOI, so existing documents are updated instead of rejected
         if citables:
-            if from_index_date:
-                operations = [obj.to_UpdateOne() for obj in serialized_objects]
-                col = Citable._get_collection()
-                col.bulk_write(operations, ordered=False)
-            else:
-                Citable.objects.insert(citables, {'ordered': False})
+            operations = [obj.to_UpdateOne() for obj in serialized_objects]
+            col = Citable._get_collection()
+            col.bulk_write(operations, ordered=False)  # unordered: keep going past per-document errors
 
         # Save current count so progress can be tracked in the admin page
         # TODO: make this work (currently only executed after whole import