The instance has 64 GB of memory. I'm calling write_rels
as shown below, which inserts about 500–1000 relations into Neptune concurrently, but I keep receiving this error:
{code: "BoltProtocol.unexpectedException"} {message: "Unexpected server exception 'Operation terminated (out of memory)'"}
I would like to know why this happens, how to avoid it, and how to insert relationships more efficiently.
def write_rels(rels: list[dict], group_size: int = 200, concurrency: int = 16) -> float:
    """Upsert relations in parallel batches and return the elapsed time.

    Splits *rels* into chunks of *group_size* and submits each chunk to
    ``Driver.batch_create_rels`` on a thread pool of *concurrency* workers.

    Args:
        rels: Relation property dicts to write.
        group_size: Number of relations per batch (one transaction each).
        concurrency: Maximum number of concurrent worker threads.

    Returns:
        Wall-clock seconds spent writing all batches.

    Raises:
        Exception: Re-raises the first exception from any failed batch
            (the original silently discarded worker errors, hiding the
            server-side out-of-memory failures).
    """
    print("write_rels, rels size:", len(rels), "group_size:", group_size, "concurrency:", concurrency)
    d = Driver()
    # Chunk the input; the final group may be shorter than group_size.
    groups = [rels[i:i + group_size] for i in range(0, len(rels), group_size)]
    start = time.time()
    try:
        with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
            futures = [executor.submit(d.batch_create_rels, g) for g in groups]
            for future in concurrent.futures.as_completed(futures):
                # result() propagates any worker exception; the original
                # `pass` swallowed failures, so OOM errors went unnoticed.
                future.result()
    finally:
        # Release the driver's connection pool even when a batch fails.
        d.driver.close()
    return time.time() - start
class Driver():
    """Thin wrapper around a Bolt ``GraphDatabase`` driver connection.

    NOTE(review): the empty URI and literal ``("username", "password")``
    credentials look like placeholders scrubbed for posting — fill in real
    values before use.
    """

    def __init__(self) -> None:
        uri = ""
        self.driver = GraphDatabase.driver(uri, auth=("username", "password"))
        # check_connectivity() is not defined in this view; presumably it
        # verifies the connection at startup — confirm it exists on this
        # class (or the driver object) in the full file.
        self.check_connectivity()

    def batch_create_rels(self, data):
        """Upsert a batch of entries in a single managed write transaction.

        NOTE(review): despite the name, the Cypher below MERGEs *nodes*
        (one per dict in *data*, keyed on ``id``), not relationships —
        verify against the caller's intent.
        """
        def merge_nodes(tx):
            # NOTE(review): `label` is not defined in this method or any
            # visible scope — presumably a module-level global; if not,
            # this raises NameError at runtime.
            query = f"""
UNWIND $data AS param
MERGE (n:{label} {{id: param.id}})
ON CREATE SET n = param
ON MATCH SET n += param
"""
            tx.run(query, data=data)
        with self.driver.session() as session:
            # write_transaction is the pre-5.x neo4j-driver API (deprecated
            # in favor of execute_write); both retry on transient errors.
            return session.write_transaction(merge_nodes)