Is there an AWS setting that limits how many concurrent long-polling requests Node.js can handle?


I'm setting up a new application that long-polls messages from AWS SQS with a 10-second wait interval. When I load-tested it, latency started growing once about 50 users were waiting on their requests: it reached 15 seconds, and hit 30 seconds with 150 users. Is something wrong with my code, or do AWS/Node have some kind of setting that causes this?

const port = process.env.PORT || 3001;
const express = require('express');
const app = express();
const AWS = require('aws-sdk');

AWS.config.update({region: 'eu-west-1'});

const MD5 = function(d){<md5function>}
const sleep = (waitTimeInMs) => new Promise(resolve => setTimeout(resolve, waitTimeInMs));
const SQS = new AWS.SQS({
  region: 'eu-west-1'
});
const LONG_POLL_TIMEOUT = 10;

async function checkQueue(req, res) {
  const {version, token} = req.params;
  const auth = req.query.auth;

  if (!isTokenValid(token, auth)) {
    // Invalid token: hold the connection for the full long-poll window, then return an empty response
    await sleep(LONG_POLL_TIMEOUT * 1000);
    res.send();
  } else {
    getUpdateMessage(version, token, res);
  }
}

function getUpdateMessage(version, token, res) {
  const urlParams = {
    QueueName: `_version-queue-${version}-${token}`
  };

  SQS.getQueueUrl(urlParams, (urlErr, urlData) => {
    if (urlErr) {
      res.status(204).send();
    } else {
      const messageParams = {
        QueueUrl: urlData.QueueUrl,
        WaitTimeSeconds: LONG_POLL_TIMEOUT, // SQS long polling: wait up to 10 s for a message
      };
      SQS.receiveMessage(messageParams, (err, data) => {
        if (err) {
          res.status(204).send();
        } else if (data.Messages) {
          res.send(data.Messages[0].Body);
          // Delete the message once it has been delivered to the client
          SQS.deleteMessage({
            QueueUrl: urlData.QueueUrl,
            ReceiptHandle: data.Messages[0].ReceiptHandle
          }, (deleteErr) => {
            if (deleteErr) {
              console.error(deleteErr);
            }
          });
        } else {
          res.send();
        }
      });
    }
  });
}

function isTokenValid(token, auth) {
  // check against tokens for last 14 days
  let dayNumber = Math.ceil(Date.now() / (24 * 3600 * 1000));
  for (let i = 0; i < 14; i++) {
    const stringToHash = `<string>`;
    if (MD5(stringToHash) == auth) {
      return true;
    }
    dayNumber--;
  }
  return false;
}
app.use(function(req, res, next) {
  res.header("Access-Control-Allow-Origin", "*");
  next();
});

app.get('/versions/:version/long_poll_updates/:token', function (req, res) {
  checkQueue(req, res);
});

app.get('/check', function (req, res) {
  res.send('I\'m ok!');
});

app.use((req, res) => {
  res.status(404).send("Sorry, that route doesn't exist. Have a nice day :)");
});

app.listen(port, () => {
  console.log('Server running at http://127.0.0.1:' + port + '/');
});

CPU utilisation was less than 10 percent.

se0ga
Asked 5 years ago, 802 views
2 Answers

Here's the detailed documentation:

https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/node-configuring-maxsockets.html

Quote:

When using the default of https, the SDK takes the maxSockets value from the globalAgent. If the maxSockets value is not defined or is Infinity, the SDK assumes a maxSockets value of 50.

You are most likely observing this setting kicking in.
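
With maxSockets capped at 50 and each receiveMessage call holding its socket open for up to 10 seconds, the 51st concurrent long-poll request has to queue behind the connection pool, which matches the latency growth you measured. As a minimal sketch of raising the limit, following the approach on the linked page (the value 500 below is only an illustrative assumption; size it to the number of requests you expect to hold open at once):

const https = require('https');
const AWS = require('aws-sdk');

// Agent with a larger connection pool; 500 is an example value, not a recommendation
const agent = new https.Agent({
  maxSockets: 500
});

// Tell the SDK to use this agent for all HTTPS requests
AWS.config.update({
  region: 'eu-west-1',
  httpOptions: { agent }
});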

Kuba
Answered 5 years ago
Expert, reviewed a month ago

Thank you, this setting solved the problem!

se0ga
Answered 5 years ago

관련 콘텐츠