Trying to understand async/await in lambda

0

Thanks to some great help here I have made some progress un understanding the problem I'm trying to solve, but I'm still missing something fundamental.

My use case is an existing lambda-based rules engine, written in Node. The rules in a set generally use results from evaluation of earlier rules, so executing them in sequence, synchronously, makes most sense. I can see how it might be possible to fire off separate asynchronous function calls for everything and then somehow clear up the mess afterwards, potentially re-calling those that failed due to missing data but this would be a nightmare.

So what I'm trying to do is insert an async call in what is currently an exclusively synchronous architecture, and I'm not even sure if this is possible in JS/Node. Any advice gratefully appreciated! This might be a Javascript or even a Node.js question rather than an AWS Lambda one, but from my research the way that asynchronous functions are scheduled does seem to be unique to this combination so I thought I'd start here!

It would be great to get to the bottom of this though ... I'd feel less dumb :)

This is a stripped-back example that shows what I'd like to achieve, inserting an async call to ChatGPT:

const { Configuration, OpenAIApi } = require("openai");

exports.handler = async function(event, context, callback) {

    var output;

	try {
		output = await makeSentence(event.prompt);
		console.log('output: ' + output);
	}
	catch (e){
		output = e.message;
	}

	let response = {
		statusCode: 200,
		headers: { "Content-type" : "application/json" },
		body: JSON.stringify(output)
	};

	return response;
};

async function makeSentence(prompt)
{
	const rule1 = getOpening();                     // existing sync call
	const rule2 = await getMiddle(prompt);          // new async call
	const rule3 = getClosing(rule1 + ' ' + rule2);  // existing sync call
	
        // assemble all the results and return the final result
	return rule3;
}

function getOpening()
{
	const greeting = [ 'Hello', 'Hi', 'Greetings'];
	const random = Math.floor(Math.random() * greeting.length);
	
	return '<sentence>' + greeting[random];
}

async function getMiddle(prompt)
{
    console.log("getMiddle('" + prompt + "')");
    
	const configuration = new Configuration({
		apiKey: "my-api-key-here",
	});
	const openai = new OpenAIApi(configuration);

	await openai.createCompletion({
			model: "text-davinci-003",
			prompt: prompt,
			temperature: 0.7,
			max_tokens: 500,
			top_p: 1,
			frequency_penalty: 0,
			presence_penalty: 0,
	}).then(res => {
        var text = res.data.choices[0].text;
        console.log('got response: ' + text);
        return text;
    }).catch(error => {
        console.log('Error: ' + error.message);
        return error.message;
    });
}

function getClosing(resp)
{
	return resp + '</sentence>';
}

The result of this is:

Event JSON

{
  "prompt": "what did the fox say?"
}

Response

{
  "statusCode": 200,
  "headers": {
    "Content-type": "application/json"
  },
  "body": "\"<sentence>Greetings undefined</sentence>\""
}

Function Logs

START RequestId: c7153471-1563-430d-889a-cb394737b5dd Version: $LATEST
2023-04-26T17:16:44.120Z	c7153471-1563-430d-889a-cb394737b5dd	INFO	getMiddle('what did the fox say?')
2023-04-26T17:16:46.119Z	c7153471-1563-430d-889a-cb394737b5dd	INFO	got response: 

The fox usually says, "Ring-ding-ding-ding-dingeringeding!"
2023-04-26T17:16:46.119Z	c7153471-1563-430d-889a-cb394737b5dd	INFO	getClosing("<sentence>Hi undefined")
2023-04-26T17:16:46.119Z	c7153471-1563-430d-889a-cb394737b5dd	INFO	output: <sentence>Hi undefined</sentence>
END RequestId: c7153471-1563-430d-889a-cb394737b5dd
REPORT RequestId: c7153471-1563-430d-889a-cb394737b5dd	Duration: 2039.43 ms	Billed Duration: 2040 ms	Memory Size: 128 MB	Max Memory Used: 77 MB
dmb0058
asked a year ago582 views
3 Answers
0
Accepted Answer

I found the answer ...

In case it helps anyone, the code below mixes synchronous and async function calls in sequence, waiting for the async one to complete before continuing to the next function.

const { Configuration, OpenAIApi } = require("openai");

exports.handler = async function(event, context, callback) {

    var output;

    try {
	const rule1 = getStart();                                    // call the first (sync) function

        const rule2 = await getMiddle(event.prompt).then(res => {    // call the second (async) function and wait
                return res.data.choices[0].text;
    	    }).catch(error => {
                return error.message;
            });
        
        output = getEnd(rule1 + ' ' + rule2);                        // call the third (sync) function
        console.log('result: ' + output);
    }
    catch (e){
	output = e.message;
    }

    let response = {
	statusCode: 200,
	headers: { "Content-type" : "application/json" },
        body: JSON.stringify(output)
    };

    return response;
};

function getStart()
{
    return '<sentence>Hi there,';
}

async function getMiddle(prompt)
{
    console.log("getMiddle('" + prompt + "')");

    const configuration = new Configuration({
        apiKey: "my-api-key-here",
    });
    const openai = new OpenAIApi(configuration);

    return openai.createCompletion({
            model: "text-davinci-003",
            prompt: prompt,
            temperature: 0.7,
            max_tokens: 500,
            top_p: 1,
            frequency_penalty: 0,
            presence_penalty: 0,
    });
}

function getEnd(resp)
{
    return resp + '</sentence>';
}

Response

{
  "statusCode": 200,
  "headers": {
    "Content-type": "application/json"
  },
  "body": "\"<sentence>Hi there, \\n\\nThe fox usually makes a sound similar to \\\"Skrr! Skrr!\\\"</sentence>\""
}

Function Logs

START RequestId: 41cc46f8-9a9b-4500-854e-574739511e2e Version: $LATEST
2023-04-27T10:55:22.820Z	41cc46f8-9a9b-4500-854e-574739511e2e	INFO	getMiddle('what did the fox say?')
2023-04-27T10:55:25.040Z	41cc46f8-9a9b-4500-854e-574739511e2e	INFO	result: <sentence>Hi there, 

The fox usually makes a sound similar to "Skrr! Skrr!"</sentence>
END RequestId: 41cc46f8-9a9b-4500-854e-574739511e2e
dmb0058
answered a year ago
0

Hi,

I think your issue is due to that in getMiddle function, if you use await, you do not need to chain or with a then statement.

You could just say

Const res = await openai.createCompletion(…)

See below as example: https://levelup.gitconnected.com/build-your-personal-speaker-assistant-with-amplify-and-chatgpt-8b6433fea042

Hope it helps ;)

profile picture
EXPERT
answered a year ago
0

Hmm, I wondered about that, but it doesn't seem to make a difference.

I read your excellent article on Amplify and ChatGPT, and I thought I understood most of it - I'm still studying and learning :) The example that you gave to my related question worked perfectly, with no synchronous calls in the chain. However when I tried to integrate async and sync functions the async ones behave synchronously (actually I think they just return a Promise and the caller continues regardless).

I don't understand (mea culpa) why the call to getMiddle() in makeSentence() doesn't seem to wait, it sends the request and then goes straight to the getClosing() call ... I suspect the problem is something to do with how async functions are off-boarded to the Node environment, but I'm just guessing here.

dmb0058
answered a year ago

You are not logged in. Log in to post an answer.

A good answer clearly answers the question and provides constructive feedback and encourages professional growth in the question asker.

Guidelines for Answering Questions