By using AWS re:Post, you agree to the Terms of Use

Unanswered Questions tagged with Internet of Things

Sort by most recent
  • 1
  • 2
  • 12 / page

Browse through the questions and answers listed below or filter and sort to narrow down your results.

Error from Glue job

I got the below error from glue job Jul 19, 2022, 9:45:00 PM Pending execution 2022-07-20 01:45:13,382 main WARN JNDI lookup class is not available because this JRE does not support JNDI. JNDI string lookups will not be available, continuing configuration. java.lang.ClassNotFoundException: org.apache.logging.log4j.core.lookup.JndiLookup at java.net.URLClassLoader.findClass(URLClassLoader.java:387) at java.lang.ClassLoader.loadClass(ClassLoader.java:418) at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:352) at java.lang.ClassLoader.loadClass(ClassLoader.java:351) at java.lang.Class.forName0(Native Method) at java.lang.Class.forName(Class.java:264) at org.apache.logging.log4j.util.LoaderUtil.loadClass(LoaderUtil.java:173) at org.apache.logging.log4j.util.LoaderUtil.newInstanceOf(LoaderUtil.java:211) at org.apache.logging.log4j.util.LoaderUtil.newCheckedInstanceOf(LoaderUtil.java:232) at org.apache.logging.log4j.core.util.Loader.newCheckedInstanceOf(Loader.java:301) at org.apache.logging.log4j.core.lookup.Interpolator.<init>(Interpolator.java:95) at org.apache.logging.log4j.core.config.AbstractConfiguration.<init>(AbstractConfiguration.java:114) at org.apache.logging.log4j.core.config.DefaultConfiguration.<init>(DefaultConfiguration.java:55) at org.apache.logging.log4j.core.layout.PatternLayout$Builder.build(PatternLayout.java:430) at org.apache.logging.log4j.core.layout.PatternLayout.createDefaultLayout(PatternLayout.java:324) at org.apache.logging.log4j.core.appender.ConsoleAppender$Builder.<init>(ConsoleAppender.java:121) at org.apache.logging.log4j.core.appender.ConsoleAppender.newBuilder(ConsoleAppender.java:111) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at 
org.apache.logging.log4j.core.config.plugins.util.PluginBuilder.createBuilder(PluginBuilder.java:158) at org.apache.logging.log4j.core.config.plugins.util.PluginBuilder.build(PluginBuilder.java:119) at org.apache.logging.log4j.core.config.AbstractConfiguration.createPluginObject(AbstractConfiguration.java:813) at org.apache.logging.log4j.core.config.AbstractConfiguration.createConfiguration(AbstractConfiguration.java:753) at org.apache.logging.log4j.core.config.AbstractConfiguration.createConfiguration(AbstractConfiguration.java:745) at org.apache.logging.log4j.core.config.AbstractConfiguration.doConfigure(AbstractConfiguration.java:389) at org.apache.logging.log4j.core.config.AbstractConfiguration.initialize(AbstractConfiguration.java:169) at org.apache.logging.log4j.core.config.AbstractConfiguration.start(AbstractConfiguration.java:181) at org.apache.logging.log4j.core.LoggerContext.setConfiguration(LoggerContext.java:446) at org.apache.logging.log4j.core.LoggerContext.reconfigure(LoggerContext.java:520) at org.apache.logging.log4j.core.LoggerContext.reconfigure(LoggerContext.java:536) at org.apache.logging.log4j.core.LoggerContext.start(LoggerContext.java:214) at org.apache.logging.log4j.core.impl.Log4jContextFactory.getContext(Log4jContextFactory.java:146) at org.apache.logging.log4j.core.impl.Log4jContextFactory.getContext(Log4jContextFactory.java:41) at org.apache.logging.log4j.LogManager.getContext(LogManager.java:194) at org.apache.logging.log4j.LogManager.getLogger(LogManager.java:597) at org.apache.spark.metrics.sink.MetricsConfigUtils.<clinit>(MetricsConfigUtils.java:12) at org.apache.spark.metrics.sink.MetricsProxyInfo.fromConfig(MetricsProxyInfo.java:17) at com.amazonaws.services.glue.cloudwatch.CloudWatchLogsAppenderCommon.<init>(CloudWatchLogsAppenderCommon.java:62) at com.amazonaws.services.glue.cloudwatch.CloudWatchLogsAppenderCommon$CloudWatchLogsAppenderCommonBuilder.build(CloudWatchLogsAppenderCommon.java:79) at 
com.amazonaws.services.glue.cloudwatch.CloudWatchAppender.activateOptions(CloudWatchAppender.java:73) at org.apache.log4j.config.PropertySetter.activate(PropertySetter.java:307) at org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:172) at org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:104) at org.apache.log4j.PropertyConfigurator.parseAppender(PropertyConfigurator.java:842) at org.apache.log4j.PropertyConfigurator.parseCategory(PropertyConfigurator.java:768) at org.apache.log4j.PropertyConfigurator.parseCatsAndRenderers(PropertyConfigurator.java:672) at org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:516) at org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:580) at org.apache.log4j.helpers.OptionConverter.selectAndConfigure(OptionConverter.java:526) at org.apache.log4j.LogManager.<clinit>(LogManager.java:127) at org.slf4j.impl.Log4jLoggerFactory.getLogger(Log4jLoggerFactory.java:81) at org.slf4j.LoggerFactory.getLogger(LoggerFactory.java:358) at org.slf4j.LoggerFactory.getLogger(LoggerFactory.java:383) at org.apache.spark.network.util.JavaUtils.<clinit>(JavaUtils.java:41) at org.apache.spark.internal.config.ConfigHelpers$.byteFromString(ConfigBuilder.scala:67) at org.apache.spark.internal.config.ConfigBuilder$$anonfun$bytesConf$1.apply(ConfigBuilder.scala:235) at org.apache.spark.internal.config.ConfigBuilder$$anonfun$bytesConf$1.apply(ConfigBuilder.scala:235) at org.apache.spark.internal.config.TypedConfigBuilder$$anonfun$transform$1.apply(ConfigBuilder.scala:101) at org.apache.spark.internal.config.TypedConfigBuilder$$anonfun$transform$1.apply(ConfigBuilder.scala:101) at org.apache.spark.internal.config.TypedConfigBuilder.createWithDefault(ConfigBuilder.scala:143) at org.apache.spark.internal.config.package$.<init>(package.scala:121) at org.apache.spark.internal.config.package$.<clinit>(package.scala) at org.apache.spark.SparkConf$.<init>(SparkConf.scala:716) at 
org.apache.spark.SparkConf$.<clinit>(SparkConf.scala) at org.apache.spark.SparkConf.set(SparkConf.scala:95) at org.apache.spark.SparkConf$$anonfun$loadFromSystemProperties$3.apply(SparkConf.scala:77) at org.apache.spark.SparkConf$$anonfun$loadFromSystemProperties$3.apply(SparkConf.scala:76) at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) at scala.collection.immutable.HashMap$HashMap1.foreach(HashMap.scala:221) at scala.collection.immutable.HashMap$HashTrieMap.foreach(HashMap.scala:428) at scala.collection.immutable.HashMap$HashTrieMap.foreach(HashMap.scala:428) at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) at org.apache.spark.SparkConf.loadFromSystemProperties(SparkConf.scala:76) at org.apache.spark.SparkConf.<init>(SparkConf.scala:71) at org.apache.spark.SparkConf.<init>(SparkConf.scala:58) at com.amazonaws.services.glue.SparkProcessLauncherPlugin$class.getSparkConf(ProcessLauncher.scala:41) at com.amazonaws.services.glue.ProcessLauncher$$anon$1.getSparkConf(ProcessLauncher.scala:78) at com.amazonaws.services.glue.ProcessLauncher.<init>(ProcessLauncher.scala:84) at com.amazonaws.services.glue.ProcessLauncher.<init>(ProcessLauncher.scala:78) at com.amazonaws.services.glue.ProcessLauncher$.main(ProcessLauncher.scala:29) at com.amazonaws.services.glue.ProcessLauncher.main(ProcessLauncher.scala) 2022-07-20 01:45:13,386 main INFO Log4j appears to be running in a Servlet environment, but there's no log4j-web module available. If you want better web container support, please add the log4j-web JAR to your web archive or server lib directory. 
Traceback (most recent call last): File "/opt/amazon/bin/runscript.py", line 67, in <module> runpy.run_path(script, run_name='__main__') File "/usr/lib64/python3.7/runpy.py", line 261, in run_path code, fname = _get_code_from_file(run_name, path_name) File "/usr/lib64/python3.7/runpy.py", line 236, in _get_code_from_file code = compile(f.read(), fname, 'exec') File "/tmp/oracle-postgres", line 5 <output> = glueContext.write_dynamic_frame.from_jdbc_conf(frame = <frame>, catalog_connection = "postgres-rds", connection_options = {"database" : "bm", "dbtable" : "bessemer"}, redshift_tmp_dir = args["TempDir"], transformation_ctx = "<transformation_ctx>") ^ SyntaxError: invalid syntax During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/amazon/bin/runscript.py", line 100, in <module> while "runpy.py" in new_stack.tb_frame.f_code.co_filename: AttributeError: 'NoneType' object has no attribute 'tb_frame'
0
answers
0
votes
33
views
asked 2 months ago

Greengrass v2: Operating in an Ubuntu Snap

I feel like I'm so close to this working, but I have one hurdle I can't quite overcome. I'm developing an Ubuntu Snap which contains all the runtime components we require for our greengrass deployments (node, python, java, etc.). After install, the entrypoint of my main snap daemon checks to see if a greengrass install exists at $SNAP_COMMON and if not, tries to initialize greengrass as follows: ```bash java \ -Droot="${GG_ROOT}" \ -Dlog.store=FILE \ -Djava.io.tmpdir=$GG_TMP \ -Djava.class.path=$SNAP/usr/lib/jvm/java-11-amazon-corretto/lib:/usr/lib \ -Duser.home=$HOME \ -jar $GG_ROOT/Greengrass.jar \ --component-default-user ggc_user:ggc_group \ --root $GG_ROOT \ --start false \ --setup-system-service false \ --provision true \ --aws-region ${AWS_REGION:=us-east-1} \ --thing-name $THING_NAME \ --thing-group-name ${THING_GROUP:=Default} \ --thing-policy-name $THING_POLICY_NAME \ ``` * `$GG_ROOT` is set to `$SNAP_COMMON/greengrass/v2` (comes out to something like `/var/snap/my-snap/greengrass/v2`). * `$GG_ROOT/Greengrass.jar` is copied from my snap (which I include during the snap build process) * `$THING_NAME`, `$THING_GROUP`, etc. are set by sourcing an identity file installed on each server. This seems to almost work. `Greengrass.jar` extracts everything to `$GG_ROOT` except for `alts/init/distro`... For some reason that gets unpacked to `$SNAP` (so `$SNAP/bin` has `greengrass.exe`, `loader`, etc.). If I set `start` to true, it launches correctly and I even get logs at `$GG_ROOT/logs`; it's just the initial deployment goes off to lala land. I haven't tried issuing a new deployment in this configuration, but can't imagine it'd work (since there are no symlinks). Would anyone happen to have a little insight into why the initial nucleus deployment is unpacking at `$SNAP` here instead of `$GG_ROOT/packages/...` and symlinked to `$GG_ROOT/alts/init`? I feel like if I can figure out this init deployment, it should work... 
:crossed_fingers: **Code tracing**: I _think_ my issue can start tracing at [KernelAlternatives](https://github.com/aws-greengrass/aws-greengrass-nucleus/blob/42ae03784014a44bc3baf90054a48d7a99fb1e77/src/main/java/com/aws/greengrass/lifecyclemanager/KernelAlternatives.java#L69): ```java private Path getAltsDir() { return nucleusPaths.kernelAltsPath().toAbsolutePath(); } ``` which I think ultimately ends up somewhere around [KernelCommandLine:146](https://github.com/aws-greengrass/aws-greengrass-nucleus/blob/42ae03784014a44bc3baf90054a48d7a99fb1e77/src/main/java/com/aws/greengrass/lifecyclemanager/KernelCommandLine.java#L146) ```java kernel.getConfig().lookup("system", "rootpath").dflt(rootAbsolutePath) .subscribe((whatHappened, topic) -> initPaths(Coerce.toString(topic))); } ``` which leads to [KernelCommandLine:166](https://github.com/aws-greengrass/aws-greengrass-nucleus/blob/42ae03784014a44bc3baf90054a48d7a99fb1e77/src/main/java/com/aws/greengrass/lifecyclemanager/KernelCommandLine.java#L166). ```java private void initPaths(String rootAbsolutePath) { ... } ``` Unfortunately this doesn't really explain why the install gets split up... since `kernelAltPaths` is relative to `rootPath`, which should be getting set to `$GG_ROOT` :(
0
answers
0
votes
40
views
asked 2 months ago

Expo build - APK upload fails when using aws-cli command via GitHub Actions, but works from terminal (local)

Command used in GitHub Actions to download the APK from Expo: latest_build=$(npx eas-cli build:list --status="finished" --distribution="store" --json | jq '[.[] | select(.releaseChannel=="development")][1].artifacts.buildUrl') Command used in GitHub Actions for create-upload and upload in Device Farm: response_upload_app=$(aws devicefarm create-upload --project-arn $DEV_PROJECT_ARN --name latest_build.apk --type ANDROID_APP) curl -T latest_build.apk $url_upload_app The same command, when run locally in a terminal with the APK available in a folder, works perfectly fine. Also, at times, when running in the local terminal, it was giving a request timeout error. This is the error log in GitHub Actions when running the get-upload command for the corresponding create-upload ARN in Device Farm: "metadata": "{\"errorMessageUrl\":\"https://docs.aws.amazon.com/console/devicefarm/ANDROID_APP_AAPT_DEBUG_BADGING_FAILED\",\"errorMessage\":\"We could not extract information about your Android application package. Please verify that the application package is valid by running the command \\\"aapt debug badging <path to your test package>\\\", and try again after the command does not print any error.\",\"errorCode\":\"ANDROID_APP_AAPT_DEBUG_BADGING_FAILED\"} Debugging done so far: Ran this (aapt debug badging <path to apk>/latest_build.apk ) and was able to get the package information correctly
0
answers
0
votes
36
views
asked 3 months ago

Memory leak with AWS C SDK released by ESP for ESP32C3 (example OTA mqtt) on Internet disconnection and reconnection

Hi All, We are working on ESP32C3 for OTA functionality using https://github.com/espressif/esp-aws-iot/tree/release/beta/examples/ota/ota_mqtt. We understand the original C SDK from AWS was updated by ESP to make it compatible with ESP32C3. As part of testing, the ESP32C3 is always connected to the router over WiFi (Mobile Hotspot always ON). We are toggling the internet connection to the router (**Mobile Data turned Off and ON**). We observed that on every internet reconnection, there is a memory leak of ~3 KB. The memory leak eventually gets restored, but it takes ~12 min, ~17 min, or ~23 min. In one of the captured logs, the original free heap of 121828 got reduced to 118620 (~3K leak) after the internet was restored. After ~23 min, the free heap recovered to 121740. We are following up with ESP on this issue, but we are getting somewhat slow replies from the ESP side. We have the following observations: i) The AWS C SDK has a WiFi disconnect event, but there is no event to handle an Internet disconnect as we highlighted above - **Mobile Data turned Off and ON** ii) On WiFi disconnection and reconnection, the memory leak is not observed **Our queries/requests:** i) If possible, we request AWS to replicate the above testing on an ESP32 device and share the result. We are also trying to build the code for ESP32 - MQTT OTA released from the AWS side. We are getting some errors and are working on the same. ii) Provide input from the AWS side on the memory leak observation. That will help us work on this issue further. **Did you come across the mentioned memory leak error in the past?** iii) Provide input on the Internet disconnect event as we mentioned in observation 1 Thanks....
0
answers
0
votes
17
views
asked 5 months ago
0
answers
0
votes
72
views
SUPPORT ENGINEER
asked 5 months ago
0
answers
0
votes
37
views
asked 5 months ago

Launch Announcement: AWS IoT TwinMaker is now generally available

Today, we are announcing the general availability of [AWS IoT TwinMaker](https://aws.amazon.com/iot-twinmaker/), a service that makes it easier for developers to create digital twins of real-world systems such as buildings, factories, production lines, and equipment. Customers are increasingly adopting digital twins to make better operational and strategic decisions in industries such as smart buildings, manufacturing, construction, energy, power & utilities, and more. With AWS IoT TwinMaker you now have the tools you need to build digital twins to help you monitor and improve your industrial operations. With AWS IoT TwinMaker, you can quickly get started creating digital twins of equipment, processes, and facilities by connecting data from different data sources without having to re-ingest or move the data to another location. You can use built-in data connectors for the following AWS services: [AWS IoT SiteWise](https://aws.amazon.com/iot-sitewise/) for equipment and time-series sensor data, and [Amazon Kinesis Video Streams](https://aws.amazon.com/kinesis/video-streams/) for video data. AWS IoT TwinMaker also provides a framework for you to create your own data connectors to use with other AWS or third-party data sources (such as [Amazon Timestream](https://aws.amazon.com/timestream/), Snowflake, and Siemens MindSphere). Then, you can easily access all digital twin data using the AWS IoT TwinMaker unified data access API to underlying data sources without needing to query each data source individually. AWS IoT TwinMaker AWS IoT TwinMaker lets you model your physical environment using entities (e.g. any physical asset or system like a furnace, an assembly line, or an entire factory) and its components (e.g. data connectors) and then saves you time by automatically creating a knowledge (or digital twin) graph that combines and understands the relationships among the connected data sources. 
As your built environment evolves, for example if you add new sources of data, you can easily update your model so your digital twin remains accurate and current. Once the digital twin graph is created, you can then visualize the data in context of the physical environment. Using AWS IoT TwinMaker, you can import existing 3D models (such as CAD files, and point cloud scans) to compose and arrange 3D scenes of a physical space and its contents (e.g. a factory and its equipment) using simple 3D composition tools. To create a spatially aware visualization of your operations, you can then add interactive video and sensor data from connected data sources, insights from connected machine learning (ML) and simulation services, and equipment maintenance records and manuals. To help developers quickly build a web-based application for end users, such as plant operators and maintenance engineers, AWS IoT TwinMaker includes a plugin for Grafana and [Amazon Managed Grafana](https://aws.amazon.com/grafana/), a fully managed service for the open source dashboard and visualization platform from Grafana Labs. AWS IoT TwinMaker is generally available in regions of US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Europe (Frankfurt), and Europe (Ireland), with more regions to come soon. To learn more visit the AWS IoT TwinMaker product page, and to find an AWS Partner to help you on your digital twin journey, visit the AWS IoT TwinMaker [partner page](https://aws.amazon.com/iot-twinmaker/partners/). Use the [AWS Management Console](https://console.aws.amazon.com/iottwinmaker) to get started, or visit our [GitHub repository](https://github.com/aws-samples/aws-iot-twinmaker-samples) for a sample digital twin application.
0
answers
1
votes
32
views
asked 5 months ago

greengrasssdk.stream_manager.exceptions.UnknownFailureException: Broken bit parity

Hi there I have some problem with stream manager in ggv1, I can fix this after I reboot the device. However I dont think this is a good solution. Do you have any suggestion where I can look into ``` [2022-04-01T09:19:31.876+11:00][INFO]-imageCamera.py:108,>>> Get frame atm hour:9 - sleep_time:11 s [2022-04-01T09:19:31.876+11:00][INFO]-imageCamera.py:112,>>> Frame size from rtsp: (720, 1280, 3) [2022-04-01T09:19:31.889+11:00][INFO]-imageCamera.py:119,>>> Resized farme: (720, 1280, 3) [2022-04-01T09:19:32.002+11:00][INFO]-imageCamera.py:144, >>>>> realite-data-image Sleep time: 60 - payload size: 189004 [2022-04-01T09:19:32.002+11:00][INFO]-Response: 1065 [2022-04-01T09:19:39.336+11:00][INFO]-lambda_runtime.py:366,Caught signal 15. Stopping runtime. [2022-04-01T09:20:22.061+11:00][INFO]-imageCamera.py:173,>> Getting hub config ... [2022-04-01T09:20:22.064+11:00][INFO]-ipc_client.py:167,Posting work for function [:function:secret_loader] to http://localhost:8000/2016-11-01/functions/arn:aws:lambfunction:secret_loader [2022-04-01T09:20:22.078+11:00][INFO]-ipc_client.py:177,Work posted with invocation id [14ce6b2c-add7-4dc0-452d-be84d2700e4e] [2022-04-01T09:20:22.078+11:00][INFO]-ipc_client.py:290,Getting work result for invocation id [14ce6b2c-add7-4dc0-452d-be84d2700e4e] from http://localhost:8000/2016-11-01/functions/arn:aws:lambda:ap:function:secret_loader [2022-04-01T09:20:26.32+11:00][INFO]-ipc_client.py:298,Got result for invocation id [14ce6b2c-add7-4dc0-452d-be84d2700e4e] [2022-04-01T09:20:26.321+11:00][INFO]-imageCamera.py:175,>> Done ... 
[2022-04-01T09:20:26.321+11:00][INFO]-imageCamera.py:180,Setup tranfer stream [2022-04-01T09:20:26.628+11:00][INFO]-imageCamera.py:151, >>> Stream list: ['kstream1'] [2022-04-01T09:20:27.291+11:00][ERROR]-imageCamera.py:209, >>>>> Exception while running: Broken bit parity [2022-04-01T09:20:27.291+11:00][ERROR]-Traceback (most recent call last): [2022-04-01T09:20:27.291+11:00][ERROR]- File "/greengrass/ggc/deployment/lambda/.imageCamera.14/imageCamera.py", line 182, in main [2022-04-01T09:20:27.291+11:00][ERROR]- stream_client=setup_data_stream(stream_name,kinesis_stream_name) [2022-04-01T09:20:27.291+11:00][ERROR]- File "/greengrass/ggc/deployment/lambda/.function.imageCamera.14/imageCamera.py", line 154, in setup_data_stream [2022-04-01T09:20:27.291+11:00][ERROR]- client.delete_message_stream(stream_name=stream_name) [2022-04-01T09:20:27.291+11:00][ERROR]- File "/greengrass/ggc/deployment/lambda/arn.aws.lambda.ap-southeast-Camera.14/greengrasssdk/stream_manager/streammanagerclient.py", line 448, in delete_message_stream [2022-04-01T09:20:27.291+11:00][ERROR]- return Util.sync(self._delete_message_stream(stream_name), loop=self.__loop) [2022-04-01T09:20:27.291+11:00][ERROR]- File "/greengrass/ggc/deployment/lambda/.function.imageCamera.14/greengrasssdk/stream_manager/util.py", line 28, in sync [2022-04-01T09:20:27.291+11:00][ERROR]- return asyncio.run_coroutine_threadsafe(coro, loop=loop).result() [2022-04-01T09:20:27.291+11:00][ERROR]- File "/usr/lib/python3.7/concurrent/futures/_base.py", line 432, in result [2022-04-01T09:20:27.291+11:00][ERROR]- return self.__get_result() [2022-04-01T09:20:27.291+11:00][ERROR]- File "/usr/lib/python3.7/concurrent/futures/_base.py", line 384, in __get_result [2022-04-01T09:20:27.291+11:00][ERROR]- raise self._exception [2022-04-01T09:20:27.291+11:00][ERROR]- File "/greengrass/ggc/deployment/lambda/arn.aws.lambda.ap-southeast-2era.14/greengrasssdk/stream_manager/streammanagerclient.py", line 352, in _delete_message_stream 
[2022-04-01T09:20:27.291+11:00][ERROR]- Util.raise_on_error_response(delete_stream_response) [2022-04-01T09:20:27.291+11:00][ERROR]- File "/greengrass/ggc/deployment/lambda/ar.imageCamera.14/greengrasssdk/stream_manager/util.py", line 148, in raise_on_error_response [2022-04-01T09:20:27.291+11:00][ERROR]- raise UnknownFailureException(response.error_message, response.status, response.request_id) [2022-04-01T09:20:27.291+11:00][INFO]-imageCamera.py:210,>>> restart module after 10s ```
0
answers
0
votes
12
views
asked 6 months ago
  • 1
  • 2
  • 12 / page