Issue 3489 - Upgrade to Java 17 #3499

Merged: 26 commits, Oct 21, 2024
Commits
5fbc1b2
Upgrade to Java 17 in Maven build
patchwork01 Oct 16, 2024
a417c7c
Remove Java 11 from EMR Serverless Dockerfile
patchwork01 Oct 16, 2024
5272c1c
Upgrade to Java 17 in EKS Dockerfiles
patchwork01 Oct 16, 2024
0c38a47
Upgrade to Java 17 in ECS task Dockerfiles
patchwork01 Oct 16, 2024
3e98ee5
Upgrade lambda runtime
patchwork01 Oct 16, 2024
1f6ba20
Upgrade Ubuntu in CLI
patchwork01 Oct 16, 2024
9c05431
Fix Python install in builder image after Ubuntu upgrade
patchwork01 Oct 16, 2024
65b127a
Install Amazon Corretto JDK in CLI
patchwork01 Oct 16, 2024
f85eaf2
Log time taken for each system test
patchwork01 Oct 16, 2024
9906a96
Delete conflicting user & group
patchwork01 Oct 16, 2024
d73741b
Move ubuntu user deletion to dependencies image
patchwork01 Oct 16, 2024
871ce66
Merge branch '2000-upgrade-emr' into 3489-upgrade-java
patchwork01 Oct 16, 2024
3592062
Upgrade Java runtime in build uptime lambda
patchwork01 Oct 17, 2024
78b2e0f
Merge branch '2000-upgrade-emr' into 3489-upgrade-java
patchwork01 Oct 17, 2024
24db472
Adjust JAVA_HOME for EMR
patchwork01 Oct 17, 2024
a4d433d
Fix EmrServerlessPlatformExecutorIT
patchwork01 Oct 17, 2024
9b3dad2
Set add-opens in ECS Docker images
patchwork01 Oct 17, 2024
7d5775d
Add EKS logging namespace
patchwork01 Oct 18, 2024
d53f96d
Configure FluentBit logging in EKS
patchwork01 Oct 18, 2024
21b8af4
Pre-create log group for EKS
patchwork01 Oct 18, 2024
a90229a
Adjust FluentBit config, comment
patchwork01 Oct 18, 2024
b57df79
Use emr-on-eks Docker image for EKS
patchwork01 Oct 21, 2024
d0b12bd
Revert "Use emr-on-eks Docker image for EKS"
patchwork01 Oct 21, 2024
4a9daa7
Override Spark jars in Docker image for EKS
patchwork01 Oct 21, 2024
121e54e
Comment explaining Spark jars
patchwork01 Oct 21, 2024
0c35879
Manage version of OkHttp logging-interceptor
patchwork01 Oct 21, 2024
Original file line number Diff line number Diff line change
@@ -74,7 +74,7 @@
*/
public class ConfigurationUtils {

private static final String JAVA_HOME = "/usr/lib/jvm/java-11-amazon-corretto.%s";
private static final String JAVA_HOME = "/usr/lib/jvm/java-17-amazon-corretto.%s";

private ConfigurationUtils() {
}
Original file line number Diff line number Diff line change
@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
ARG BUILDER_IMAGE_NAME=maven
ARG BUILDER_IMAGE_TAG=3.8-openjdk-8-slim
ARG BUILDER_IMAGE_TAG=3.9-amazoncorretto-17-al2023

ARG BASE_IMAGE_NAME=amazoncorretto
ARG BASE_IMAGE_TAG=11-al2023-headless
ARG BASE_IMAGE_TAG=17-al2023-headless

ARG SPARK_VERSION=3.5.1
ARG HADOOP_VERSION=3.3.6
Original file line number Diff line number Diff line change
@@ -12,10 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

FROM apache/spark:3.5.1-scala2.12-java11-ubuntu
FROM apache/spark:3.5.1-scala2.12-java17-ubuntu

ENV PATH="$PATH:/opt/spark/bin"
USER root
RUN rm /opt/spark/jars/*
RUN mkdir /opt/spark/workdir
USER spark

# Replace Spark jars with versions managed by Sleeper (the version of Spark must match)
COPY ./spark/* /opt/spark/jars
COPY ./bulk-import-runner.jar /opt/spark/workdir
66 changes: 66 additions & 0 deletions java/bulk-import/bulk-import-eks/pom.xml
@@ -0,0 +1,66 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Copyright 2022-2024 Crown Copyright
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>sleeper</groupId>
<artifactId>bulk-import</artifactId>
<version>0.26.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>

<artifactId>bulk-import-eks</artifactId>

<dependencies>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_${scala.version}</artifactId>
<version>${spark.version}</version>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-kubernetes_${scala.version}</artifactId>
<version>${spark.version}</version>
<scope>runtime</scope>
</dependency>
</dependencies>

<build>
<plugins>
<plugin>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<!-- The Spark Docker image uses an old version of Hadoop, which is not compatible with the -->
<!-- version used in EMR. -->
<!-- This outputs the jars required to run Spark in EKS, to completely replace the jars -->
<!-- in the Spark Docker image with versions managed by Sleeper. -->
<!-- The version of Spark must match between the base image and Sleeper. -->
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<outputDirectory>${project.build.directory}/spark</outputDirectory>
<includeScope>runtime</includeScope>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
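
The jars copied by this plugin execution have to match the Spark version baked into the apache/spark:3.5.1-scala2.12-java17-ubuntu base image used by the EKS Dockerfile above. The parent POM that pins these versions is not part of this diff, so the snippet below is only a sketch of how the properties are presumably defined: the property names are taken from the ${spark.version} and ${scala.version} placeholders used in this file, and the values are inferred from the base image tag and the SPARK_VERSION build argument.

    <!-- Hypothetical excerpt from a parent POM (not included in this PR). -->
    <!-- These values must stay in lockstep with the Spark base image tag above. -->
    <properties>
        <spark.version>3.5.1</spark.version>
        <scala.version>2.12</scala.version>
    </properties>
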
Original file line number Diff line number Diff line change
@@ -15,12 +15,7 @@
FROM public.ecr.aws/emr-serverless/spark/emr-7.2.0:latest

USER root

# Install JDK 11
RUN yum update -y && \
yum install java-11-amazon-corretto -y && \
mkdir /workdir

RUN mkdir /workdir
COPY ./bulk-import-runner.jar /workdir

# EMR Serverless will run the image as hadoop
Original file line number Diff line number Diff line change
@@ -101,7 +101,7 @@ void shouldRunAServerlessJob(WireMockRuntimeInfo runtimeInfo) {
assertThatJson(body)
.inPath("$.jobDriver.sparkSubmit.sparkSubmitParameters").asString()
.startsWith("--class BulkImportClass ")
.contains(" --conf spark.executorEnv.JAVA_HOME=/usr/lib/jvm/java-11-amazon-corretto.x86_64 ");
.contains(" --conf spark.executorEnv.JAVA_HOME=/usr/lib/jvm/java-17-amazon-corretto.x86_64 ");
});
}

1 change: 1 addition & 0 deletions java/bulk-import/pom.xml
@@ -30,5 +30,6 @@
<module>bulk-import-common</module>
<module>bulk-import-runner</module>
<module>bulk-import-starter</module>
<module>bulk-import-eks</module>
</modules>
</project>
Original file line number Diff line number Diff line change
@@ -21,6 +21,7 @@
import software.amazon.awscdk.services.lambda.Code;
import software.amazon.awscdk.services.lambda.Function;
import software.amazon.awscdk.services.lambda.IFunction;
import software.amazon.awscdk.services.lambda.Runtime;
import software.constructs.Construct;

import sleeper.environment.cdk.config.AppContext;
@@ -31,7 +32,6 @@
import java.util.Map;

import static sleeper.environment.cdk.config.AppParameters.INSTANCE_ID;
import static software.amazon.awscdk.services.lambda.Runtime.JAVA_11;

public class BuildUptimeDeployment {
public static final OptionalStringParameter LAMBDA_JAR = AppParameters.BUILD_UPTIME_LAMBDA_JAR;
@@ -48,7 +48,7 @@ public BuildUptimeDeployment(Construct scope) {
.code(Code.fromAsset(lambdaJarPath))
.functionName("sleeper-" + context.get(INSTANCE_ID) + "-build-uptime")
.description("Start and stop EC2 instances and schedule rules")
.runtime(JAVA_11)
.runtime(Runtime.JAVA_17)
.memorySize(1024)
.timeout(Duration.minutes(10))
.handler("sleeper.build.uptime.lambda.BuildUptimeLambda::handleRequest")
2 changes: 1 addition & 1 deletion java/cdk/src/main/java/sleeper/cdk/stack/AthenaStack.java
@@ -162,7 +162,7 @@ private IFunction createConnector(
.functionName(functionName)
.memorySize(memory)
.timeout(Duration.seconds(timeout))
.runtime(Runtime.JAVA_11)
.runtime(Runtime.JAVA_17)
.logGroup(coreStacks.getLogGroupByFunctionName(functionName))
.handler(className)
.environment(env));
10 changes: 5 additions & 5 deletions java/cdk/src/main/java/sleeper/cdk/stack/CompactionStack.java
@@ -65,6 +65,7 @@
import software.amazon.awscdk.services.iam.PolicyStatement;
import software.amazon.awscdk.services.lambda.CfnPermission;
import software.amazon.awscdk.services.lambda.IFunction;
import software.amazon.awscdk.services.lambda.Runtime;
import software.amazon.awscdk.services.lambda.eventsources.SqsEventSource;
import software.amazon.awscdk.services.s3.Bucket;
import software.amazon.awscdk.services.s3.IBucket;
@@ -138,7 +139,6 @@
import static sleeper.core.properties.instance.CompactionProperty.COMPACTION_TASK_CPU_ARCHITECTURE;
import static sleeper.core.properties.instance.CompactionProperty.COMPACTION_TASK_CREATION_PERIOD_IN_MINUTES;
import static sleeper.core.properties.instance.CompactionProperty.ECR_COMPACTION_REPO;
import static software.amazon.awscdk.services.lambda.Runtime.JAVA_11;

/**
* Deploys the resources needed to perform compaction jobs. Specifically, there is:
@@ -271,7 +271,7 @@ private void lambdaToCreateCompactionJobsBatchedViaSQS(
IFunction triggerFunction = jobCreatorJar.buildFunction(this, "CompactionJobsCreationTrigger", builder -> builder
.functionName(triggerFunctionName)
.description("Create batches of tables and send requests to create compaction jobs for those batches")
.runtime(JAVA_11)
.runtime(Runtime.JAVA_17)
.memorySize(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_TIMEOUT_IN_SECONDS)))
.handler("sleeper.compaction.job.creation.lambda.CreateCompactionJobsTriggerLambda::handleRequest")
@@ -282,7 +282,7 @@
IFunction handlerFunction = jobCreatorJar.buildFunction(this, "CompactionJobsCreationHandler", builder -> builder
.functionName(functionName)
.description("Scan the state stores of the provided tables looking for compaction jobs to create")
.runtime(JAVA_11)
.runtime(Runtime.JAVA_17)
.memorySize(instanceProperties.getInt(COMPACTION_JOB_CREATION_LAMBDA_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(COMPACTION_JOB_CREATION_LAMBDA_TIMEOUT_IN_SECONDS)))
.handler("sleeper.compaction.job.creation.lambda.CreateCompactionJobsLambda::handleRequest")
@@ -594,7 +594,7 @@ private IFunction lambdaForCustomTerminationPolicy(CoreStacks coreStacks, Lambda
.handler("sleeper.compaction.task.creation.SafeTerminationLambda::handleRequest")
.logGroup(coreStacks.getLogGroupByFunctionName(functionName))
.memorySize(512)
.runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
.runtime(Runtime.JAVA_17)
.timeout(Duration.seconds(10)));

coreStacks.grantReadInstanceConfig(handler);
@@ -619,7 +619,7 @@
IFunction handler = taskCreatorJar.buildFunction(this, "CompactionTasksCreator", builder -> builder
.functionName(functionName)
.description("If there are compaction jobs on queue create tasks to run them")
.runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
.runtime(Runtime.JAVA_17)
.memorySize(instanceProperties.getInt(TASK_RUNNER_LAMBDA_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(TASK_RUNNER_LAMBDA_TIMEOUT_IN_SECONDS)))
.handler("sleeper.compaction.task.creation.RunCompactionTasksLambda::eventHandler")
4 changes: 4 additions & 0 deletions java/cdk/src/main/java/sleeper/cdk/stack/CoreStacks.java
@@ -72,6 +72,10 @@ public ILogGroup getLogGroupByStateMachineId(String id) {
return loggingStack.getLogGroupByStateMachineId(id);
}

public ILogGroup getLogGroupByEksClusterName(String clusterName) {
return loggingStack.getLogGroupByEksClusterName(clusterName);
}

public void grantReadInstanceConfig(IGrantable grantee) {
configBucketStack.grantRead(grantee);
}
Original file line number Diff line number Diff line change
@@ -85,7 +85,7 @@ public GarbageCollectorStack(
IFunction triggerFunction = gcJar.buildFunction(this, "GarbageCollectorTrigger", builder -> builder
.functionName(triggerFunctionName)
.description("Creates batches of Sleeper tables to perform garbage collection for and puts them on a queue to be processed")
.runtime(Runtime.JAVA_11)
.runtime(Runtime.JAVA_17)
.handler("sleeper.garbagecollector.GarbageCollectorTriggerLambda::handleRequest")
.environment(Utils.createDefaultEnvironment(instanceProperties))
.reservedConcurrentExecutions(1)
@@ -95,7 +95,7 @@
IFunction handlerFunction = gcJar.buildFunction(this, "GarbageCollectorLambda", builder -> builder
.functionName(functionName)
.description("Scan the state store looking for files that need deleting and delete them")
.runtime(Runtime.JAVA_11)
.runtime(Runtime.JAVA_17)
.memorySize(instanceProperties.getInt(GARBAGE_COLLECTOR_LAMBDA_MEMORY_IN_MB))
.timeout(handlerTimeout)
.handler("sleeper.garbagecollector.GarbageCollectorLambda::handleRequest")
Original file line number Diff line number Diff line change
@@ -27,6 +27,7 @@
import software.amazon.awscdk.services.events.Schedule;
import software.amazon.awscdk.services.events.targets.LambdaFunction;
import software.amazon.awscdk.services.lambda.IFunction;
import software.amazon.awscdk.services.lambda.Runtime;
import software.amazon.awscdk.services.lambda.eventsources.SqsEventSource;
import software.amazon.awscdk.services.s3.Bucket;
import software.amazon.awscdk.services.s3.IBucket;
@@ -139,7 +140,7 @@ public IngestBatcherStack(
IFunction submitterLambda = submitterJar.buildFunction(this, "SubmitToIngestBatcherLambda", builder -> builder
.functionName(submitterName)
.description("Triggered by an SQS event that contains a request to ingest a file")
.runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
.runtime(Runtime.JAVA_17)
.memorySize(instanceProperties.getInt(INGEST_BATCHER_SUBMITTER_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(INGEST_BATCHER_SUBMITTER_TIMEOUT_IN_SECONDS)))
.handler("sleeper.ingest.batcher.submitter.IngestBatcherSubmitterLambda::handleRequest")
@@ -156,7 +157,7 @@
IFunction jobCreatorLambda = jobCreatorJar.buildFunction(this, "IngestBatcherJobCreationLambda", builder -> builder
.functionName(jobCreatorName)
.description("Create jobs by batching up submitted file ingest requests")
.runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
.runtime(Runtime.JAVA_17)
.memorySize(instanceProperties.getInt(INGEST_BATCHER_JOB_CREATION_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(INGEST_BATCHER_JOB_CREATION_TIMEOUT_IN_SECONDS)))
.handler("sleeper.ingest.batcher.job.creator.IngestBatcherJobCreatorLambda::eventHandler")
3 changes: 2 additions & 1 deletion java/cdk/src/main/java/sleeper/cdk/stack/IngestStack.java
@@ -38,6 +38,7 @@
import software.amazon.awscdk.services.iam.ManagedPolicy;
import software.amazon.awscdk.services.iam.PolicyStatement;
import software.amazon.awscdk.services.lambda.IFunction;
import software.amazon.awscdk.services.lambda.Runtime;
import software.amazon.awscdk.services.s3.Bucket;
import software.amazon.awscdk.services.s3.IBucket;
import software.amazon.awscdk.services.sns.Topic;
@@ -250,7 +251,7 @@ private void lambdaToCreateIngestTasks(CoreStacks coreStacks, Queue ingestJobQue
IFunction handler = taskCreatorJar.buildFunction(this, "IngestTasksCreator", builder -> builder
.functionName(functionName)
.description("If there are ingest jobs on queue create tasks to run them")
.runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
.runtime(Runtime.JAVA_17)
.memorySize(instanceProperties.getInt(TASK_RUNNER_LAMBDA_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(TASK_RUNNER_LAMBDA_TIMEOUT_IN_SECONDS)))
.handler("sleeper.ingest.starter.RunIngestTasksLambda::eventHandler")
Original file line number Diff line number Diff line change
@@ -69,7 +69,7 @@ public KeepLambdaWarmStack(Construct scope,
IFunction handler = queryJar.buildFunction(this, "WarmQueryExecutorLambda", builder -> builder
.functionName(functionName)
.description("Sends a message to query-executor lambda in order for it to stay warm")
.runtime(Runtime.JAVA_11)
.runtime(Runtime.JAVA_17)
.memorySize(instanceProperties.getInt(QUERY_PROCESSOR_LAMBDA_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(QUERY_PROCESSOR_LAMBDA_TIMEOUT_IN_SECONDS)))
.handler("sleeper.query.lambda.WarmQueryExecutorLambda::handleRequest")
5 changes: 5 additions & 0 deletions java/cdk/src/main/java/sleeper/cdk/stack/LoggingStack.java
@@ -62,6 +62,7 @@ public LoggingStack(Construct scope, String id, InstanceProperties instancePrope
createLogGroup("bulk-import-NonPersistentEMR-start");
createLogGroup("bulk-import-PersistentEMR-start");
createLogGroup("bulk-import-eks-starter");
createLogGroup("bulk-import-eks");
createStateMachineLogGroup("EksBulkImportStateMachine");
createLogGroup("bulk-import-autodelete");
createLogGroup("bulk-import-autodelete-provider");
@@ -108,6 +109,10 @@ public ILogGroup getLogGroupByStateMachineId(String id) {
return getLogGroupByNameWithPrefixes(addStateMachineNamePrefixes(id));
}

public ILogGroup getLogGroupByEksClusterName(String clusterName) {
return getLogGroupByNameWithPrefixes(clusterName);
}

private ILogGroup getLogGroupByNameWithPrefixes(String nameWithPrefixes) {
return Objects.requireNonNull(logGroupByName.get(nameWithPrefixes), "No log group found: " + nameWithPrefixes);
}
Original file line number Diff line number Diff line change
@@ -24,6 +24,7 @@
import software.amazon.awscdk.services.events.Schedule;
import software.amazon.awscdk.services.events.targets.LambdaFunction;
import software.amazon.awscdk.services.lambda.IFunction;
import software.amazon.awscdk.services.lambda.Runtime;
import software.amazon.awscdk.services.lambda.eventsources.SqsEventSource;
import software.amazon.awscdk.services.s3.Bucket;
import software.amazon.awscdk.services.s3.IBucket;
@@ -188,7 +189,7 @@ private void createTriggerFunction(InstanceProperties instanceProperties, Lambda
IFunction triggerFunction = splitterJar.buildFunction(this, "FindPartitionsToSplitTriggerLambda", builder -> builder
.functionName(triggerFunctionName)
.description("Creates batches of Sleeper tables to perform partition splitting for and puts them on a queue to be processed")
.runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
.runtime(Runtime.JAVA_17)
.memorySize(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_TIMEOUT_IN_SECONDS)))
.handler("sleeper.splitter.lambda.FindPartitionsToSplitTriggerLambda::handleRequest")
@@ -218,7 +219,7 @@ private void createFindPartitionsToSplitFunction(InstanceProperties instanceProp
IFunction findPartitionsToSplitLambda = splitterJar.buildFunction(this, "FindPartitionsToSplitLambda", builder -> builder
.functionName(functionName)
.description("Scan the state stores of the provided tables looking for partitions that need splitting")
.runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
.runtime(Runtime.JAVA_17)
.memorySize(instanceProperties.getInt(FIND_PARTITIONS_TO_SPLIT_LAMBDA_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(FIND_PARTITIONS_TO_SPLIT_TIMEOUT_IN_SECONDS)))
.handler("sleeper.splitter.lambda.FindPartitionsToSplitLambda::handleRequest")
@@ -244,7 +245,7 @@ private void createSplitPartitionFunction(InstanceProperties instanceProperties,
IFunction splitPartitionLambda = splitterJar.buildFunction(this, "SplitPartitionLambda", builder -> builder
.functionName(splitFunctionName)
.description("Triggered by an SQS event that contains a partition to split")
.runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
.runtime(Runtime.JAVA_17)
.memorySize(instanceProperties.getInt(SPLIT_PARTITIONS_LAMBDA_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(SPLIT_PARTITIONS_TIMEOUT_IN_SECONDS)))
.reservedConcurrentExecutions(concurrency)