diff --git a/Dockerfile b/Dockerfile index ff1ab960380..9ef108de7e0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -34,6 +34,7 @@ ENV CROMWELL_CHART_VERSION 0.2.523 ENV HAIL_BATCH_CHART_VERSION 0.2.0 ENV RSTUDIO_CHART_VERSION 0.12.0 ENV SAS_CHART_VERSION 0.17.0 +ENV JUPYTER_CHART_VERSION 0.1.0 RUN mkdir /leonardo COPY ./leonardo*.jar /leonardo @@ -56,8 +57,11 @@ RUN helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx && \ # .Files helm helper can't access files outside a chart. Hence in order to populate cert file properly, we're # pulling `terra-app-setup` locally and add cert files to the chart. As a result we need to pull all GKE -# charts locally as well so they can acess the local cert files during the helm install step, see https://helm.sh/docs/chart_template_guide/accessing_files/ +# charts locally as well so they can access the local cert files during the helm install step, see https://helm.sh/docs/chart_template_guide/accessing_files/ # Helm does not seem to support the direct installation of a chart located in OCI so let's pull it to a local directory for now. 
+COPY ./jupyter-0.1.0.tgz /leonardo +RUN tar -xzf /leonardo/jupyter-0.1.0.tgz -C /leonardo + RUN cd /leonardo && \ helm repo update && \ helm pull terra-app-setup-charts/terra-app-setup --version $TERRA_APP_SETUP_VERSION --untar && \ @@ -68,6 +72,7 @@ RUN cd /leonardo && \ helm pull terra-helm/rstudio --version $RSTUDIO_CHART_VERSION --untar && \ helm pull terra-helm/sas --version $SAS_CHART_VERSION --untar && \ helm pull oci://terradevacrpublic.azurecr.io/hail/hail-batch-terra-azure --version $HAIL_BATCH_CHART_VERSION --untar && \ +# helm pull terra-helm/jupyter --version $JUPYTER_CHART_VERSION --untar && \ cd / # Install https://github.com/apangin/jattach to get access to JDK tools diff --git a/core/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/JsonCodec.scala b/core/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/JsonCodec.scala index c2386a52507..1a63512fcf5 100644 --- a/core/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/JsonCodec.scala +++ b/core/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/JsonCodec.scala @@ -661,7 +661,7 @@ object JsonCodec { ComputeClass.stringToObject.get(s.toLowerCase).toRight(s"Invalid compute class ${s}") ) implicit val autopilotDecoder: Decoder[Autopilot] = - Decoder.forProduct4("computeClass", "cpuInMillicores", "memoryInGb", "ephemeralStorageInGb")(Autopilot.apply) + Decoder.forProduct2("computeClass", "ephemeralStorageInGb")(Autopilot.apply) implicit val locationDecoder: Decoder[Location] = Decoder.decodeString.map(Location) implicit val kubeClusterIdDecoder: Decoder[KubernetesClusterLeoId] = Decoder.decodeLong.map(KubernetesClusterLeoId) diff --git a/core/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/diskModels.scala b/core/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/diskModels.scala index 0469790efa0..b977f2c8038 100644 --- a/core/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/diskModels.scala +++ 
b/core/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/diskModels.scala @@ -129,6 +129,10 @@ object FormattedBy extends Enum[FormattedBy] { override def asString: String = "CROMWELL" } + final case object Jupyter extends FormattedBy { + override def asString: String = "JUPYTER" + } + final case object Allowed extends FormattedBy { override def asString: String = "ALLOWED" } diff --git a/core/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/kubernetesModels.scala b/core/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/kubernetesModels.scala index 0d7213473b8..52bf6f9497e 100644 --- a/core/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/kubernetesModels.scala +++ b/core/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/kubernetesModels.scala @@ -364,6 +364,10 @@ object AppType { override def toString: String = "HAIL_BATCH" } + case object Jupyter extends AppType { + override def toString: String = "JUPYTER" + } + // See more context in https://docs.google.com/document/d/1RaQRMqAx7ymoygP6f7QVdBbZC-iD9oY_XLNMe_oz_cs/edit case object Allowed extends AppType { override def toString: String = "ALLOWED" @@ -377,16 +381,18 @@ object AppType { def stringToObject: Map[String, AppType] = values.map(v => v.toString -> v).toMap /** - * Disk formatting for an App. Currently, only Galaxy, RStudio and Custom app types + * Disk formatting for an App. Currently, only Galaxy, RStudio, Jupyter and Custom app types * support disk management. So we default all other app types to Cromwell, * but the field is unused. 
*/ def appTypeToFormattedByType(appType: AppType): FormattedBy = appType match { case Galaxy => FormattedBy.Galaxy + case Jupyter => FormattedBy.Jupyter case Custom => FormattedBy.Custom case Allowed => FormattedBy.Allowed case Cromwell | Wds | HailBatch | WorkflowsApp | CromwellRunnerApp => FormattedBy.Cromwell + } } @@ -439,9 +445,9 @@ final case class App(id: AppId, descriptorPath: Option[Uri], extraArgs: List[String], sourceWorkspaceId: Option[WorkspaceId], - numOfReplicas: Option[Int], autodelete: Autodelete, - autopilot: Option[Autopilot], + autopilot: Boolean, + computeProfile: ComputeProfile, bucketNameToMount: Option[GcsBucketName] ) { @@ -598,7 +604,13 @@ object ComputeClass { val stringToObject = values.map(v => v.toString.toLowerCase -> v).toMap } final case class Autodelete(autodeleteEnabled: Boolean, autodeleteThreshold: Option[AutodeleteThreshold]) -final case class Autopilot(computeClass: ComputeClass, cpuInMillicores: Int, memoryInGb: Int, ephemeralStorageInGb: Int) + +final case class ComputeProfile(numOfReplicas: Option[Int], + cpuInMi: Option[Int], + memoryInGb: Option[Int], + computeClass: Option[ComputeClass], + ephemeralStorageInGb: Option[Int] +) final case class UpdateAppTableId(value: Long) extends AnyVal final case class UpdateAppJobId(value: UUID) extends AnyVal diff --git a/http/src/main/resources/leo.conf b/http/src/main/resources/leo.conf index 94c0c0d4a5f..a24df70239a 100644 --- a/http/src/main/resources/leo.conf +++ b/http/src/main/resources/leo.conf @@ -184,6 +184,10 @@ azure { enabled = ${?HAIL_BATCH_APP_ENABLED} } + jupyter-app-config { + enabled = ${?JUPYTER_APP_ENABLED} + } + coa-app-config { instrumentation-enabled = ${?COA_INSTRUMENTATION_ENABLED} database-enabled = ${?COA_DATABASE_ENABLED} diff --git a/http/src/main/resources/reference.conf b/http/src/main/resources/reference.conf index 68e9041b60b..6157264ce0e 100644 --- a/http/src/main/resources/reference.conf +++ b/http/src/main/resources/reference.conf @@ -455,6 +455,23 
@@ azure { chart-versions-to-exclude-from-updates = [] } + jupyter-app-config { + chart-name = "/leonardo/jupyter" // TODO (LM) this should be terra-helm/jupyter + chart-version = "0.1.0" + release-name-suffix = "jupyter-rls" + namespace-name-suffix = "jupyter-ns" + ksa-name = "jupyter-ksa" + services = [ + { + name = "jupyter" + kind = "ClusterIP" + } + ] + enabled = true + # App developers - Please keep the list of non-backward compatible versions in the list below + chart-versions-to-exclude-from-updates = [] + } + # App types which are allowed to launch with WORKSPACE_SHARED access scope. allowed-shared-apps = [ "WDS", diff --git a/http/src/main/resources/swagger/api-docs.yaml b/http/src/main/resources/swagger/api-docs.yaml index aaa6ea003fe..57b731dbc03 100644 --- a/http/src/main/resources/swagger/api-docs.yaml +++ b/http/src/main/resources/swagger/api-docs.yaml @@ -2988,6 +2988,7 @@ components: - ALLOWED - WORKFLOWS_APP - CROMWELL_RUNNER_APP + - JUPYTER AllowedChartName: type: string enum: diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/app/AppInstall.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/app/AppInstall.scala index 36faa19632b..e6ad0d17b71 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/app/AppInstall.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/app/AppInstall.scala @@ -15,7 +15,8 @@ import org.broadinstitute.dsde.workbench.leonardo.{ LandingZoneResources, ManagedIdentityName, WorkspaceId, - WsmControlledDatabaseResource + WsmControlledDatabaseResource, + WsmControlledResourceId } import org.broadinstitute.dsp.Values import org.http4s.Uri @@ -34,6 +35,10 @@ trait AppInstall[F[_]] { /** Checks status of the app. */ def checkStatus(baseUri: Uri, authHeader: Authorization)(implicit ev: Ask[F, AppContext]): F[Boolean] + +// /** Checks status of the app. 
*/ +// def checkStatus(cloudContext: CloudContext, runtimeName: RuntimeName)(implicit ev: Ask[F, AppContext]): F[Boolean] + } object AppInstall { @@ -43,13 +48,15 @@ object AppInstall { cromwellAppInstall: CromwellAppInstall[F], workflowsAppInstall: WorkflowsAppInstall[F], hailBatchAppInstall: HailBatchAppInstall[F], - cromwellRunnerAppInstall: CromwellRunnerAppInstall[F] + cromwellRunnerAppInstall: CromwellRunnerAppInstall[F], + jupyterAppInstall: JupyterAppInstall[F] ): AppType => AppInstall[F] = _ match { case AppType.Wds => wdsAppInstall case AppType.Cromwell => cromwellAppInstall case AppType.WorkflowsApp => workflowsAppInstall case AppType.HailBatch => hailBatchAppInstall case AppType.CromwellRunnerApp => cromwellRunnerAppInstall + case AppType.Jupyter => jupyterAppInstall case e => throw new IllegalArgumentException(s"Unexpected app type: ${e}") } @@ -75,6 +82,7 @@ object Database { final case class BuildHelmOverrideValuesParams(app: App, workspaceId: WorkspaceId, + workspaceName: String, cloudContext: AzureCloudContext, billingProfileId: BillingProfileId, landingZoneResources: LandingZoneResources, @@ -83,5 +91,6 @@ final case class BuildHelmOverrideValuesParams(app: App, ksaName: ServiceAccountName, managedIdentityName: ManagedIdentityName, databaseNames: List[WsmControlledDatabaseResource], - config: AKSInterpreterConfig + config: AKSInterpreterConfig, + diskWsmResourceId: Option[WsmControlledResourceId] ) diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/app/JupyterAppInstall.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/app/JupyterAppInstall.scala new file mode 100644 index 00000000000..988f3386965 --- /dev/null +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/app/JupyterAppInstall.scala @@ -0,0 +1,70 @@ +package org.broadinstitute.dsde.workbench.leonardo.app +import cats.effect.Async +import cats.mtl.Ask +import cats.syntax.all._ +import 
org.broadinstitute.dsde.workbench.leonardo.AppContext +import org.broadinstitute.dsde.workbench.leonardo.config.JupyterAppConfig +import org.broadinstitute.dsde.workbench.leonardo.dao.JupyterDAO +import org.broadinstitute.dsde.workbench.leonardo.util.AppCreationException +import org.broadinstitute.dsp.Values +import org.http4s.Uri +import org.http4s.headers.Authorization + +/** + * Jupyter app. + */ +class JupyterAppInstall[F[_]](config: JupyterAppConfig, jupyterDao: JupyterDAO[F])(implicit F: Async[F]) + extends AppInstall[F] { + override def databases: List[Database] = List.empty + + override def buildHelmOverrideValues( + params: BuildHelmOverrideValuesParams + )(implicit ev: Ask[F, AppContext]): F[Values] = + for { + ctx <- ev.ask + // Storage container is required for Jupyter app + storageContainer <- F.fromOption( + params.storageContainer, + AppCreationException("Storage container required for Jupyter app", Some(ctx.traceId)) + ) + + disk <- F.fromOption( + params.app.appResources.disk, + AppCreationException("Disk required for Jupyter app", Some(ctx.traceId)) + ) + +// diskResourceId <- F.fromOption( +// params.diskWsmResourceId, +// AppCreationException("Disk required for Jupyter app", Some(ctx.traceId)) +// ) + + values = + List( + // workspace configs + raw"workspace.id=${params.workspaceId.value.toString}", + raw"workspace.name=${params.workspaceName}", + raw"workspace.storageContainer.url=https://${params.landingZoneResources.storageAccountName.value}.blob.core.windows.net/${storageContainer.name.value}", + raw"workspace.storageContainer.resourceId=${storageContainer.resourceId.value.toString}", + raw"workspace.cloudProvider=Azure", + + // persistent disk configs + raw"persistence.diskName=${disk.name.value}", + raw"persistence.diskSize=${disk.size.gb}", + raw"persistence.subscriptionId=${params.cloudContext.subscriptionId.value}", + raw"persistence.resourceGroupName=${params.cloudContext.managedResourceGroupName.value}", + + // app resource requests 
+ raw"resources.cpu=100", // ${params.app.appResources}", + raw"resources.memory=128", // ${disk.size.gb}", + + // misc + raw"serviceAccount.name=${params.ksaName.value}", + raw"relay.connectionName=${params.app.appName.value}" + ) + } yield Values(values.mkString(",")) + + override def checkStatus(baseUri: Uri, authHeader: Authorization)(implicit + ev: Ask[F, AppContext] + ): F[Boolean] = + jupyterDao.getStatus(baseUri, authHeader).handleError(_ => false) +} diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/auth/SamAuthProvider.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/auth/SamAuthProvider.scala index e30e9df0c5e..ee5ba7fb57a 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/auth/SamAuthProvider.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/auth/SamAuthProvider.scala @@ -74,6 +74,7 @@ class SamAuthProvider[F[_]: OpenTelemetryMetrics]( .info(Map("traceId" -> traceId.asString), e)(s"$action is not allowed for resource $samResource") .as(false) } + _ <- logger.info(s"result of hasPermission($samResource, $action): $res") } yield res override def hasPermissionWithProjectFallback[R, A]( diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/config/KubernetesAppConfig.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/config/KubernetesAppConfig.scala index 0c6c05540bc..2e8c7b96b91 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/config/KubernetesAppConfig.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/config/KubernetesAppConfig.scala @@ -47,6 +47,7 @@ object KubernetesAppConfig { case (CromwellRunnerApp, CloudProvider.Azure) => Some(ConfigReader.appConfig.azure.cromwellRunnerAppConfig) case (Wds, CloudProvider.Azure) => Some(ConfigReader.appConfig.azure.wdsAppConfig) case (HailBatch, CloudProvider.Azure) => Some(ConfigReader.appConfig.azure.hailBatchAppConfig) + case 
(Jupyter, CloudProvider.Azure) => Some(ConfigReader.appConfig.azure.jupyterAppConfig) case _ => None } } @@ -202,6 +203,22 @@ final case class HailBatchAppConfig(chartName: ChartName, val appType: AppType = AppType.HailBatch } +final case class JupyterAppConfig(chartName: ChartName, + chartVersion: ChartVersion, + releaseNameSuffix: ReleaseNameSuffix, + namespaceNameSuffix: NamespaceNameSuffix, + ksaName: KsaName, + services: List[ServiceConfig], + enabled: Boolean, + chartVersionsToExcludeFromUpdates: List[ChartVersion] +) extends KubernetesAppConfig { + override val kubernetesServices: List[KubernetesService] = services.map(s => KubernetesService(ServiceId(-1), s)) + override val serviceAccountName = ServiceAccountName(ksaName.value) + + val cloudProvider: CloudProvider = CloudProvider.Azure + val appType: AppType = AppType.Jupyter +} + final case class ContainerRegistryUsername(asString: String) extends AnyVal final case class ContainerRegistryPassword(asString: String) extends AnyVal final case class ContainerRegistryCredentials(username: ContainerRegistryUsername, password: ContainerRegistryPassword) diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/dao/HttpJupyterDAO.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/dao/HttpJupyterDAO.scala index 4f558efcc46..ce6e93bf8b6 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/dao/HttpJupyterDAO.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/dao/HttpJupyterDAO.scala @@ -1,28 +1,50 @@ package org.broadinstitute.dsde.workbench.leonardo.dao import cats.effect.Async +import cats.mtl.Ask import cats.syntax.all._ import io.circe.Decoder import org.broadinstitute.dsde.workbench.leonardo.dao.ExecutionState.{Idle, OtherState} import org.broadinstitute.dsde.workbench.leonardo.dao.HostStatus.HostReady import org.broadinstitute.dsde.workbench.leonardo.dao.HttpJupyterDAO._ import 
org.broadinstitute.dsde.workbench.leonardo.dns.RuntimeDnsCache -import org.broadinstitute.dsde.workbench.leonardo.{CloudContext, RuntimeName} +import org.broadinstitute.dsde.workbench.leonardo.{AppContext, CloudContext, RuntimeName} import org.broadinstitute.dsde.workbench.model.google.GoogleProject +import org.broadinstitute.dsde.workbench.openTelemetry.OpenTelemetryMetrics import org.http4s.circe.CirceEntityDecoder._ import org.http4s.client.Client -import org.http4s.{Header, Headers, Method, Request} +import org.http4s.client.dsl.Http4sClientDsl +import org.http4s.headers.Authorization +import org.http4s.{Header, Headers, Method, Request, Uri} import org.typelevel.ci.CIString import org.typelevel.log4cats.Logger //Jupyter server API doc https://github.com/jupyter/jupyter/wiki/Jupyter-Notebook-Server-API class HttpJupyterDAO[F[_]](val runtimeDnsCache: RuntimeDnsCache[F], client: Client[F], samDAO: SamDAO[F])(implicit F: Async[F], - logger: Logger[F] -) extends JupyterDAO[F] { + logger: Logger[F], + metrics: OpenTelemetryMetrics[F] +) extends JupyterDAO[F] + with Http4sClientDsl[F] { private val SETDATEACCESSEDINSPECTOR_HEADER_IGNORE: Header.Raw = Header.Raw(CIString("X-SetDateAccessedInspector-Action"), "ignore") + def getStatus(baseUri: Uri, authHeader: Authorization)(implicit + ev: Ask[F, AppContext] + ): F[Boolean] = for { + _ <- metrics.incrementCounter("jupyter/status") + res <- client.status( + Request[F]( + method = Method.GET, + uri = baseUri / "api" / "status", // TODO (LM) this may need to change + headers = Headers(authHeader) + ) + ) + _ <- logger.info(s"(LM) Jupyter endpoint: ${baseUri / "api" / "status"}") + _ <- logger.info(s"(LM) Jupyter status result: $res") + + } yield res.isSuccess + def isProxyAvailable(cloudContext: CloudContext, runtimeName: RuntimeName): F[Boolean] = for { hostStatus <- Proxy.getRuntimeTargetHost[F](runtimeDnsCache, cloudContext, runtimeName) @@ -37,7 +59,9 @@ class HttpJupyterDAO[F[_]](val runtimeDnsCache: 
RuntimeDnsCache[F], client: Clie client .successful( Request[F]( - method = Method.GET, + method = + Method.GET, // private def azureUri: Uri = Uri.unsafeFromString(s"https://${hostname.address()}/${path}") + // https://hostIp/runtimeName/api/status uri = x.toNotebooksUri / "api" / "status", headers = headers ) @@ -110,13 +134,6 @@ object HttpJupyterDAO { implicit val sessionDecoder: Decoder[Session] = Decoder.forProduct1("kernel")(Session) } -trait JupyterDAO[F[_]] { - def isAllKernelsIdle(cloudContext: CloudContext, runtimeName: RuntimeName): F[Boolean] - def isProxyAvailable(cloudContext: CloudContext, runtimeName: RuntimeName): F[Boolean] - def createTerminal(googleProject: GoogleProject, runtimeName: RuntimeName): F[Unit] - def terminalExists(googleProject: GoogleProject, runtimeName: RuntimeName, terminalName: TerminalName): F[Boolean] -} - sealed abstract class ExecutionState object ExecutionState { case object Idle extends ExecutionState { diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/dao/JupyterDAO.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/dao/JupyterDAO.scala new file mode 100644 index 00000000000..9a7ae591e79 --- /dev/null +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/dao/JupyterDAO.scala @@ -0,0 +1,15 @@ +package org.broadinstitute.dsde.workbench.leonardo.dao + +import cats.mtl.Ask +import org.broadinstitute.dsde.workbench.leonardo.{AppContext, CloudContext, RuntimeName} +import org.broadinstitute.dsde.workbench.model.google.GoogleProject +import org.http4s.Uri +import org.http4s.headers.Authorization + +trait JupyterDAO[F[_]] { + def isAllKernelsIdle(cloudContext: CloudContext, runtimeName: RuntimeName): F[Boolean] + def isProxyAvailable(cloudContext: CloudContext, runtimeName: RuntimeName): F[Boolean] + def createTerminal(googleProject: GoogleProject, runtimeName: RuntimeName): F[Unit] + def terminalExists(googleProject: GoogleProject, runtimeName: RuntimeName, 
terminalName: TerminalName): F[Boolean] + def getStatus(baseUri: Uri, authHeader: Authorization)(implicit ev: Ask[F, AppContext]): F[Boolean] +} diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/db/AppComponent.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/db/AppComponent.scala index 8c214d7405a..154c607ab83 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/db/AppComponent.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/db/AppComponent.scala @@ -41,9 +41,9 @@ final case class AppRecord(id: AppId, descriptorPath: Option[Uri], extraArgs: Option[List[String]], sourceWorkspaceId: Option[WorkspaceId], - numOfReplicas: Option[Int], autodelete: Autodelete, - autopilot: Option[Autopilot], + autopilot: Boolean, + computeProfile: ComputeProfile, bucketNameToMount: Option[GcsBucketName] ) @@ -101,10 +101,10 @@ class AppTable(tag: Tag) extends Table[AppRecord](tag, "APP") { descriptorPath, extraArgs, sourceWorkspaceId, - numOfReplicas, // combine these values to allow tuple creation; longer than 22 elements is not allowed (autodeleteEnabled, autodeleteThreshold), - (autopilotEnabled, computeClass, cpu, memory, ephemeralStorage), + autopilotEnabled, + (numOfReplicas, cpu, memory, computeClass, ephemeralStorage), bucketNameToMount ) <> ({ case ( @@ -126,9 +126,9 @@ class AppTable(tag: Tag) extends Table[AppRecord](tag, "APP") { descriptorPath, extraArgs, sourceWorkspaceId, - numOfReplicas, autodelete, autopilot, + computeProfile, bucketNameToMount ) => AppRecord( @@ -156,23 +156,14 @@ class AppTable(tag: Tag) extends Table[AppRecord](tag, "APP") { descriptorPath, extraArgs, sourceWorkspaceId, - numOfReplicas, Autodelete(autodelete._1, autodelete._2), - if (autopilot._1) - for { - computeClass <- autopilot._2 - cpu <- autopilot._3 - memory <- autopilot._4 - ephemeralStorage <- autopilot._5 - } yield Autopilot(computeClass, cpu, memory, ephemeralStorage) - else None, + 
autopilot, + ComputeProfile(computeProfile._1, computeProfile._2, computeProfile._3, computeProfile._4, computeProfile._5), bucketNameToMount ) }, { r: AppRecord => - val autopilotComputeClass = r.autopilot.map(_.computeClass) - val autopilotCpu = r.autopilot.map(_.cpuInMillicores) - val autopilotMemory = r.autopilot.map(_.memoryInGb) - val autopilotEphemeralStorage = r.autopilot.map(_.ephemeralStorageInGb) +// val autopilotComputeClass = r.autopilot.map(_.computeClass) +// val autopilotEphemeralStorage = r.autopilot.map(_.ephemeralStorageInGb) Some( ( r.id, @@ -197,10 +188,15 @@ class AppTable(tag: Tag) extends Table[AppRecord](tag, "APP") { r.descriptorPath, r.extraArgs, r.sourceWorkspaceId, - r.numOfReplicas, // combine these values to allow tuple creation; longer than 22 elements is not allowed (r.autodelete.autodeleteEnabled, r.autodelete.autodeleteThreshold), - (r.autopilot.isDefined, autopilotComputeClass, autopilotCpu, autopilotMemory, autopilotEphemeralStorage), + r.autopilot, + (r.computeProfile.numOfReplicas, + r.computeProfile.cpuInMi, + r.computeProfile.memoryInGb, + r.computeProfile.computeClass, + r.computeProfile.ephemeralStorageInGb + ), r.bucketNameToMount ) ) @@ -229,20 +225,15 @@ object appQuery extends TableQuery(new AppTable(_)) { app.googleServiceAccount, app.auditInfo, labels, - AppResources( - namespace, - disk, - services, - app.kubernetesServiceAccount - ), + AppResources(namespace, disk, services, app.kubernetesServiceAccount), errors, app.customEnvironmentVariables.getOrElse(Map.empty), app.descriptorPath, app.extraArgs.getOrElse(List.empty), app.sourceWorkspaceId, - app.numOfReplicas, app.autodelete, app.autopilot, + app.computeProfile, app.bucketNameToMount ) @@ -310,9 +301,9 @@ object appQuery extends TableQuery(new AppTable(_)) { saveApp.app.descriptorPath, if (saveApp.app.extraArgs.isEmpty) None else Some(saveApp.app.extraArgs), saveApp.app.sourceWorkspaceId, - saveApp.app.numOfReplicas, saveApp.app.autodelete, 
saveApp.app.autopilot, + saveApp.app.computeProfile, saveApp.app.bucketNameToMount ) appId <- appQuery returning appQuery.map(_.id) += record diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/db/PersistentDiskComponent.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/db/PersistentDiskComponent.scala index 15f7d97c151..6d8ec0eada0 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/db/PersistentDiskComponent.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/db/PersistentDiskComponent.scala @@ -129,6 +129,7 @@ class PersistentDiskTable(tag: Tag) extends Table[PersistentDiskRecord](tag, "PE case FormattedBy.Galaxy => (galaxyPvcId, lastUsedBy).mapN((gp, lb) => GalaxyRestore(gp, lb)) case FormattedBy.Cromwell => lastUsedBy.map(Other) + case FormattedBy.Jupyter => lastUsedBy.map(Other) case FormattedBy.Allowed => lastUsedBy.map(Other) case FormattedBy.GCE | FormattedBy.Custom => None }, @@ -274,7 +275,9 @@ object persistentDiskQuery { isAttachedToRuntime <- RuntimeConfigQueries.isDiskAttached(diskId) isAttached <- if (isAttachedToRuntime) DBIO.successful(true) else appQuery.isDiskAttached(diskId) } yield isAttached - case Some(FormattedBy.Galaxy | FormattedBy.Custom | FormattedBy.Cromwell | FormattedBy.Allowed) => + case Some( + FormattedBy.Galaxy | FormattedBy.Custom | FormattedBy.Cromwell | FormattedBy.Allowed | FormattedBy.Jupyter + ) => appQuery.isDiskAttached(diskId) case Some(FormattedBy.GCE) => RuntimeConfigQueries.isDiskAttached(diskId) diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/AppDependenciesBuilder.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/AppDependenciesBuilder.scala index fe8c91b172a..861e3016712 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/AppDependenciesBuilder.scala +++ 
b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/AppDependenciesBuilder.scala @@ -184,6 +184,7 @@ class AppDependenciesBuilder(baselineDependenciesBuilder: BaselineDependenciesBu baselineDependencies.cbasDAO, baselineDependencies.cromwellDAO, baselineDependencies.hailBatchDAO, + baselineDependencies.jupyterDAO, baselineDependencies.listenerDAO, baselineDependencies.samDAO, kubeAlg, @@ -221,6 +222,9 @@ class AppDependenciesBuilder(baselineDependenciesBuilder: BaselineDependenciesBu baselineDependencies.wdsDAO, baselineDependencies.azureApplicationInsightsService ) + val jupyterAppInstall = + new JupyterAppInstall[IO](ConfigReader.appConfig.azure.jupyterAppConfig, baselineDependencies.jupyterDAO) + val workflowsAppInstall = new WorkflowsAppInstall[IO]( ConfigReader.appConfig.azure.workflowsAppConfig, @@ -236,7 +240,8 @@ class AppDependenciesBuilder(baselineDependenciesBuilder: BaselineDependenciesBu cromwellAppInstall, workflowsAppInstall, hailBatchAppInstall, - cromwellRunnerAppInstall + cromwellRunnerAppInstall, + jupyterAppInstall ) val aksAlg = new AKSInterpreter[IO]( diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/ConfigReader.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/ConfigReader.scala index 7551e68c16a..178aeb40d20 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/ConfigReader.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/ConfigReader.scala @@ -28,6 +28,7 @@ final case class AzureConfig( workflowsAppConfig: WorkflowsAppConfig, wdsAppConfig: WdsAppConfig, hailBatchAppConfig: HailBatchAppConfig, + jupyterAppConfig: JupyterAppConfig, allowedSharedApps: List[AppType], tdr: TdrConfig, listenerChartConfig: ListenerChartConfig, diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/service/LeoAppServiceInterp.scala 
b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/service/LeoAppServiceInterp.scala index 86917ba2aeb..0024e820699 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/service/LeoAppServiceInterp.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/service/LeoAppServiceInterp.scala @@ -91,7 +91,7 @@ final class LeoAppServiceInterp[F[_]: Parallel](config: AppServiceConfig, enableIntraNodeVisibility = req.labels.get(AOU_UI_LABEL).exists(x => x == "true") _ <- req.appType match { case AppType.Galaxy | AppType.HailBatch | AppType.Wds | AppType.Cromwell | AppType.WorkflowsApp | - AppType.CromwellRunnerApp => + AppType.CromwellRunnerApp | AppType.Jupyter => F.unit case AppType.Allowed => req.allowedChartName match { @@ -1209,6 +1209,7 @@ final class LeoAppServiceInterp[F[_]: Parallel](config: AppServiceConfig, (diskResult.disk.formattedBy, diskResult.disk.appRestore) match { case (Some(FormattedBy.Galaxy), Some(GalaxyRestore(_, _))) | (Some(FormattedBy.Cromwell), Some(AppRestore.Other(_))) | + (Some(FormattedBy.Jupyter), Some(AppRestore.Other(_))) | (Some(FormattedBy.Allowed), Some(AppRestore.Other(_))) => val lastUsedBy = diskResult.disk.appRestore.get.lastUsedBy for { @@ -1415,11 +1416,13 @@ final class LeoAppServiceInterp[F[_]: Parallel](config: AppServiceConfig, // Validate disk. // Apps on GCP require a disk. - // Apps on Azure require _no_ disk. - _ <- (cloudContext.cloudProvider, diskOpt) match { - case (CloudProvider.Gcp, None) => + // Apps on Azure require _no_ disk. !!except Jupyter apps!! 
+ _ <- (cloudContext.cloudProvider, diskOpt, req.appType == AppType.Jupyter) match { + case (CloudProvider.Gcp, None, _) => Left(AppRequiresDiskException(cloudContext, appName, req.appType, ctx.traceId)) - case (CloudProvider.Azure, Some(_)) => + case (CloudProvider.Azure, None, true) => + Left(AppRequiresDiskException(cloudContext, appName, req.appType, ctx.traceId)) + case (CloudProvider.Azure, Some(_), false) => Left(AppDiskNotSupportedException(cloudContext, appName, req.appType, ctx.traceId)) case _ => Right(()) } diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/service/RuntimeServiceInterp.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/service/RuntimeServiceInterp.scala index 998011f5fce..f3a800d6529 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/service/RuntimeServiceInterp.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/http/service/RuntimeServiceInterp.scala @@ -1196,7 +1196,8 @@ object RuntimeServiceInterp { case Some(formattedBy) => if (willBeUsedBy == formattedBy) { formattedBy match { - case FormattedBy.Galaxy | FormattedBy.Cromwell | FormattedBy.Custom | FormattedBy.Allowed => + case FormattedBy.Galaxy | FormattedBy.Cromwell | FormattedBy.Custom | FormattedBy.Allowed | + FormattedBy.Jupyter => appQuery.isDiskAttached(pd.id).transaction case FormattedBy.GCE => RuntimeConfigQueries.isDiskAttached(pd.id).transaction } @@ -1299,7 +1300,8 @@ object RuntimeServiceInterp { case Some(formattedBy) => if (willBeUsedBy == formattedBy) { formattedBy match { - case FormattedBy.Galaxy | FormattedBy.Cromwell | FormattedBy.Custom | FormattedBy.Allowed => + case FormattedBy.Galaxy | FormattedBy.Cromwell | FormattedBy.Custom | FormattedBy.Allowed | + FormattedBy.Jupyter => appQuery.isDiskAttached(pd.id).transaction case FormattedBy.GCE => RuntimeConfigQueries.isDiskAttached(pd.id).transaction } diff --git 
a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/model/LeoException.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/model/LeoException.scala index 4a7d2118972..8add04f24d0 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/model/LeoException.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/model/LeoException.scala @@ -246,7 +246,7 @@ case class AppResourceCannotBeDeletedException(wsmResourceId: WsmControlledResou wsmResourceType: WsmResourceType, traceId: TraceId ) extends LeoException( - s"Azure ${wsmResourceType.toString} with id ${wsmResourceId.value} associated with ${appId.id} cannot be deleted in $status status, please wait and try again", + s"Azure ${wsmResourceType.toString} with id ${wsmResourceId.value} associated with ${appId.id} cannot be deleted in $status status in WSM, please wait and try again", StatusCodes.Conflict, traceId = Some(traceId) ) diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/monitor/LeoMetricsMonitor.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/monitor/LeoMetricsMonitor.scala index 62530014dd4..3b1c7a3a615 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/monitor/LeoMetricsMonitor.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/monitor/LeoMetricsMonitor.scala @@ -33,6 +33,7 @@ class LeoMetricsMonitor[F[_]](config: LeoMetricsMonitorConfig, cbasDAO: CbasDAO[F], cromwellDAO: CromwellDAO[F], hailBatchDAO: HailBatchDAO[F], + jupyterDAO: JupyterDAO[F], listenerDAO: ListenerDAO[F], samDAO: SamDAO[F], kubeAlg: KubernetesAlgebra[F], @@ -179,8 +180,9 @@ class LeoMetricsMonitor[F[_]](config: LeoMetricsMonitorConfig, case ServiceName("cbas") => cbasDAO.getStatus(relayPath, authHeader).handleError(_ => false) case ServiceName("cromwell") | ServiceName("cromwell-reader") | ServiceName("cromwell-runner") => cromwellDAO.getStatus(relayPath, 
authHeader).handleError(_ => false) - case ServiceName("wds") => wdsDAO.getStatus(relayPath, authHeader).handleError(_ => false) - case ServiceName("batch") => hailBatchDAO.getStatus(relayPath, authHeader).handleError(_ => false) + case ServiceName("wds") => wdsDAO.getStatus(relayPath, authHeader).handleError(_ => false) + case ServiceName("batch") => hailBatchDAO.getStatus(relayPath, authHeader).handleError(_ => false) + case ServiceName("jupyter") => jupyterDAO.getStatus(relayPath, authHeader).handleError(_ => false) case s if s == ConfigReader.appConfig.azure.listenerChartConfig.service.config.name => listenerDAO.getStatus(relayPath).handleError(_ => false) case s => diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/AKSInterpreter.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/AKSInterpreter.scala index da81725c578..648ffcb5bb7 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/AKSInterpreter.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/AKSInterpreter.scala @@ -57,14 +57,9 @@ class AKSInterpreter[F[_]](config: AKSInterpreterConfig, ) extends AKSAlgebra[F] { implicit private def booleanDoneCheckable: DoneCheckable[Boolean] = identity[Boolean] - implicit private def listDoneCheckable[A: DoneCheckable]: DoneCheckable[List[A]] = as => as.forall(_.isDone) - private[util] def isPodDone(podStatus: PodStatus): Boolean = podStatus == PodStatus.Failed || podStatus == PodStatus.Succeeded - implicit private def podDoneCheckable: DoneCheckable[List[PodStatus]] = - (ps: List[PodStatus]) => ps.forall(isPodDone) - implicit private def createDatabaseDoneCheckable: DoneCheckable[CreatedControlledAzureDatabaseResult] = _.getJobReport.getStatus != JobReport.StatusEnum.RUNNING @@ -75,6 +70,11 @@ class AKSInterpreter[F[_]](config: AKSInterpreterConfig, implicit private def deleteWsmResourceDoneCheckable: DoneCheckable[DeleteControlledAzureResourceResult] = 
_.getJobReport.getStatus != JobReport.StatusEnum.RUNNING + implicit private def wsmCreateAzureResourceResultDoneCheckable: DoneCheckable[CreateControlledAzureResourceResult] = + (v: CreateControlledAzureResourceResult) => + v.getJobReport.getStatus.equals(JobReport.StatusEnum.SUCCEEDED) || v.getJobReport.getStatus + .equals(JobReport.StatusEnum.FAILED) + private def getListenerReleaseName(appReleaseName: Release): Release = Release(s"${appReleaseName.asString}-listener-rls") @@ -118,6 +118,11 @@ class AKSInterpreter[F[_]](config: AKSInterpreterConfig, } } + wsmWorkspaceApi <- buildWsmWorkspaceApiClient + + // Resolve the workspace in WSM + workspace <- F.blocking(wsmWorkspaceApi.getWorkspace(params.workspaceId.value, IamRole.READER)) + wsmResourceApi <- buildWsmResourceApiClient // Create or fetch WSM managed identity (if shared app) @@ -148,6 +153,14 @@ class AKSInterpreter[F[_]](config: AKSInterpreterConfig, ) } + // Create or fetch WSM disk (only for Jupyter apps) + diskResourceId <- childSpan("createWsmDisk").use { implicit ev => + createOrFetchWsmDiskResource( + app, + params.workspaceId + ) + } + // Create or fetch WSM databases wsmDatabases <- childSpan("createWsmDatabaseResources").use { implicit ev => createOrFetchWsmDatabaseResources( @@ -232,6 +245,7 @@ class AKSInterpreter[F[_]](config: AKSInterpreterConfig, helmOverrideValueParams = BuildHelmOverrideValuesParams( app, params.workspaceId, + workspace.getDisplayName, params.cloudContext, params.billingProfileId, landingZoneResources, @@ -240,10 +254,15 @@ class AKSInterpreter[F[_]](config: AKSInterpreterConfig, namespace.serviceAccountName, managedIdentityName, wsmDatabases ++ referenceDatabases, - config + config, + diskResourceId ) values <- app.appType.buildHelmOverrideValues(helmOverrideValueParams) + _ <- logger.info(ctx.loggingCtx)( + s"Values for app ${params.appName.value} are ${values.asString}" + ) + // Install app chart _ <- childSpan("helmInstallApp").use { _ => helmClient @@ -421,6 +440,7 @@ 
class AKSInterpreter[F[_]](config: AKSInterpreterConfig, relayDomain = s"${landingZoneResources.relayNamespace.value}.servicebus.windows.net" relayEndpoint = s"https://${relayDomain}/" relayPath = Uri.unsafeFromString(relayEndpoint) / hcName.value + relayPrimaryKey <- azureRelayService.getRelayHybridConnectionKey(landingZoneResources.relayNamespace, hcName, params.cloudContext @@ -468,6 +488,7 @@ class AKSInterpreter[F[_]](config: AKSInterpreterConfig, helmOverrideValueParams = BuildHelmOverrideValuesParams( app, workspaceId, + workspaceDesc.displayName, params.cloudContext, billingProfileId, landingZoneResources, @@ -476,7 +497,8 @@ class AKSInterpreter[F[_]](config: AKSInterpreterConfig, ksaName, managedIdentityName, wsmDatabases ++ referenceDatabases, - config + config, + app.appResources.disk.flatMap(_.wsmResourceId) ) values <- app.appType.buildHelmOverrideValues(helmOverrideValueParams) @@ -885,6 +907,85 @@ class AKSInterpreter[F[_]](config: AKSInterpreterConfig, } } yield wsmControlledDBResources + private[util] def createOrFetchWsmDiskResource(app: App, workspaceId: WorkspaceId)(implicit + ev: Ask[F, AppContext] + ): F[Option[WsmControlledResourceId]] = + for { + ctx <- ev.ask + + diskResourceId <- + app.appResources.disk match { + case Some(disk) => + for { + + _ <- logger.info(ctx.loggingCtx)( + s"Creating WSM disk for app ${app.appName.value} in cloud workspace ${workspaceId.value}" + ) + + wsmResourceApi <- buildWsmControlledResourceApiClient + + common = getWsmCommonFields(disk.name.value, + s"Disk for Leo app ${app.appName.value}", + app, + CloningInstructionsEnum.NOTHING + ) + + createDiskJobId = UUID.randomUUID().toString + jobControl = new JobControl() + .id(createDiskJobId) + + azureDisk = new AzureDiskCreationParameters() + .name(disk.name.value) + .size(disk.size.gb) + + request = new CreateControlledAzureDiskRequestV2Body() + .common(common) + .azureDisk(azureDisk) + .jobControl(jobControl) + + _ <- logger.info(ctx.loggingCtx)(s"WSM create disk 
request: ${request}") + + // Execute WSM call + createDiskResponse <- F.blocking( + wsmResourceApi.createAzureDiskV2(request, workspaceId.value) + ) + _ <- logger.info(ctx.loggingCtx)(s"WSM create disk response: ${createDiskResponse}") + + op = F.blocking(wsmResourceApi.getCreateAzureDiskResult(workspaceId.value, createDiskJobId)) + result <- streamFUntilDone( + op, + config.appMonitorConfig.createApp.maxAttempts, + config.appMonitorConfig.createApp.interval + ).interruptAfter(config.appMonitorConfig.createApp.interruptAfter).compile.lastOrError + + _ <- logger.info(ctx.loggingCtx)(s"WSM create database job result: ${result}") + + _ <- F.raiseWhen(result.getJobReport.getStatus != JobReport.StatusEnum.SUCCEEDED)( + AppCreationException( + s"WSM disk creation failed for app ${app.appName.value}. WSM response: ${result}", + Some(ctx.traceId) + ) + ) + + // Save record in APP_CONTROLLED_RESOURCE table + _ <- appControlledResourceQuery + .insert( + app.id.id, + WsmControlledResourceId(common.getResourceId), + WsmResourceType.AzureDisk, + AppControlledResourceStatus.Created + ) + .transaction + + // Update disk status + _ <- persistentDiskQuery.updateStatus(disk.id, DiskStatus.Ready, ctx.now).transaction + + } yield Some(WsmControlledResourceId(common.getResourceId)) + + case None => F.pure(None) + } + } yield diskResourceId + private[util] def createMissingAppControlledResources(app: App, appInstall: AppInstall[F], workspaceId: WorkspaceId, diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/BuildHelmChartValues.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/BuildHelmChartValues.scala index ab11f42ccb9..b8e2e15d53a 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/BuildHelmChartValues.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/BuildHelmChartValues.scala @@ -259,7 +259,9 @@ private[leonardo] object BuildHelmChartValues { case AppType.Wds => 
s"http://wds-${release.asString}-wds-svc:8080" case AppType.HailBatch => "http://batch:8080" case AppType.WorkflowsApp => s"http://wfa-${release.asString}-reverse-proxy-service:8000/" - case _ => "unknown" + case AppType.Jupyter => + s"http://jupyter-${release.asString}:8888/" + case _ => "unknown" } // Hail batch serves requests on /{appName}/batch and uses relative redirects, diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/GKEInterpreter.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/GKEInterpreter.scala index 8b192c426bb..51143aa71d2 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/GKEInterpreter.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/GKEInterpreter.scala @@ -1213,7 +1213,7 @@ class GKEInterpreter[F[_]]( config.monitorConfig.startApp.interval ).interruptAfter(config.monitorConfig.startApp.interruptAfter).compile.lastOrError } yield last.isDone - case AppType.Wds | AppType.HailBatch | AppType.WorkflowsApp | AppType.CromwellRunnerApp => + case AppType.Wds | AppType.HailBatch | AppType.WorkflowsApp | AppType.CromwellRunnerApp | AppType.Jupyter => F.raiseError(AppCreationException(s"App type ${dbApp.app.appType} not supported on GCP")) } diff --git a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/GceInterpreter.scala b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/GceInterpreter.scala index 7ec68a89a34..b32142fbf37 100644 --- a/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/GceInterpreter.scala +++ b/http/src/main/scala/org/broadinstitute/dsde/workbench/leonardo/util/GceInterpreter.scala @@ -156,7 +156,7 @@ class GceInterpreter[F[_]]( isFormatted <- persistentDisk.formattedBy match { case Some(FormattedBy.Galaxy) | Some(FormattedBy.Custom) | Some(FormattedBy.Cromwell) | Some( FormattedBy.Allowed - ) => + ) | Some(FormattedBy.Jupyter) => F.raiseError[Boolean]( new 
RuntimeException( s"Trying to use an app formatted disk for creating GCE runtime. This should never happen. Disk Id: ${x.persistentDiskId}." diff --git a/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/app/BaseAppInstallSpec.scala b/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/app/BaseAppInstallSpec.scala index 17c80b23c27..2f222db477a 100644 --- a/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/app/BaseAppInstallSpec.scala +++ b/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/app/BaseAppInstallSpec.scala @@ -7,7 +7,7 @@ import com.azure.resourcemanager.batch.models.{BatchAccount, BatchAccountKeys} import org.broadinstitute.dsde.workbench.azure._ import org.broadinstitute.dsde.workbench.google2.KubernetesSerializableName.ServiceAccountName import org.broadinstitute.dsde.workbench.google2.{NetworkName, SubnetworkName} -import org.broadinstitute.dsde.workbench.leonardo.CommonTestData.{azureRegion, billingProfileId, tokenValue} +import org.broadinstitute.dsde.workbench.leonardo.CommonTestData.{azureRegion, billingProfileId, wsmResourceId, tokenValue} import org.broadinstitute.dsde.workbench.leonardo.KubernetesTestData.makeApp import org.broadinstitute.dsde.workbench.leonardo.auth.SamAuthProvider import org.broadinstitute.dsde.workbench.leonardo.config.Config.appMonitorConfig @@ -78,6 +78,7 @@ class BaseAppInstallSpec extends AnyFlatSpecLike with LeonardoTestSuite with Moc ) val workspaceId = WorkspaceId(UUID.randomUUID) + val workspaceName = "workspaceName" val workspaceCreatedDate = java.time.OffsetDateTime.parse("1970-01-01T12:15:30-07:00") val aksInterpConfig = AKSInterpreterConfig( @@ -93,6 +94,7 @@ class BaseAppInstallSpec extends AnyFlatSpecLike with LeonardoTestSuite with Moc BuildHelmOverrideValuesParams( app, workspaceId, + workspaceName, cloudContext, billingProfileId, lzResources, @@ -101,7 +103,8 @@ class BaseAppInstallSpec extends AnyFlatSpecLike with LeonardoTestSuite with Moc 
ServiceAccountName("ksa-1"), ManagedIdentityName("mi-1"), databases, - aksInterpConfig + aksInterpConfig, + None ) private def setUpMockSamDAO: SamDAO[IO] = { diff --git a/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/app/CromwellAppInstallSpec.scala b/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/app/CromwellAppInstallSpec.scala index 867d708cd9c..300458c595f 100644 --- a/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/app/CromwellAppInstallSpec.scala +++ b/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/app/CromwellAppInstallSpec.scala @@ -82,6 +82,7 @@ class CromwellAppInstallSpec extends BaseAppInstallSpec { val params = BuildHelmOverrideValuesParams( app, workspaceId, + workspaceName, cloudContext, billingProfileId, lzResources.copy(postgresServer = Some(PostgresServer("postgres", false))), @@ -90,7 +91,8 @@ class CromwellAppInstallSpec extends BaseAppInstallSpec { ServiceAccountName("ksa-1"), ManagedIdentityName("mi-1"), cromwellOnAzureDatabases, - aksInterpConfig + aksInterpConfig, + None ) val overrides = cromwellAppInstall.buildHelmOverrideValues(params) diff --git a/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/dao/MockJupyterDAO.scala b/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/dao/MockJupyterDAO.scala index b6487febe52..3d34a8126cb 100644 --- a/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/dao/MockJupyterDAO.scala +++ b/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/dao/MockJupyterDAO.scala @@ -1,8 +1,11 @@ package org.broadinstitute.dsde.workbench.leonardo.dao import cats.effect.IO -import org.broadinstitute.dsde.workbench.leonardo.{CloudContext, RuntimeName} +import cats.mtl.Ask +import org.broadinstitute.dsde.workbench.leonardo.{AppContext, CloudContext, RuntimeName} import org.broadinstitute.dsde.workbench.model.google.GoogleProject +import org.http4s.Uri +import org.http4s.headers.Authorization class 
MockJupyterDAO(isUp: Boolean = true) extends JupyterDAO[IO] { override def isProxyAvailable(cloudContext: CloudContext, clusterName: RuntimeName): IO[Boolean] = @@ -17,6 +20,9 @@ class MockJupyterDAO(isUp: Boolean = true) extends JupyterDAO[IO] { runtimeName: RuntimeName, terminalName: TerminalName ): IO[Boolean] = IO.pure(true) + + override def getStatus(baseUri: Uri, authHeader: Authorization)(implicit ev: Ask[IO, AppContext]): IO[Boolean] = + IO.pure(isUp) } object MockJupyterDAO extends MockJupyterDAO(isUp = true) diff --git a/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/http/ConfigReaderSpec.scala b/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/http/ConfigReaderSpec.scala index 4ee50d99c20..5079cacf028 100644 --- a/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/http/ConfigReaderSpec.scala +++ b/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/http/ConfigReaderSpec.scala @@ -234,7 +234,19 @@ class ConfigReaderSpec extends AnyFlatSpec with Matchers { List( ServiceConfig(ServiceName("batch"), KubernetesServiceKindName("ClusterIP")) ), - false, + enabled = false, + chartVersionsToExcludeFromUpdates = List() + ), + JupyterAppConfig( + ChartName("terra-helm/jupyter"), + ChartVersion("0.1.0"), + ReleaseNameSuffix("jupyter-rls"), + NamespaceNameSuffix("jupyter-ns"), + KsaName("jupyter-ksa"), + List( + ServiceConfig(ServiceName("jupyter"), KubernetesServiceKindName("ClusterIP")) + ), + enabled = true, chartVersionsToExcludeFromUpdates = List() ), List(AppType.Wds, AppType.WorkflowsApp), diff --git a/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/monitor/LeoMetricsMonitorSpec.scala b/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/monitor/LeoMetricsMonitorSpec.scala index 21ca1feb84a..2b77e81ee60 100644 --- a/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/monitor/LeoMetricsMonitorSpec.scala +++ 
b/http/src/test/scala/org/broadinstitute/dsde/workbench/leonardo/monitor/LeoMetricsMonitorSpec.scala @@ -97,6 +97,7 @@ class LeoMetricsMonitorSpec extends AnyFlatSpec with LeonardoTestSuite with Test cbasDAO, cromwellDAO, hailBatchDAO, + jupyterDAO, relayListenerDAO, samDAO, kube, @@ -371,6 +372,7 @@ class LeoMetricsMonitorSpec extends AnyFlatSpec with LeonardoTestSuite with Test cbasDAO, cromwellDAO, hailBatchDAO, + jupyterDAO, relayListenerDAO, samDAO, kube, diff --git a/jupyter-0.1.0.tgz b/jupyter-0.1.0.tgz new file mode 100644 index 00000000000..d2f7ca9d81a Binary files /dev/null and b/jupyter-0.1.0.tgz differ