diff --git a/README.md b/README.md index bc303c2bdd..56c79ca103 100644 --- a/README.md +++ b/README.md @@ -76,8 +76,8 @@ For more information about the different deployment options, please see our depl | Installation methods | Docs link | | ------------------- | --------- | -| Local instance | [![All-in-one isntallation](https://img.shields.io/badge/All--in--one%20Installer-%230db7ed)](https://www.comet.com/docs/opik/self-host/self_hosting_opik/#all-in-one-installation?utm_source=opik&utm_medium=github&utm_content=self_host_link) -| Kubernetes | [![Kubernetes](https://img.shields.io/badge/kubernetes-%23326ce5.svg?&logo=kubernetes&logoColor=white)](https://www.comet.com/docs/opik/self-host/self_hosting_opik/#kubernetes-installation?utm_source=opik&utm_medium=github&utm_content=kubernetes_link) +| Local instance | [![Local Deployment](https://img.shields.io/badge/Local%20Deployments-%232496ED?style=flat&logo=docker&logoColor=white)](https://www.comet.com/docs/opik/self-host/local_deployment?utm_source=opik&utm_medium=github&utm_content=self_host_link) +| Kubernetes | [![Kubernetes](https://img.shields.io/badge/Kubernetes-%23326ce5.svg?&logo=kubernetes&logoColor=white)](https://www.comet.com/docs/opik/self-host/kubernetes/#kubernetes-installation?utm_source=opik&utm_medium=github&utm_content=kubernetes_link) ## 🏁 Get Started diff --git a/apps/opik-backend/src/main/java/com/comet/opik/api/SpanBatch.java b/apps/opik-backend/src/main/java/com/comet/opik/api/SpanBatch.java index 57435ebf26..02727bec49 100644 --- a/apps/opik-backend/src/main/java/com/comet/opik/api/SpanBatch.java +++ b/apps/opik-backend/src/main/java/com/comet/opik/api/SpanBatch.java @@ -4,9 +4,11 @@ import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; import jakarta.validation.constraints.Size; +import lombok.Builder; import java.util.List; +@Builder(toBuilder = true) public record SpanBatch(@NotNull @Size(min = 1, max = 1000) @JsonView( { Span.View.Write.class}) @Valid List 
spans){ } diff --git a/apps/opik-backend/src/main/java/com/comet/opik/api/TraceBatch.java b/apps/opik-backend/src/main/java/com/comet/opik/api/TraceBatch.java index ac5c164940..0765a89712 100644 --- a/apps/opik-backend/src/main/java/com/comet/opik/api/TraceBatch.java +++ b/apps/opik-backend/src/main/java/com/comet/opik/api/TraceBatch.java @@ -4,9 +4,11 @@ import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; import jakarta.validation.constraints.Size; +import lombok.Builder; import java.util.List; +@Builder(toBuilder = true) public record TraceBatch(@NotNull @Size(min = 1, max = 1000) @JsonView( { Trace.View.Write.class}) @Valid List traces){ } diff --git a/apps/opik-backend/src/main/java/com/comet/opik/api/TracesDelete.java b/apps/opik-backend/src/main/java/com/comet/opik/api/TracesDelete.java new file mode 100644 index 0000000000..d5f721d266 --- /dev/null +++ b/apps/opik-backend/src/main/java/com/comet/opik/api/TracesDelete.java @@ -0,0 +1,17 @@ +package com.comet.opik.api; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.databind.PropertyNamingStrategies; +import com.fasterxml.jackson.databind.annotation.JsonNaming; +import jakarta.validation.constraints.NotNull; +import jakarta.validation.constraints.Size; +import lombok.Builder; + +import java.util.Set; +import java.util.UUID; + +@Builder(toBuilder = true) +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy.class) +public record TracesDelete(@NotNull @Size(min = 1, max = 1000) Set ids) { +} diff --git a/apps/opik-backend/src/main/java/com/comet/opik/api/resources/v1/priv/TraceResource.java b/apps/opik-backend/src/main/java/com/comet/opik/api/resources/v1/priv/TracesResource.java similarity index 93% rename from apps/opik-backend/src/main/java/com/comet/opik/api/resources/v1/priv/TraceResource.java rename to 
apps/opik-backend/src/main/java/com/comet/opik/api/resources/v1/priv/TracesResource.java index 31bf42715c..dc91804022 100644 --- a/apps/opik-backend/src/main/java/com/comet/opik/api/resources/v1/priv/TraceResource.java +++ b/apps/opik-backend/src/main/java/com/comet/opik/api/resources/v1/priv/TracesResource.java @@ -9,6 +9,7 @@ import com.comet.opik.api.TraceBatch; import com.comet.opik.api.TraceSearchCriteria; import com.comet.opik.api.TraceUpdate; +import com.comet.opik.api.TracesDelete; import com.comet.opik.api.filter.FiltersFactory; import com.comet.opik.api.filter.TraceFilter; import com.comet.opik.domain.FeedbackScoreService; @@ -60,7 +61,7 @@ @Slf4j @RequiredArgsConstructor(onConstructor_ = @jakarta.inject.Inject) @Tag(name = "Traces", description = "Trace related resources") -public class TraceResource { +public class TracesResource { private final @NonNull TraceService service; private final @NonNull FeedbackScoreService feedbackScoreService; @@ -206,6 +207,20 @@ public Response deleteById(@PathParam("id") UUID id) { return Response.noContent().build(); } + @POST + @Path("/delete") + @Operation(operationId = "deleteTraces", summary = "Delete traces", description = "Delete traces", responses = { + @ApiResponse(responseCode = "204", description = "No Content")}) + public Response deleteTraces( + @RequestBody(content = @Content(schema = @Schema(implementation = TracesDelete.class))) @NotNull @Valid TracesDelete request) { + log.info("Deleting traces, count '{}'", request.ids().size()); + service.delete(request.ids()) + .contextWrite(ctx -> setRequestContext(ctx, requestContext)) + .block(); + log.info("Deleted traces, count '{}'", request.ids().size()); + return Response.noContent().build(); + } + @PUT @Path("/{id}/feedback-scores") @Operation(operationId = "addTraceFeedbackScore", summary = "Add trace feedback score", description = "Add trace feedback score", responses = { diff --git 
a/apps/opik-backend/src/main/java/com/comet/opik/domain/FeedbackScoreDAO.java b/apps/opik-backend/src/main/java/com/comet/opik/domain/FeedbackScoreDAO.java index d3f2e4d738..e63db2530f 100644 --- a/apps/opik-backend/src/main/java/com/comet/opik/domain/FeedbackScoreDAO.java +++ b/apps/opik-backend/src/main/java/com/comet/opik/domain/FeedbackScoreDAO.java @@ -3,16 +3,18 @@ import com.comet.opik.api.FeedbackScore; import com.comet.opik.api.FeedbackScoreBatchItem; import com.comet.opik.api.ScoreSource; +import com.google.common.base.Preconditions; import com.google.inject.ImplementedBy; import io.r2dbc.spi.Connection; import io.r2dbc.spi.Result; import io.r2dbc.spi.Row; -import io.r2dbc.spi.Statement; import jakarta.inject.Inject; import jakarta.inject.Singleton; import lombok.Getter; import lombok.NonNull; import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.stringtemplate.v4.ST; import reactor.core.publisher.Flux; @@ -24,6 +26,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.UUID; import java.util.stream.Collectors; @@ -52,14 +55,16 @@ enum EntityType { Mono deleteScoreFrom(EntityType entityType, UUID id, String name, Connection connection); - Mono deleteByEntityId(EntityType entityType, UUID id, Connection connection); + Mono deleteByEntityId(EntityType entityType, UUID entityId, Connection connection); - Mono scoreBatchOf(EntityType entityType, List scores, Connection connection); + Mono deleteByEntityIds(EntityType entityType, Set entityIds, Connection connection); + Mono scoreBatchOf(EntityType entityType, List scores, Connection connection); } @Singleton @RequiredArgsConstructor(onConstructor_ = @Inject) +@Slf4j class FeedbackScoreDAOImpl implements FeedbackScoreDAO { record FeedbackScoreDto(UUID entityId, FeedbackScore score) { @@ -230,21 +235,21 @@ LEFT JOIN ( ; 
"""; - private static final String DELETE_SPAN_CASCADE_FEEDBACK_SCORE = """ + private static final String DELETE_SPANS_CASCADE_FEEDBACK_SCORE = """ DELETE FROM feedback_scores WHERE entity_type = 'span' AND entity_id IN ( SELECT id FROM spans - WHERE trace_id = :trace_id + WHERE trace_id IN :trace_ids ) AND workspace_id = :workspace_id ; """; - private static final String DELETE_FEEDBACK_SCORE_BY_ENTITY_ID = """ + private static final String DELETE_FEEDBACK_SCORE_BY_ENTITY_IDS = """ DELETE FROM feedback_scores - WHERE entity_id = :entity_id + WHERE entity_id IN :entity_ids AND entity_type = :entity_type AND workspace_id = :workspace_id ; @@ -410,30 +415,39 @@ public Mono deleteScoreFrom(EntityType entityType, UUID id, String name, C } @Override - public Mono deleteByEntityId(@NonNull EntityType entityType, @NonNull UUID id, - @NonNull Connection connection) { + public Mono deleteByEntityId( + @NonNull EntityType entityType, @NonNull UUID entityId, @NonNull Connection connection) { + return deleteByEntityIds(entityType, Set.of(entityId), connection); + } + + @Override + public Mono deleteByEntityIds( + @NonNull EntityType entityType, Set entityIds, @NonNull Connection connection) { + Preconditions.checkArgument( + CollectionUtils.isNotEmpty(entityIds), "Argument 'entityIds' must not be empty"); + log.info("Deleting feedback scores for entityType '{}', entityIds count '{}'", entityType, entityIds.size()); return switch (entityType) { - case TRACE -> cascadeSpanDelete(id, connection) + case TRACE -> cascadeSpanDelete(entityIds, connection) .flatMap(result -> Mono.from(result.getRowsUpdated())) - .then(Mono.defer(() -> deleteScoresByEntityId(entityType, id, connection))) + .then(Mono.defer(() -> deleteScoresByEntityIds(entityType, entityIds, connection))) .then(); - case SPAN -> deleteScoresByEntityId(entityType, id, connection) + case SPAN -> deleteScoresByEntityIds(entityType, entityIds, connection) .then(); }; } - private Mono cascadeSpanDelete(UUID id, Connection 
connection) { - var statement = connection.createStatement(DELETE_SPAN_CASCADE_FEEDBACK_SCORE) - .bind("trace_id", id); - + private Mono cascadeSpanDelete(Set traceIds, Connection connection) { + log.info("Deleting feedback scores by span entityId, traceIds count '{}'", traceIds.size()); + var statement = connection.createStatement(DELETE_SPANS_CASCADE_FEEDBACK_SCORE) + .bind("trace_ids", traceIds.toArray(UUID[]::new)); return makeMonoContextAware(bindWorkspaceIdToMono(statement)); } - private Mono deleteScoresByEntityId(EntityType entityType, UUID id, Connection connection) { - Statement statement = connection.createStatement(DELETE_FEEDBACK_SCORE_BY_ENTITY_ID) - .bind("entity_id", id) + private Mono deleteScoresByEntityIds(EntityType entityType, Set entityIds, Connection connection) { + log.info("Deleting feedback scores by entityType '{}', entityIds count '{}'", entityType, entityIds.size()); + var statement = connection.createStatement(DELETE_FEEDBACK_SCORE_BY_ENTITY_IDS) + .bind("entity_ids", entityIds.toArray(UUID[]::new)) .bind("entity_type", entityType.getType()); - return makeMonoContextAware(bindWorkspaceIdToMono(statement)) .flatMap(result -> Mono.from(result.getRowsUpdated())); } diff --git a/apps/opik-backend/src/main/java/com/comet/opik/domain/FeedbackScoreMapper.java b/apps/opik-backend/src/main/java/com/comet/opik/domain/FeedbackScoreMapper.java index a6b88270e6..8add7adb4c 100644 --- a/apps/opik-backend/src/main/java/com/comet/opik/domain/FeedbackScoreMapper.java +++ b/apps/opik-backend/src/main/java/com/comet/opik/domain/FeedbackScoreMapper.java @@ -3,12 +3,18 @@ import com.comet.opik.api.FeedbackScore; import com.comet.opik.api.FeedbackScoreBatchItem; import org.mapstruct.Mapper; +import org.mapstruct.Mapping; import org.mapstruct.factory.Mappers; +import java.util.UUID; + @Mapper public interface FeedbackScoreMapper { FeedbackScoreMapper INSTANCE = Mappers.getMapper(FeedbackScoreMapper.class); FeedbackScore toFeedbackScore(FeedbackScoreBatchItem 
feedbackScoreBatchItem); + + @Mapping(target = "id", source = "entityId") + FeedbackScoreBatchItem toFeedbackScoreBatchItem(UUID entityId, String projectName, FeedbackScore feedbackScore); } diff --git a/apps/opik-backend/src/main/java/com/comet/opik/domain/SpanDAO.java b/apps/opik-backend/src/main/java/com/comet/opik/domain/SpanDAO.java index f3d1e37c46..8977cba042 100644 --- a/apps/opik-backend/src/main/java/com/comet/opik/domain/SpanDAO.java +++ b/apps/opik-backend/src/main/java/com/comet/opik/domain/SpanDAO.java @@ -18,6 +18,7 @@ import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.CollectionUtils; import org.reactivestreams.Publisher; import org.stringtemplate.v4.ST; import reactor.core.publisher.Mono; @@ -96,7 +97,7 @@ INSERT INTO spans( * This query handles the insertion of a new span into the database in two cases: * 1. When the span does not exist in the database. * 2. When the span exists in the database but the provided span has different values for the fields such as end_time, input, output, metadata and tags. - * **/ + **/ //TODO: refactor to implement proper conflict resolution private static final String INSERT = """ INSERT INTO spans( @@ -268,14 +269,13 @@ INSERT INTO spans ( /** * This query is used when updates are processed before inserts, and the span does not exist in the database. - * + *

* The query will insert/update a new span with the provided values such as end_time, input, output, metadata, tags etc. * In case the values are not provided, the query will use the default values such value are interpreted in other queries as null. - * + *

* This happens because the query is used in a patch endpoint which allows partial updates, so the query will update only the provided fields. * The remaining fields will be updated/inserted once the POST arrives with the all mandatory fields to create the trace. - * - * */ + */ //TODO: refactor to implement proper conflict resolution private static final String PARTIAL_INSERT = """ INSERT INTO spans( @@ -466,8 +466,8 @@ AND id in ( ; """; - private static final String DELETE_BY_TRACE_ID = """ - DELETE FROM spans WHERE trace_id = :trace_id AND workspace_id = :workspace_id; + private static final String DELETE_BY_TRACE_IDS = """ + DELETE FROM spans WHERE trace_id IN :trace_ids AND workspace_id = :workspace_id; """; private static final String SELECT_SPAN_ID_AND_WORKSPACE = """ @@ -745,15 +745,20 @@ private Publisher getById(UUID id, Connection connection) { @WithSpan public Mono deleteByTraceId(@NonNull UUID traceId, @NonNull Connection connection) { - Statement statement = connection.createStatement(DELETE_BY_TRACE_ID) - .bind("trace_id", traceId); - - Segment segment = startSegment("spans", "Clickhouse", "delete_by_trace_id"); + return deleteByTraceIds(Set.of(traceId), connection); + } + @Trace(dispatcher = true) + public Mono deleteByTraceIds(Set traceIds, @NonNull Connection connection) { + Preconditions.checkArgument( + CollectionUtils.isNotEmpty(traceIds), "Argument 'traceIds' must not be empty"); + log.info("Deleting spans by traceIds, count '{}'", traceIds.size()); + var statement = connection.createStatement(DELETE_BY_TRACE_IDS) + .bind("trace_ids", traceIds.toArray(UUID[]::new)); + var segment = startSegment("spans", "Clickhouse", "delete_by_trace_id"); return makeMonoContextAware(bindWorkspaceIdToMono(statement)) .doFinally(signalType -> endSegment(segment)) .then(); - } private Publisher mapToDto(Result result) { diff --git a/apps/opik-backend/src/main/java/com/comet/opik/domain/TraceDAO.java b/apps/opik-backend/src/main/java/com/comet/opik/domain/TraceDAO.java index 
4129de092d..e92a3fc9b4 100644 --- a/apps/opik-backend/src/main/java/com/comet/opik/domain/TraceDAO.java +++ b/apps/opik-backend/src/main/java/com/comet/opik/domain/TraceDAO.java @@ -19,6 +19,7 @@ import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.CollectionUtils; import org.reactivestreams.Publisher; import org.stringtemplate.v4.ST; import reactor.core.publisher.Flux; @@ -53,12 +54,13 @@ interface TraceDAO { Mono delete(UUID id, Connection connection); + Mono delete(Set ids, Connection connection); + Mono findById(UUID id, Connection connection); Mono find(int size, int page, TraceSearchCriteria traceSearchCriteria, Connection connection); - Mono partialInsert(UUID projectId, TraceUpdate traceUpdate, UUID traceId, - Connection connection); + Mono partialInsert(UUID projectId, TraceUpdate traceUpdate, UUID traceId, Connection connection); Mono> getTraceWorkspace(Set traceIds, Connection connection); @@ -109,7 +111,7 @@ INSERT INTO traces( * This query handles the insertion of a new trace into the database in two cases: * 1. When the trace does not exist in the database. * 2. When the trace exists in the database but the provided trace has different values for the fields such as end_time, input, output, metadata and tags. - * **/ + **/ //TODO: refactor to implement proper conflict resolution private static final String INSERT = """ INSERT INTO traces( @@ -308,7 +310,7 @@ AND id in ( private static final String DELETE_BY_ID = """ DELETE FROM traces - WHERE id = :id + WHERE id IN :ids AND workspace_id = :workspace_id ; """; @@ -324,15 +326,14 @@ AND id in ( """; /** - * This query is used when updates are processed before inserts, and the trace does not exist in the database. - * - * The query will insert/update a new trace with the provided values such as end_time, input, output, metadata and tags. 
- * In case the values are not provided, the query will use the default values such value are interpreted in other queries as null. - * - * This happens because the query is used in a patch endpoint which allows partial updates, so the query will update only the provided fields. - * The remaining fields will be updated/inserted once the POST arrives with the all mandatory fields to create the trace. - * - * */ + * This query is used when updates are processed before inserts, and the trace does not exist in the database. + *

+ * The query will insert/update a new trace with the provided values such as end_time, input, output, metadata and tags. + * In case the values are not provided, the query will use the default values such value are interpreted in other queries as null. + *

+ * This happens because the query is used in a patch endpoint which allows partial updates, so the query will update only the provided fields. + * The remaining fields will be updated/inserted once the POST arrives with the all mandatory fields to create the trace. + */ //TODO: refactor to implement proper conflict resolution private static final String INSERT_UPDATE = """ INSERT INTO traces ( @@ -506,8 +507,7 @@ private Mono update(UUID id, TraceUpdate traceUpdate, Connecti .doFinally(signalType -> endSegment(segment)); } - private Statement createUpdateStatement(UUID id, TraceUpdate traceUpdate, Connection connection, - String sql) { + private Statement createUpdateStatement(UUID id, TraceUpdate traceUpdate, Connection connection, String sql) { Statement statement = connection.createStatement(sql); bindUpdateParams(traceUpdate, statement); @@ -567,11 +567,16 @@ private Flux getById(UUID id, Connection connection) { @Override @WithSpan public Mono delete(@NonNull UUID id, @NonNull Connection connection) { - var statement = connection.createStatement(DELETE_BY_ID) - .bind("id", id); - - Segment segment = startSegment("traces", "Clickhouse", "delete"); + return delete(Set.of(id), connection); + } + @Override + public Mono delete(Set ids, @NonNull Connection connection) { + Preconditions.checkArgument(CollectionUtils.isNotEmpty(ids), "Argument 'ids' must not be empty"); + log.info("Deleting traces, count '{}'", ids.size()); + var statement = connection.createStatement(DELETE_BY_ID) + .bind("ids", ids.toArray(UUID[]::new)); + var segment = startSegment("traces", "Clickhouse", "delete"); return makeMonoContextAware(bindWorkspaceIdToMono(statement)) .doFinally(signalType -> endSegment(segment)) .then(); diff --git a/apps/opik-backend/src/main/java/com/comet/opik/domain/TraceService.java b/apps/opik-backend/src/main/java/com/comet/opik/domain/TraceService.java index 083787ef53..6be401e341 100644 --- 
a/apps/opik-backend/src/main/java/com/comet/opik/domain/TraceService.java +++ b/apps/opik-backend/src/main/java/com/comet/opik/domain/TraceService.java @@ -24,6 +24,7 @@ import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.CollectionUtils; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import reactor.core.scheduler.Schedulers; @@ -51,6 +52,8 @@ public interface TraceService { Mono delete(UUID id); + Mono delete(Set ids); + Mono find(int page, int size, TraceSearchCriteria criteria); Mono validateTraceWorkspace(String workspaceId, Set traceIds); @@ -163,11 +166,10 @@ private Mono getProjectById(TraceUpdate traceUpdate) { } private Mono getOrCreateProject(String projectName) { - return AsyncUtils.makeMonoContextAware((userName, workspaceName, workspaceId) -> { - return Mono.fromCallable(() -> projectService.getOrCreate(workspaceId, projectName, userName)) - .onErrorResume(e -> handleProjectCreationError(e, projectName, workspaceId)) - .subscribeOn(Schedulers.boundedElastic()); - }); + return AsyncUtils.makeMonoContextAware((userName, workspaceName, workspaceId) -> Mono + .fromCallable(() -> projectService.getOrCreate(workspaceId, projectName, userName)) + .onErrorResume(e -> handleProjectCreationError(e, projectName, workspaceId)) + .subscribeOn(Schedulers.boundedElastic())); } private Mono insertTrace(Trace newTrace, Project project, UUID id, Trace existingTrace) { @@ -273,6 +275,7 @@ public Mono get(@NonNull UUID id) { @Override @WithSpan public Mono delete(@NonNull UUID id) { + log.info("Deleting trace by id '{}'", id); return lockService.executeWithLock( new LockService.Lock(id, TRACE_KEY), Mono.defer(() -> template @@ -283,6 +286,19 @@ public Mono delete(@NonNull UUID id) { .then(Mono.defer(() -> template.nonTransaction(connection -> dao.delete(id, connection)))))); } + @Override + @WithSpan + @com.newrelic.api.agent.Trace(dispatcher = true) + public Mono 
delete(Set ids) { + Preconditions.checkArgument(CollectionUtils.isNotEmpty(ids), "Argument 'ids' must not be empty"); + log.info("Deleting traces, count '{}'", ids.size()); + return template + .nonTransaction(connection -> feedbackScoreDAO.deleteByEntityIds(EntityType.TRACE, ids, connection)) + .then(Mono + .defer(() -> template.nonTransaction(connection -> spanDAO.deleteByTraceIds(ids, connection)))) + .then(Mono.defer(() -> template.nonTransaction(connection -> dao.delete(ids, connection)))); + } + @Override @WithSpan public Mono find(int page, int size, @NonNull TraceSearchCriteria criteria) { diff --git a/apps/opik-backend/src/test/java/com/comet/opik/api/resources/v1/priv/SpansResourceTest.java b/apps/opik-backend/src/test/java/com/comet/opik/api/resources/v1/priv/SpansResourceTest.java index 028c1c3c75..ecc0b11279 100644 --- a/apps/opik-backend/src/test/java/com/comet/opik/api/resources/v1/priv/SpansResourceTest.java +++ b/apps/opik-backend/src/test/java/com/comet/opik/api/resources/v1/priv/SpansResourceTest.java @@ -3252,7 +3252,7 @@ void batch__whenSendingMultipleSpansWithSameId__thenReturn422() { .request() .header(HttpHeaders.AUTHORIZATION, API_KEY) .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .post(Entity.json(new SpanBatch(expectedSpans1)))) { + .post(Entity.json(SpanBatch.builder().spans(expectedSpans1).build()))) { assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(422); assertThat(actualResponse.hasEntity()).isTrue(); diff --git a/apps/opik-backend/src/test/java/com/comet/opik/api/resources/v1/priv/TracesResourceTest.java b/apps/opik-backend/src/test/java/com/comet/opik/api/resources/v1/priv/TracesResourceTest.java index aa5e155592..8bdf9fb926 100644 --- a/apps/opik-backend/src/test/java/com/comet/opik/api/resources/v1/priv/TracesResourceTest.java +++ b/apps/opik-backend/src/test/java/com/comet/opik/api/resources/v1/priv/TracesResourceTest.java @@ -6,9 +6,12 @@ import com.comet.opik.api.FeedbackScoreBatchItem; import 
com.comet.opik.api.Project; import com.comet.opik.api.ScoreSource; +import com.comet.opik.api.Span; +import com.comet.opik.api.SpanBatch; import com.comet.opik.api.Trace; import com.comet.opik.api.TraceBatch; import com.comet.opik.api.TraceUpdate; +import com.comet.opik.api.TracesDelete; import com.comet.opik.api.error.ErrorMessage; import com.comet.opik.api.filter.Filter; import com.comet.opik.api.filter.Operator; @@ -24,6 +27,8 @@ import com.comet.opik.api.resources.utils.TestDropwizardAppExtensionUtils; import com.comet.opik.api.resources.utils.TestUtils; import com.comet.opik.api.resources.utils.WireMockUtils; +import com.comet.opik.domain.FeedbackScoreMapper; +import com.comet.opik.domain.SpanType; import com.comet.opik.infrastructure.auth.RequestContext; import com.comet.opik.podam.PodamFactoryUtils; import com.comet.opik.utils.JsonUtils; @@ -94,16 +99,19 @@ @TestInstance(TestInstance.Lifecycle.PER_CLASS) class TracesResourceTest { - public static final String URL_PATTERN = "http://.*/v1/private/traces/.{8}-.{4}-.{4}-.{4}-.{12}"; public static final String URL_TEMPLATE = "%s/v1/private/traces"; - public static final String[] IGNORED_FIELDS = {"projectId", "projectName", "id", "createdAt", "lastUpdatedAt", - "createdBy", "lastUpdatedBy"}; - public static final String[] IGNORED_FIELDS_LIST = {"projectId", "projectName", "createdAt", + private static final String URL_TEMPLATE_SPANS = "%s/v1/private/spans"; + private static final String[] IGNORED_FIELDS_TRACES = {"projectId", "projectName", "createdAt", + "lastUpdatedAt", "feedbackScores", "createdBy", "lastUpdatedBy"}; + private static final String[] IGNORED_FIELDS_SPANS = {"projectId", "projectName", "createdAt", "lastUpdatedAt", "feedbackScores", "createdBy", "lastUpdatedBy"}; + private static final String[] IGNORED_FIELDS_SCORES = {"projectId", "projectName", "id", "createdAt", + "lastUpdatedAt", + "createdBy", "lastUpdatedBy"}; private static final String API_KEY = UUID.randomUUID().toString(); - public 
static final String USER = UUID.randomUUID().toString(); - public static final String WORKSPACE_ID = UUID.randomUUID().toString(); + private static final String USER = UUID.randomUUID().toString(); + private static final String WORKSPACE_ID = UUID.randomUUID().toString(); private static final String TEST_WORKSPACE = UUID.randomUUID().toString(); private static final RedisContainer REDIS = RedisContainerUtils.newRedisContainer(); @@ -228,8 +236,8 @@ void setUp() { @DisplayName("create trace, when api key is present, then return proper response") void create__whenApiKeyIsPresent__thenReturnProperResponse(String apiKey, boolean expected) { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); mockTargetWorkspace(okApikey, workspaceName, workspaceId); @@ -262,8 +270,8 @@ void create__whenApiKeyIsPresent__thenReturnProperResponse(String apiKey, boolea @DisplayName("update trace, when api key is present, then return proper response") void update__whenApiKeyIsPresent__thenReturnProperResponse(String apiKey, boolean expected) { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); mockTargetWorkspace(okApikey, workspaceName, workspaceId); @@ -298,7 +306,7 @@ void update__whenApiKeyIsPresent__thenReturnProperResponse(String apiKey, boolea @DisplayName("delete trace, when api key is present, then return proper response") void delete__whenApiKeyIsPresent__thenReturnProperResponse(String apiKey, boolean expected) { - String workspaceName = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); mockTargetWorkspace(okApikey, workspaceName, WORKSPACE_ID); @@ -341,7 +349,7 @@ void get__whenApiKeyIsPresent__thenReturnProperResponse(String apiKey, 
boolean e .build()) .toList(); - traces.forEach(trace -> TracesResourceTest.this.create(trace, okApikey, workspaceName)); + traces.forEach(trace -> create(trace, okApikey, workspaceName)); try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) .queryParam("project_name", DEFAULT_PROJECT) @@ -369,7 +377,7 @@ void get__whenApiKeyIsPresent__thenReturnProperResponse(String apiKey, boolean e @DisplayName("Trace feedback, when api key is present, then return proper response") void feedback__whenApiKeyIsPresent__thenReturnProperResponse(String apiKey, boolean expected) { - String workspaceName = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); mockTargetWorkspace(okApikey, workspaceName, WORKSPACE_ID); var trace = factory.manufacturePojo(Trace.class) @@ -406,10 +414,10 @@ void feedback__whenApiKeyIsPresent__thenReturnProperResponse(String apiKey, bool @MethodSource("credentials") @DisplayName("delete feedback, when api key is present, then return proper response") void deleteFeedback__whenApiKeyIsPresent__thenReturnProperResponse(String apiKey, boolean expected) { - Trace trace = factory.manufacturePojo(Trace.class); + var trace = factory.manufacturePojo(Trace.class); - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); mockTargetWorkspace(okApikey, workspaceName, workspaceId); @@ -442,8 +450,8 @@ void deleteFeedback__whenApiKeyIsPresent__thenReturnProperResponse(String apiKey @DisplayName("Trace feedback batch, when api key is present, then return proper response") void feedbackBatch__whenApiKeyIsPresent__thenReturnProperResponse(String apiKey, boolean expected) { - Trace trace = factory.manufacturePojo(Trace.class); - String workspaceName = UUID.randomUUID().toString(); + var trace = factory.manufacturePojo(Trace.class); + var workspaceName = 
UUID.randomUUID().toString(); mockTargetWorkspace(okApikey, workspaceName, WORKSPACE_ID); @@ -606,7 +614,7 @@ void delete__whenSessionTokenIsPresent__thenReturnProperResponse(String sessionT void get__whenSessionTokenIsPresent__thenReturnProperResponse(String sessionToken, boolean expected, String workspaceName) { - String projectName = UUID.randomUUID().toString(); + var projectName = UUID.randomUUID().toString(); mockTargetWorkspace(API_KEY, workspaceName, WORKSPACE_ID); @@ -619,7 +627,7 @@ void get__whenSessionTokenIsPresent__thenReturnProperResponse(String sessionToke .build()) .toList(); - traces.forEach(trace -> TracesResourceTest.this.create(trace, API_KEY, workspaceName)); + traces.forEach(trace -> create(trace, API_KEY, workspaceName)); try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) .queryParam("project_name", projectName) @@ -685,7 +693,7 @@ void feedback__whenSessionTokenIsPresent__thenReturnProperResponse(String sessio @DisplayName("delete feedback, when session token is present, then return proper response") void deleteFeedback__whenSessionTokenIsPresent__thenReturnProperResponse(String sessionToken, boolean expected, String workspaceName) { - Trace trace = factory.manufacturePojo(Trace.class); + var trace = factory.manufacturePojo(Trace.class); mockTargetWorkspace(API_KEY, workspaceName, WORKSPACE_ID); @@ -720,7 +728,7 @@ void deleteFeedback__whenSessionTokenIsPresent__thenReturnProperResponse(String void feedbackBatch__whenSessionTokenIsPresent__thenReturnProperResponse(String sessionToken, boolean expected, String workspaceName) { - Trace trace = factory.manufacturePojo(Trace.class); + var trace = factory.manufacturePojo(Trace.class); mockTargetWorkspace(API_KEY, workspaceName, WORKSPACE_ID); @@ -790,9 +798,9 @@ void getByProjectName__whenProjectNameAndIdAreNull__thenReturnBadRequest() { @DisplayName("when project name is not empty, then return traces by project name") void 
getByProjectName__whenProjectNameIsNotEmpty__thenReturnTracesByProjectName() { - String projectName = UUID.randomUUID().toString(); - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var projectName = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -832,9 +840,9 @@ void getByProjectName__whenProjectNameIsNotEmpty__thenReturnTracesByProjectName( @DisplayName("when project id is not empty, then return traces by project id") void getByProjectName__whenProjectIdIsNotEmpty__thenReturnTracesByProjectId() { - String workspaceName = UUID.randomUUID().toString(); - String projectName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var projectName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -908,8 +916,8 @@ void getByProjectName__whenFilterWorkspaceName__thenReturnTracesFiltered() { .build()) .toList(); - traces1.forEach(trace -> TracesResourceTest.this.create(trace, apiKey1, workspaceName1)); - traces2.forEach(trace -> TracesResourceTest.this.create(trace, apiKey2, workspaceName2)); + traces1.forEach(trace -> create(trace, apiKey1, workspaceName1)); + traces2.forEach(trace -> create(trace, apiKey2, workspaceName2)); getAndAssertPage(1, traces2.size() + traces1.size(), projectName1, List.of(), traces1.reversed(), traces2.reversed(), workspaceName1, apiKey1); @@ -920,8 +928,8 @@ void getByProjectName__whenFilterWorkspaceName__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterIdAndNameEqual__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String 
workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -935,12 +943,12 @@ void getByProjectName__whenFilterIdAndNameEqual__thenReturnTracesFiltered() { .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of( TraceFilter.builder() @@ -958,8 +966,8 @@ void getByProjectName__whenFilterIdAndNameEqual__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterNameEqual__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -973,12 +981,12 @@ void getByProjectName__whenFilterNameEqual__thenReturnTracesFiltered() { .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> 
TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.NAME) @@ -990,8 +998,8 @@ void getByProjectName__whenFilterNameEqual__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterNameStartsWith__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1005,12 +1013,12 @@ void getByProjectName__whenFilterNameStartsWith__thenReturnTracesFiltered() { .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.NAME) @@ -1022,8 +1030,8 @@ void getByProjectName__whenFilterNameStartsWith__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterNameEndsWith__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1037,12 +1045,12 @@ void 
getByProjectName__whenFilterNameEndsWith__thenReturnTracesFiltered() { .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.NAME) @@ -1054,8 +1062,8 @@ void getByProjectName__whenFilterNameEndsWith__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterNameContains__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1069,12 +1077,12 @@ void getByProjectName__whenFilterNameContains__thenReturnTracesFiltered() { .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.NAME) @@ -1086,8 +1094,8 @@ void 
getByProjectName__whenFilterNameContains__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterNameNotContains__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1106,12 +1114,12 @@ void getByProjectName__whenFilterNameNotContains__thenReturnTracesFiltered() { traces.set(0, traces.getFirst().toBuilder() .name(generator.generate().toString()) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.NAME) @@ -1123,8 +1131,8 @@ void getByProjectName__whenFilterNameNotContains__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterStartTimeEqual__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1138,12 +1146,12 @@ void getByProjectName__whenFilterStartTimeEqual__thenReturnTracesFiltered() { .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + 
traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.START_TIME) @@ -1155,8 +1163,8 @@ void getByProjectName__whenFilterStartTimeEqual__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterStartTimeGreaterThan__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1174,12 +1182,12 @@ void getByProjectName__whenFilterStartTimeGreaterThan__thenReturnTracesFiltered( traces.set(0, traces.getFirst().toBuilder() .startTime(Instant.now().plusSeconds(60 * 5)) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.START_TIME) @@ -1191,8 +1199,8 @@ void getByProjectName__whenFilterStartTimeGreaterThan__thenReturnTracesFiltered( @Test void getByProjectName__whenFilterStartTimeGreaterThanEqual__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String 
workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1210,12 +1218,12 @@ void getByProjectName__whenFilterStartTimeGreaterThanEqual__thenReturnTracesFilt traces.set(0, traces.getFirst().toBuilder() .startTime(Instant.now().plusSeconds(60 * 5)) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.START_TIME) @@ -1227,8 +1235,8 @@ void getByProjectName__whenFilterStartTimeGreaterThanEqual__thenReturnTracesFilt @Test void getByProjectName__whenFilterStartTimeLessThan__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1246,12 +1254,12 @@ void getByProjectName__whenFilterStartTimeLessThan__thenReturnTracesFiltered() { traces.set(0, traces.getFirst().toBuilder() .startTime(Instant.now().minusSeconds(60 * 5)) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = 
List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.START_TIME) @@ -1263,8 +1271,8 @@ void getByProjectName__whenFilterStartTimeLessThan__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterStartTimeLessThanEqual__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1282,12 +1290,12 @@ void getByProjectName__whenFilterStartTimeLessThanEqual__thenReturnTracesFiltere traces.set(0, traces.getFirst().toBuilder() .startTime(Instant.now().minusSeconds(60 * 5)) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.START_TIME) @@ -1299,8 +1307,8 @@ void getByProjectName__whenFilterStartTimeLessThanEqual__thenReturnTracesFiltere @Test void getByProjectName__whenFilterEndTimeEqual__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = 
UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1314,12 +1322,12 @@ void getByProjectName__whenFilterEndTimeEqual__thenReturnTracesFiltered() { .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.END_TIME) @@ -1331,8 +1339,8 @@ void getByProjectName__whenFilterEndTimeEqual__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterInputEqual__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1346,12 +1354,12 @@ void getByProjectName__whenFilterInputEqual__thenReturnTracesFiltered() { .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> 
create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.INPUT) @@ -1363,8 +1371,8 @@ void getByProjectName__whenFilterInputEqual__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterOutputEqual__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1378,12 +1386,12 @@ void getByProjectName__whenFilterOutputEqual__thenReturnTracesFiltered() { .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.OUTPUT) @@ -1395,8 +1403,8 @@ void getByProjectName__whenFilterOutputEqual__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterMetadataEqualString__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1416,12 +1424,12 @@ void getByProjectName__whenFilterMetadataEqualString__thenReturnTracesFiltered() 
.metadata(JsonUtils.getJsonNodeFromString("{\"model\":[{\"year\":2024,\"version\":\"OpenAI, " + "Chat-GPT 4.0\"}]}")) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1434,8 +1442,8 @@ void getByProjectName__whenFilterMetadataEqualString__thenReturnTracesFiltered() @Test void getByProjectName__whenFilterMetadataEqualNumber__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1454,12 +1462,12 @@ void getByProjectName__whenFilterMetadataEqualNumber__thenReturnTracesFiltered() .metadata(JsonUtils.getJsonNodeFromString("{\"model\":[{\"year\":2023,\"version\":\"OpenAI, " + "Chat-GPT 4.0\"}]}")) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1472,8 
+1480,8 @@ void getByProjectName__whenFilterMetadataEqualNumber__thenReturnTracesFiltered() @Test void getByProjectName__whenFilterMetadataEqualBoolean__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1494,12 +1502,12 @@ void getByProjectName__whenFilterMetadataEqualBoolean__thenReturnTracesFiltered( .metadata(JsonUtils.getJsonNodeFromString("{\"model\":[{\"year\":true,\"version\":\"OpenAI, " + "Chat-GPT 4.0\"}]}")) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1512,8 +1520,8 @@ void getByProjectName__whenFilterMetadataEqualBoolean__thenReturnTracesFiltered( @Test void getByProjectName__whenFilterMetadataEqualNull__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1533,12 +1541,12 @@ void getByProjectName__whenFilterMetadataEqualNull__thenReturnTracesFiltered() { .metadata(JsonUtils.getJsonNodeFromString("{\"model\":[{\"year\":null,\"version\":\"OpenAI, " + "Chat-GPT 
4.0\"}]}")) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1551,8 +1559,8 @@ void getByProjectName__whenFilterMetadataEqualNull__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterMetadataContainsString__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1572,12 +1580,12 @@ void getByProjectName__whenFilterMetadataContainsString__thenReturnTracesFiltere .metadata(JsonUtils.getJsonNodeFromString("{\"model\":[{\"year\":2024,\"version\":\"OpenAI, " + "Chat-GPT 4.0\"}]}")) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1590,8 +1598,8 @@ void getByProjectName__whenFilterMetadataContainsString__thenReturnTracesFiltere @Test void 
getByProjectName__whenFilterMetadataContainsNumber__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1611,12 +1619,12 @@ void getByProjectName__whenFilterMetadataContainsNumber__thenReturnTracesFiltere .metadata(JsonUtils.getJsonNodeFromString("{\"model\":[{\"year\":2023,\"version\":\"OpenAI, " + "Chat-GPT 4.0\"}]}")) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1629,8 +1637,8 @@ void getByProjectName__whenFilterMetadataContainsNumber__thenReturnTracesFiltere @Test void getByProjectName__whenFilterMetadataContainsBoolean__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1651,12 +1659,12 @@ void getByProjectName__whenFilterMetadataContainsBoolean__thenReturnTracesFilter .metadata(JsonUtils.getJsonNodeFromString("{\"model\":[{\"year\":true,\"version\":\"OpenAI, " + "Chat-GPT 4.0\"}]}")) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, 
workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1669,8 +1677,8 @@ void getByProjectName__whenFilterMetadataContainsBoolean__thenReturnTracesFilter @Test void getByProjectName__whenFilterMetadataContainsNull__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1690,12 +1698,12 @@ void getByProjectName__whenFilterMetadataContainsNull__thenReturnTracesFiltered( .metadata(JsonUtils.getJsonNodeFromString("{\"model\":[{\"year\":null,\"version\":\"OpenAI, " + "Chat-GPT 4.0\"}]}")) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1708,8 +1716,8 @@ void getByProjectName__whenFilterMetadataContainsNull__thenReturnTracesFiltered( @Test void getByProjectName__whenFilterMetadataGreaterThanNumber__thenReturnTracesFiltered() { - String 
workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1729,12 +1737,12 @@ void getByProjectName__whenFilterMetadataGreaterThanNumber__thenReturnTracesFilt .metadata(JsonUtils.getJsonNodeFromString("{\"model\":[{\"year\":2024,\"version\":\"OpenAI, " + "Chat-GPT 4.0\"}]}")) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1747,8 +1755,8 @@ void getByProjectName__whenFilterMetadataGreaterThanNumber__thenReturnTracesFilt @Test void getByProjectName__whenFilterMetadataGreaterThanString__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1765,12 +1773,12 @@ void getByProjectName__whenFilterMetadataGreaterThanString__thenReturnTracesFilt .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(); var 
unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1783,8 +1791,8 @@ void getByProjectName__whenFilterMetadataGreaterThanString__thenReturnTracesFilt @Test void getByProjectName__whenFilterMetadataGreaterThanBoolean__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1801,12 +1809,12 @@ void getByProjectName__whenFilterMetadataGreaterThanBoolean__thenReturnTracesFil .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1819,8 +1827,8 @@ void getByProjectName__whenFilterMetadataGreaterThanBoolean__thenReturnTracesFil @Test void getByProjectName__whenFilterMetadataGreaterThanNull__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); 
String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1837,12 +1845,12 @@ void getByProjectName__whenFilterMetadataGreaterThanNull__thenReturnTracesFilter .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1855,8 +1863,8 @@ void getByProjectName__whenFilterMetadataGreaterThanNull__thenReturnTracesFilter @Test void getByProjectName__whenFilterMetadataLessThanNumber__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1876,12 +1884,12 @@ void getByProjectName__whenFilterMetadataLessThanNumber__thenReturnTracesFiltere .metadata(JsonUtils.getJsonNodeFromString("{\"model\":[{\"year\":2024,\"version\":\"OpenAI, " + "Chat-GPT 4.0\"}]}")) .build()); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + 
unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1894,8 +1902,8 @@ void getByProjectName__whenFilterMetadataLessThanNumber__thenReturnTracesFiltere @Test void getByProjectName__whenFilterMetadataLessThanString__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1912,12 +1920,12 @@ void getByProjectName__whenFilterMetadataLessThanString__thenReturnTracesFiltere .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1930,8 +1938,8 @@ void getByProjectName__whenFilterMetadataLessThanString__thenReturnTracesFiltere @Test void getByProjectName__whenFilterMetadataLessThanBoolean__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1948,12 +1956,12 @@ void getByProjectName__whenFilterMetadataLessThanBoolean__thenReturnTracesFilter 
.feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -1966,8 +1974,8 @@ void getByProjectName__whenFilterMetadataLessThanBoolean__thenReturnTracesFilter @Test void getByProjectName__whenFilterMetadataLessThanNull__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -1984,12 +1992,12 @@ void getByProjectName__whenFilterMetadataLessThanNull__thenReturnTracesFiltered( .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.METADATA) @@ -2002,8 +2010,8 @@ void getByProjectName__whenFilterMetadataLessThanNull__thenReturnTracesFiltered( @Test void 
getByProjectName__whenFilterTagsContains__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -2017,12 +2025,12 @@ void getByProjectName__whenFilterTagsContains__thenReturnTracesFiltered() { .feedbackScores(null) .build()) .collect(Collectors.toCollection(ArrayList::new)); - traces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + traces.forEach(trace -> create(trace, apiKey, workspaceName)); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace -> TracesResourceTest.this.create(trace, apiKey, workspaceName)); + unexpectedTraces.forEach(trace -> create(trace, apiKey, workspaceName)); var filters = List.of(TraceFilter.builder() .field(TraceField.TAGS) @@ -2038,8 +2046,8 @@ void getByProjectName__whenFilterTagsContains__thenReturnTracesFiltered() { @Test void getByProjectName__whenFilterFeedbackScoresEqual__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -2061,14 +2069,14 @@ void getByProjectName__whenFilterFeedbackScoresEqual__thenReturnTracesFiltered() .feedbackScores( updateFeedbackScore(traces.get(1).feedbackScores(), traces.getFirst().feedbackScores(), 2)) .build()); - traces.forEach(trace1 -> TracesResourceTest.this.create(trace1, apiKey, workspaceName)); + traces.forEach(trace1 -> create(trace1, apiKey, 
workspaceName)); traces.forEach(trace -> trace.feedbackScores() .forEach(feedbackScore -> create(trace.id(), feedbackScore, workspaceName, apiKey))); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace1 -> TracesResourceTest.this.create(trace1, apiKey, workspaceName)); + unexpectedTraces.forEach(trace1 -> create(trace1, apiKey, workspaceName)); unexpectedTraces.forEach( trace -> trace.feedbackScores() .forEach(feedbackScore -> create(trace.id(), feedbackScore, workspaceName, apiKey))); @@ -2091,8 +2099,8 @@ void getByProjectName__whenFilterFeedbackScoresEqual__thenReturnTracesFiltered() @Test void getByProjectName__whenFilterFeedbackScoresGreaterThan__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -2113,14 +2121,14 @@ void getByProjectName__whenFilterFeedbackScoresGreaterThan__thenReturnTracesFilt traces.set(0, traces.getFirst().toBuilder() .feedbackScores(updateFeedbackScore(traces.getFirst().feedbackScores(), 2, 2345.6789)) .build()); - traces.forEach(trace1 -> TracesResourceTest.this.create(trace1, apiKey, workspaceName)); + traces.forEach(trace1 -> create(trace1, apiKey, workspaceName)); traces.forEach(trace -> trace.feedbackScores() .forEach(feedbackScore -> create(trace.id(), feedbackScore, workspaceName, apiKey))); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace1 -> TracesResourceTest.this.create(trace1, apiKey, workspaceName)); + unexpectedTraces.forEach(trace1 -> create(trace1, apiKey, 
workspaceName)); unexpectedTraces.forEach( trace -> trace.feedbackScores() .forEach(feedbackScore -> create(trace.id(), feedbackScore, workspaceName, apiKey))); @@ -2142,8 +2150,8 @@ void getByProjectName__whenFilterFeedbackScoresGreaterThan__thenReturnTracesFilt @Test void getByProjectName__whenFilterFeedbackScoresGreaterThanEqual__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -2164,14 +2172,14 @@ void getByProjectName__whenFilterFeedbackScoresGreaterThanEqual__thenReturnTrace traces.set(0, traces.getFirst().toBuilder() .feedbackScores(updateFeedbackScore(traces.getFirst().feedbackScores(), 2, 2345.6789)) .build()); - traces.forEach(trace1 -> TracesResourceTest.this.create(trace1, apiKey, workspaceName)); + traces.forEach(trace1 -> create(trace1, apiKey, workspaceName)); traces.forEach(trace -> trace.feedbackScores() .forEach(feedbackScore -> create(trace.id(), feedbackScore, workspaceName, apiKey))); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace1 -> TracesResourceTest.this.create(trace1, apiKey, workspaceName)); + unexpectedTraces.forEach(trace1 -> create(trace1, apiKey, workspaceName)); unexpectedTraces.forEach( trace -> trace.feedbackScores() .forEach(feedbackScore -> create(trace.id(), feedbackScore, workspaceName, apiKey))); @@ -2188,8 +2196,8 @@ void getByProjectName__whenFilterFeedbackScoresGreaterThanEqual__thenReturnTrace @Test void getByProjectName__whenFilterFeedbackScoresLessThan__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + 
var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -2210,14 +2218,14 @@ void getByProjectName__whenFilterFeedbackScoresLessThan__thenReturnTracesFiltere traces.set(0, traces.getFirst().toBuilder() .feedbackScores(updateFeedbackScore(traces.getFirst().feedbackScores(), 2, 1234.5678)) .build()); - traces.forEach(trace1 -> TracesResourceTest.this.create(trace1, apiKey, workspaceName)); + traces.forEach(trace1 -> create(trace1, apiKey, workspaceName)); traces.forEach(trace -> trace.feedbackScores() .forEach(feedbackScore -> create(trace.id(), feedbackScore, workspaceName, apiKey))); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace1 -> TracesResourceTest.this.create(trace1, apiKey, workspaceName)); + unexpectedTraces.forEach(trace1 -> create(trace1, apiKey, workspaceName)); unexpectedTraces.forEach( trace -> trace.feedbackScores() .forEach(feedbackScore -> create(trace.id(), feedbackScore, workspaceName, apiKey))); @@ -2235,8 +2243,8 @@ void getByProjectName__whenFilterFeedbackScoresLessThan__thenReturnTracesFiltere @Test void getByProjectName__whenFilterFeedbackScoresLessThanEqual__thenReturnTracesFiltered() { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -2257,14 +2265,14 @@ void getByProjectName__whenFilterFeedbackScoresLessThanEqual__thenReturnTracesFi traces.set(0, traces.getFirst().toBuilder() .feedbackScores(updateFeedbackScore(traces.getFirst().feedbackScores(), 2, 1234.5678)) .build()); - 
traces.forEach(trace1 -> TracesResourceTest.this.create(trace1, apiKey, workspaceName)); + traces.forEach(trace1 -> create(trace1, apiKey, workspaceName)); traces.forEach(trace -> trace.feedbackScores() .forEach(feedbackScore -> create(trace.id(), feedbackScore, workspaceName, apiKey))); var expectedTraces = List.of(traces.getFirst()); var unexpectedTraces = List.of(factory.manufacturePojo(Trace.class).toBuilder() .projectId(null) .build()); - unexpectedTraces.forEach(trace1 -> TracesResourceTest.this.create(trace1, apiKey, workspaceName)); + unexpectedTraces.forEach(trace1 -> create(trace1, apiKey, workspaceName)); unexpectedTraces.forEach( trace -> trace.feedbackScores() .forEach(feedbackScore -> create(trace.id(), feedbackScore, workspaceName, apiKey))); @@ -2621,8 +2629,8 @@ static Stream getByProjectName__whenFilterInvalidValueOrKeyForFieldType_ @ParameterizedTest @MethodSource void getByProjectName__whenFilterInvalidValueOrKeyForFieldType__thenReturn400(Filter filter) { - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -2651,7 +2659,6 @@ void getByProjectName__whenFilterInvalidValueOrKeyForFieldType__thenReturn400(Fi assertThat(actualError).isEqualTo(expectedError); } } - private void getAndAssertPage(String workspaceName, String projectName, List filters, List traces, List expectedTraces, List unexpectedTraces, String apiKey) { @@ -2682,17 +2689,87 @@ private void getAndAssertPage(int page, int size, String projectName, List filters, + List spans, + List expectedSpans, + List unexpectedSpans, String apiKey) { + int page = 1; + int size = spans.size() + expectedSpans.size() + unexpectedSpans.size(); + getAndAssertPageSpans( + workspaceName, + projectName, + null, + null, + null, + filters, + page, 
+ size, + expectedSpans, + expectedSpans.size(), + unexpectedSpans, apiKey); + } + + private void getAndAssertPageSpans( + String workspaceName, + String projectName, + UUID projectId, + UUID traceId, + SpanType type, + List filters, + int page, + int size, + List expectedSpans, + int expectedTotal, + List unexpectedSpans, String apiKey) { + try (var actualResponse = client.target(URL_TEMPLATE_SPANS.formatted(baseURI)) + .queryParam("page", page) + .queryParam("size", size) + .queryParam("project_name", projectName) + .queryParam("project_id", projectId) + .queryParam("trace_id", traceId) + .queryParam("type", type) + .queryParam("filters", toURLEncodedQueryParam(filters)) + .request() + .header(HttpHeaders.AUTHORIZATION, apiKey) + .header(WORKSPACE_HEADER, workspaceName) + .get()) { + var actualPage = actualResponse.readEntity(Span.SpanPage.class); + var actualSpans = actualPage.content(); + + assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(200); + + assertThat(actualPage.page()).isEqualTo(page); + assertThat(actualPage.size()).isEqualTo(expectedSpans.size()); + assertThat(actualPage.total()).isEqualTo(expectedTotal); + + assertThat(actualSpans.size()).isEqualTo(expectedSpans.size()); + assertThat(actualSpans) + .usingRecursiveFieldByFieldElementComparatorIgnoringFields(IGNORED_FIELDS_SPANS) + .containsExactlyElementsOf(expectedSpans); + assertIgnoredFieldsSpans(actualSpans, expectedSpans); + + if (!unexpectedSpans.isEmpty()) { + assertThat(actualSpans) + .usingRecursiveFieldByFieldElementComparatorIgnoringFields(IGNORED_FIELDS_SPANS) + .doesNotContainAnyElementsOf(unexpectedSpans); + } + } + } + private String toURLEncodedQueryParam(List filters) { return URLEncoder.encode(JsonUtils.writeValueAsString(filters), StandardCharsets.UTF_8); } @@ -2701,9 +2778,6 @@ private void assertIgnoredFields(List actualTraces, List expectedT for (int i = 0; i < actualTraces.size(); i++) { var actualTrace = actualTraces.get(i); var expectedTrace = 
expectedTraces.get(i); - var expectedFeedbackScores = expectedTrace.feedbackScores() == null - ? null - : expectedTrace.feedbackScores().reversed(); assertThat(actualTrace.projectId()).isNotNull(); assertThat(actualTrace.projectName()).isNull(); assertThat(actualTrace.createdAt()).isAfter(expectedTrace.createdAt()); @@ -2711,12 +2785,11 @@ private void assertIgnoredFields(List actualTraces, List expectedT assertThat(actualTrace.lastUpdatedBy()).isEqualTo(USER); assertThat(actualTrace.lastUpdatedBy()).isEqualTo(USER); assertThat(actualTrace.feedbackScores()) - .usingRecursiveComparison( - RecursiveComparisonConfiguration.builder() - .withComparatorForType(BigDecimal::compareTo, BigDecimal.class) - .withIgnoredFields(IGNORED_FIELDS) - .build()) - .isEqualTo(expectedFeedbackScores); + .usingRecursiveComparison() + .withComparatorForType(BigDecimal::compareTo, BigDecimal.class) + .ignoringFields(IGNORED_FIELDS_SCORES) + .ignoringCollectionOrder() + .isEqualTo(expectedTrace.feedbackScores()); if (expectedTrace.feedbackScores() != null) { actualTrace.feedbackScores().forEach(feedbackScore -> { @@ -2729,6 +2802,32 @@ private void assertIgnoredFields(List actualTraces, List expectedT } } + private void assertIgnoredFieldsSpans(List actualSpans, List expectedSpans) { + for (int i = 0; i < actualSpans.size(); i++) { + var actualSpan = actualSpans.get(i); + var expectedSpan = expectedSpans.get(i); + assertThat(actualSpan.projectId()).isNotNull(); + assertThat(actualSpan.projectName()).isNull(); + assertThat(actualSpan.createdAt()).isAfter(expectedSpan.createdAt()); + assertThat(actualSpan.lastUpdatedAt()).isAfter(expectedSpan.lastUpdatedAt()); + assertThat(actualSpan.feedbackScores()) + .usingRecursiveComparison() + .withComparatorForType(BigDecimal::compareTo, BigDecimal.class) + .ignoringFields(IGNORED_FIELDS_SCORES) + .ignoringCollectionOrder() + .isEqualTo(expectedSpan.feedbackScores()); + + if (actualSpan.feedbackScores() != null) { + 
actualSpan.feedbackScores().forEach(feedbackScore -> { + assertThat(feedbackScore.createdAt()).isAfter(expectedSpan.createdAt()); + assertThat(feedbackScore.lastUpdatedAt()).isAfter(expectedSpan.lastUpdatedAt()); + assertThat(feedbackScore.createdBy()).isEqualTo(USER); + assertThat(feedbackScore.lastUpdatedBy()).isEqualTo(USER); + }); + } + } + } + private List updateFeedbackScore(List feedbackScores, int index, double val) { feedbackScores.set(index, feedbackScores.get(index).toBuilder() .value(BigDecimal.valueOf(val)) @@ -2751,8 +2850,8 @@ class GetTrace { @DisplayName("Success") void getTrace() { - String projectName = generator.generate().toString(); - Trace trace = factory.manufacturePojo(Trace.class) + var projectName = generator.generate().toString(); + var trace = factory.manufacturePojo(Trace.class) .toBuilder() .id(null) .name("OpenAPI Trace") @@ -2799,20 +2898,8 @@ void getTrace() { @Test @DisplayName("when trace does not exist, then return not found") void getTrace__whenTraceDoesNotExist__thenReturnNotFound() { - - UUID id = generator.generate(); - - Response actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) - .path(id.toString()) - .request() - .header(HttpHeaders.AUTHORIZATION, API_KEY) - .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .get(); - - assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(404); - assertThat(actualResponse.hasEntity()).isTrue(); - assertThat(actualResponse.readEntity(ErrorMessage.class).errors()) - .allMatch(error -> Pattern.matches("Trace not found", error)); + var id = generator.generate(); + getAndAssertTraceNotFound(id, API_KEY, TEST_WORKSPACE); } } @@ -2825,8 +2912,14 @@ private UUID create(Trace trace, String apiKey, String workspaceName) { .post(Entity.json(trace))) { assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(201); + assertThat(actualResponse.hasEntity()).isFalse(); - return TestUtils.getIdFromLocation(actualResponse.getLocation()); + var actualId = 
TestUtils.getIdFromLocation(actualResponse.getLocation()); + + if (trace.id() != null) { + assertThat(actualId).isEqualTo(trace.id()); + } + return actualId; } } @@ -2844,21 +2937,19 @@ private void create(UUID entityId, FeedbackScore score, String workspaceName, St } } - private Trace getAndAssert(Trace trace, UUID id, UUID projectId, Instant initialTime, String apiKey, - String workspaceName) { + private Trace getAndAssert(Trace trace, UUID projectId, String apiKey, String workspaceName) { - var actualResponse = getById(id, workspaceName, apiKey); + var actualResponse = getById(trace.id(), workspaceName, apiKey); var actualEntity = actualResponse.readEntity(Trace.class); assertThat(actualEntity) .usingRecursiveComparison( RecursiveComparisonConfiguration.builder() - .withIgnoredFields(IGNORED_FIELDS_LIST) + .withIgnoredFields(IGNORED_FIELDS_TRACES) .withComparatorForType(BigDecimal::compareTo, BigDecimal.class) .build()) - .isEqualTo(actualEntity); + .isEqualTo(trace); - assertThat(actualEntity.id()).isEqualTo(id); assertThat(actualEntity.name()).isEqualTo(trace.name()); assertThat(actualEntity.projectId()).isEqualTo(projectId); assertThat(actualEntity.input()).isEqualTo(trace.input()); @@ -2868,12 +2959,26 @@ private Trace getAndAssert(Trace trace, UUID id, UUID projectId, Instant initial assertThat(actualEntity.endTime()).isEqualTo(trace.endTime()); assertThat(actualEntity.startTime()).isEqualTo(trace.startTime()); - assertThat(actualEntity.createdAt()).isBetween(initialTime, Instant.now()); - assertThat(actualEntity.lastUpdatedAt()).isBetween(initialTime, Instant.now()); + assertThat(actualEntity.createdAt()).isAfter(trace.createdAt()); + assertThat(actualEntity.lastUpdatedAt()).isAfter(trace.lastUpdatedAt()); return actualEntity; } + private void getAndAssertTraceNotFound(UUID id, String apiKey, String testWorkspace) { + var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) + .path(id.toString()) + .request() + .header(HttpHeaders.AUTHORIZATION, 
apiKey) + .header(WORKSPACE_HEADER, testWorkspace) + .get(); + + assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(404); + assertThat(actualResponse.hasEntity()).isTrue(); + assertThat(actualResponse.readEntity(ErrorMessage.class).errors()) + .allMatch(error -> Pattern.matches("Trace not found", error)); + } + @Nested @DisplayName("Create:") @TestInstance(TestInstance.Lifecycle.PER_CLASS) @@ -2881,11 +2986,11 @@ class CreateTrace { @Test @DisplayName("Success") - void create() { + void createTrace() { - UUID id = generator.generate(); + var id = generator.generate(); - Trace trace = Trace.builder() + var trace = factory.manufacturePojo(Trace.class).toBuilder() .id(id) .name("OpenAPI traces") .projectName(DEFAULT_PROJECT) @@ -2897,8 +3002,6 @@ void create() { .tags(Set.of("tag1", "tag2")) .build(); - Instant now = Instant.now(); - try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)).request() .accept(MediaType.APPLICATION_JSON_TYPE) .header(HttpHeaders.AUTHORIZATION, API_KEY) @@ -2907,12 +3010,13 @@ void create() { assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(201); assertThat(actualResponse.hasEntity()).isFalse(); - assertThat(actualResponse.getHeaderString("Location")).matches(Pattern.compile(URL_PATTERN)); + var actualId = TestUtils.getIdFromLocation(actualResponse.getLocation()); + assertThat(actualId).isEqualTo(id); } - UUID projectId = getProjectId(trace.projectName(), TEST_WORKSPACE, API_KEY); + var projectId = getProjectId(trace.projectName(), TEST_WORKSPACE, API_KEY); - getAndAssert(trace, id, projectId, now, API_KEY, TEST_WORKSPACE); + getAndAssert(trace, projectId, API_KEY, TEST_WORKSPACE); } @Test @@ -2923,81 +3027,49 @@ void create__whenCreatingTracesWithDifferentWorkspacesNames__thenReturnCreatedTr var trace1 = factory.manufacturePojo(Trace.class) .toBuilder() - .id(null) .projectName(DEFAULT_PROJECT) .build(); var trace2 = factory.manufacturePojo(Trace.class) .toBuilder() - .id(null) 
.projectName(projectName) .build(); - var createdTrace1 = Instant.now(); - UUID id1 = TracesResourceTest.this.create(trace1, API_KEY, TEST_WORKSPACE); - - var createdTrace2 = Instant.now(); - UUID id2 = TracesResourceTest.this.create(trace2, API_KEY, TEST_WORKSPACE); + create(trace1, API_KEY, TEST_WORKSPACE); + create(trace2, API_KEY, TEST_WORKSPACE); UUID projectId1 = getProjectId(DEFAULT_PROJECT, TEST_WORKSPACE, API_KEY); UUID projectId2 = getProjectId(projectName, TEST_WORKSPACE, API_KEY); - getAndAssert(trace1, id1, projectId1, createdTrace1, API_KEY, TEST_WORKSPACE); - getAndAssert(trace2, id2, projectId2, createdTrace2, API_KEY, TEST_WORKSPACE); + getAndAssert(trace1, projectId1, API_KEY, TEST_WORKSPACE); + getAndAssert(trace2, projectId2, API_KEY, TEST_WORKSPACE); } @Test - @DisplayName("when id comes from client, then accept and use id") - void create__whenIdComesFromClient__thenAcceptAndUseId() { - - var traceId = generator.generate(); - - try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)).request() - .accept(MediaType.APPLICATION_JSON_TYPE) - .header(HttpHeaders.AUTHORIZATION, API_KEY) - .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .post(Entity.json( - Trace.builder() - .id(traceId) - .name("OpenAPI traces") - .projectName(UUID.randomUUID().toString()) - .input(JsonUtils.getJsonNodeFromString("{ \"input\": \"data\"}")) - .output(JsonUtils.getJsonNodeFromString("{ \"output\": \"data\"}")) - .endTime(Instant.now()) - .startTime(Instant.now().minusSeconds(10)) - .metadata(JsonUtils.getJsonNodeFromString("{ \"metadata\": \"data\"}")) - .tags(Set.of("tag1", "tag2")) - .build()))) { - - assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(201); - - UUID actualId = TestUtils.getIdFromLocation(actualResponse.getLocation()); + void createWithMissingId() { + var trace = factory.manufacturePojo(Trace.class).toBuilder() + .id(null) + .build(); + var id = create(trace, API_KEY, TEST_WORKSPACE); - 
assertThat(actualId).isEqualTo(traceId); - } + trace = trace.toBuilder().id(id).build(); + var projectId = getProjectId(trace.projectName(), TEST_WORKSPACE, API_KEY); + getAndAssert(trace, projectId, API_KEY, TEST_WORKSPACE); } @Test @DisplayName("when project doesn't exist, then accept and create project") void create__whenProjectDoesNotExist__thenAcceptAndCreateProject() { - String workspaceName = generator.generate().toString(); - String projectName = generator.generate().toString(); - + var workspaceName = generator.generate().toString(); + var projectName = generator.generate().toString(); + var trace = factory.manufacturePojo(Trace.class).toBuilder() + .projectName(projectName) + .build(); try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)).request() .accept(MediaType.APPLICATION_JSON_TYPE) .header(HttpHeaders.AUTHORIZATION, API_KEY) .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .post(Entity.json( - Trace.builder() - .name("OpenAPI traces") - .projectName(projectName) - .input(JsonUtils.getJsonNodeFromString("{ \"input\": \"data\"}")) - .output(JsonUtils.getJsonNodeFromString("{ \"output\": \"data\"}")) - .endTime(Instant.now()) - .startTime(Instant.now().minusSeconds(10)) - .metadata(JsonUtils.getJsonNodeFromString("{ \"metadata\": \"data\"}")) - .tags(Set.of("tag1", "tag2")) - .build()))) { + .post(Entity.json(trace))) { assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(201); } @@ -3020,15 +3092,9 @@ void create__whenProjectNameIsNull__thenAcceptAndUseDefaultProject() { var id = generator.generate(); - Trace trace = Trace.builder() + var trace = factory.manufacturePojo(Trace.class).toBuilder() .id(id) - .name("OpenAPI traces") - .input(JsonUtils.getJsonNodeFromString("{ \"input\": \"data\"}")) - .output(JsonUtils.getJsonNodeFromString("{ \"output\": \"data\"}")) - .endTime(Instant.now()) - .startTime(Instant.now().minusSeconds(10)) - .metadata(JsonUtils.getJsonNodeFromString("{ \"metadata\": \"data\"}")) - 
.tags(Set.of("tag1", "tag2")) + .projectName(null) .build(); try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)).request() @@ -3059,9 +3125,9 @@ class BatchInsert { @Test void batch__whenCreateTraces__thenReturnNoContent() { - String projectName = UUID.randomUUID().toString(); + var projectName = UUID.randomUUID().toString(); - UUID projectId = createProject(projectName, TEST_WORKSPACE, API_KEY); + var projectId = createProject(projectName, TEST_WORKSPACE, API_KEY); var expectedTraces = IntStream.range(0, 1000) .mapToObj(i -> factory.manufacturePojo(Trace.class).toBuilder() @@ -3072,7 +3138,7 @@ void batch__whenCreateTraces__thenReturnNoContent() { .build()) .toList(); - batchCreateAndAssert(expectedTraces, API_KEY, TEST_WORKSPACE); + batchCreateTracesAndAssert(expectedTraces, API_KEY, TEST_WORKSPACE); getAndAssertPage(TEST_WORKSPACE, projectName, List.of(), List.of(), expectedTraces.reversed(), List.of(), API_KEY); @@ -3098,7 +3164,7 @@ void batch__whenSendingMultipleTracesWithSameId__thenReturn422() { .request() .header(HttpHeaders.AUTHORIZATION, API_KEY) .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .post(Entity.json(new TraceBatch(traces)))) { + .post(Entity.json(TraceBatch.builder().traces(traces).build()))) { assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(422); assertThat(actualResponse.hasEntity()).isTrue(); @@ -3117,7 +3183,7 @@ void batch__whenBatchIsInvalid__thenReturn422(List traces, String errorMe .request() .header(HttpHeaders.AUTHORIZATION, API_KEY) .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .post(Entity.json(new TraceBatch(traces)))) { + .post(Entity.json(TraceBatch.builder().traces(traces).build()))) { assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(422); assertThat(actualResponse.hasEntity()).isTrue(); @@ -3154,23 +3220,36 @@ void batch__whenSendingMultipleTracesWithNoId__thenReturnNoContent() { List expectedTraces = List.of(newTrace, expectedTrace); - 
batchCreateAndAssert(expectedTraces, API_KEY, TEST_WORKSPACE); + batchCreateTracesAndAssert(expectedTraces, API_KEY, TEST_WORKSPACE); } + } - private void batchCreateAndAssert(List traces, String apiKey, String workspaceName) { + private void batchCreateTracesAndAssert(List traces, String apiKey, String workspaceName) { - try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) - .path("batch") - .request() - .header(HttpHeaders.AUTHORIZATION, apiKey) - .header(WORKSPACE_HEADER, workspaceName) - .post(Entity.json(new TraceBatch(traces)))) { + try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) + .path("batch") + .request() + .header(HttpHeaders.AUTHORIZATION, apiKey) + .header(WORKSPACE_HEADER, workspaceName) + .post(Entity.json(TraceBatch.builder().traces(traces).build()))) { - assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); - assertThat(actualResponse.hasEntity()).isFalse(); - } + assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); + assertThat(actualResponse.hasEntity()).isFalse(); } + } + + private void batchCreateSpansAndAssert(List expectedSpans, String apiKey, String workspaceName) { + + try (var actualResponse = client.target(URL_TEMPLATE_SPANS.formatted(baseURI)) + .path("batch") + .request() + .header(HttpHeaders.AUTHORIZATION, apiKey) + .header(WORKSPACE_HEADER, workspaceName) + .post(Entity.json(SpanBatch.builder().spans(expectedSpans).build()))) { + assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); + assertThat(actualResponse.hasEntity()).isFalse(); + } } @Nested @@ -3181,63 +3260,352 @@ class DeleteTrace { @Test @DisplayName("Success") void delete() { - Trace trace = factory.manufacturePojo(Trace.class) - .toBuilder() - .id(null) - .endTime(null) - .output(null) - .createdAt(null) - .lastUpdatedAt(null) - .metadata(null) - .tags(null) - .build(); + var apiKey = UUID.randomUUID().toString(); + var workspaceName = 
RandomStringUtils.randomAlphanumeric(10); + var workspaceId = UUID.randomUUID().toString(); + mockTargetWorkspace(apiKey, workspaceName, workspaceId); - var id = create(trace, API_KEY, TEST_WORKSPACE); + var projectName = RandomStringUtils.randomAlphanumeric(10); - try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) - .path(id.toString()) - .request() - .header(HttpHeaders.AUTHORIZATION, API_KEY) - .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .delete()) { + var traces = List.of(factory.manufacturePojo(Trace.class).toBuilder() + .projectName(projectName) + .build()); + batchCreateTracesAndAssert(traces, apiKey, workspaceName); - assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); - assertThat(actualResponse.hasEntity()).isFalse(); - } - } + var spans = traces.stream() + .flatMap(trace -> PodamFactoryUtils.manufacturePojoList(factory, Span.class).stream() + .map(span -> span.toBuilder() + .projectName(projectName) + .traceId(trace.id()) + .build())) + .toList(); + batchCreateSpansAndAssert(spans, apiKey, workspaceName); - @Test - @DisplayName("when trace does not exist, then return no content") - void delete__whenTraceDoesNotExist__thenReturnNotFound() { + var traceScores = traces.stream() + .flatMap(trace -> trace.feedbackScores().stream() + .map(item -> FeedbackScoreMapper.INSTANCE.toFeedbackScoreBatchItem( + trace.id(), projectName, item))) + .toList(); + createAndAssertForTrace(FeedbackScoreBatch.builder().scores(traceScores).build(), workspaceName, apiKey); - UUID id = generator.generate(); + var spanScores = spans.stream() + .flatMap(span -> span.feedbackScores().stream() + .map(item -> FeedbackScoreMapper.INSTANCE.toFeedbackScoreBatchItem( + span.id(), projectName, item))) + .toList(); + createAndAssertForSpan(FeedbackScoreBatch.builder().scores(spanScores).build(), workspaceName, apiKey); - try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)).path(id.toString()) - .request() - 
.header(HttpHeaders.AUTHORIZATION, API_KEY) - .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .delete()) { + getAndAssertPage(workspaceName, projectName, List.of(), traces, traces.reversed(), List.of(), apiKey); + getAndAssertPageSpans(workspaceName, projectName, List.of(), spans, spans.reversed(), List.of(), apiKey); - assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); - assertThat(actualResponse.hasEntity()).isFalse(); - } + deleteAndAssert(traces.getFirst().id(), workspaceName, apiKey); + + getAndAssertPage(workspaceName, projectName, List.of(), traces, List.of(), List.of(), apiKey); + getAndAssertPageSpans(workspaceName, projectName, List.of(), spans, List.of(), List.of(), apiKey); } - } + @Test + void deleteWithoutSpansScores() { + var apiKey = UUID.randomUUID().toString(); + var workspaceName = RandomStringUtils.randomAlphanumeric(10); + var workspaceId = UUID.randomUUID().toString(); + mockTargetWorkspace(apiKey, workspaceName, workspaceId); - @Nested - @DisplayName("Update:") - @TestInstance(TestInstance.Lifecycle.PER_CLASS) - class UpdateTrace { + var projectName = RandomStringUtils.randomAlphanumeric(10); - private Trace trace; - private UUID id; + var traces = List.of(factory.manufacturePojo(Trace.class).toBuilder() + .projectName(projectName) + .build()); + batchCreateTracesAndAssert(traces, apiKey, workspaceName); + + var spans = traces.stream() + .flatMap(trace -> PodamFactoryUtils.manufacturePojoList(factory, Span.class).stream() + .map(span -> span.toBuilder() + .projectName(projectName) + .traceId(trace.id()) + .feedbackScores(null) + .build())) + .toList(); + batchCreateSpansAndAssert(spans, apiKey, workspaceName); + + var traceScores = traces.stream() + .flatMap(trace -> trace.feedbackScores().stream() + .map(item -> FeedbackScoreMapper.INSTANCE.toFeedbackScoreBatchItem( + trace.id(), projectName, item))) + .toList(); + createAndAssertForTrace(FeedbackScoreBatch.builder().scores(traceScores).build(), workspaceName, apiKey); + 
+ getAndAssertPage(workspaceName, projectName, List.of(), traces, traces.reversed(), List.of(), apiKey); + getAndAssertPageSpans(workspaceName, projectName, List.of(), spans, spans.reversed(), List.of(), apiKey); + + deleteAndAssert(traces.getFirst().id(), workspaceName, apiKey); + + getAndAssertPage(workspaceName, projectName, List.of(), traces, List.of(), List.of(), apiKey); + getAndAssertPageSpans(workspaceName, projectName, List.of(), spans, List.of(), List.of(), apiKey); + } + + @Test + void deleteWithoutScores() { + var apiKey = UUID.randomUUID().toString(); + var workspaceName = RandomStringUtils.randomAlphanumeric(10); + var workspaceId = UUID.randomUUID().toString(); + mockTargetWorkspace(apiKey, workspaceName, workspaceId); + + var projectName = RandomStringUtils.randomAlphanumeric(10); + + var traces = List.of(factory.manufacturePojo(Trace.class).toBuilder() + .projectName(projectName) + .feedbackScores(null) + .build()); + batchCreateTracesAndAssert(traces, apiKey, workspaceName); + + var spans = traces.stream() + .flatMap(trace -> PodamFactoryUtils.manufacturePojoList(factory, Span.class).stream() + .map(span -> span.toBuilder() + .projectName(projectName) + .traceId(trace.id()) + .feedbackScores(null) + .build())) + .toList(); + batchCreateSpansAndAssert(spans, apiKey, workspaceName); + + getAndAssertPage(workspaceName, projectName, List.of(), traces, traces.reversed(), List.of(), apiKey); + getAndAssertPageSpans(workspaceName, projectName, List.of(), spans, spans.reversed(), List.of(), apiKey); + + deleteAndAssert(traces.getFirst().id(), workspaceName, apiKey); + + getAndAssertPage(workspaceName, projectName, List.of(), traces, List.of(), List.of(), apiKey); + getAndAssertPageSpans(workspaceName, projectName, List.of(), spans, List.of(), List.of(), apiKey); + } + + @Test + void deleteWithoutSpans() { + var apiKey = UUID.randomUUID().toString(); + var workspaceName = RandomStringUtils.randomAlphanumeric(10); + var workspaceId = 
UUID.randomUUID().toString(); + mockTargetWorkspace(apiKey, workspaceName, workspaceId); + + var projectName = RandomStringUtils.randomAlphanumeric(10); + + var traces = List.of(factory.manufacturePojo(Trace.class).toBuilder() + .projectName(projectName) + .feedbackScores(null) + .build()); + batchCreateTracesAndAssert(traces, apiKey, workspaceName); + + getAndAssertPage(workspaceName, projectName, List.of(), traces, traces.reversed(), List.of(), apiKey); + + deleteAndAssert(traces.getFirst().id(), workspaceName, apiKey); + + getAndAssertPage(workspaceName, projectName, List.of(), traces, List.of(), List.of(), apiKey); + } + + @Test + @DisplayName("when trace does not exist, then return no content") + void delete__whenTraceDoesNotExist__thenNoContent() { + var apiKey = UUID.randomUUID().toString(); + var workspaceName = RandomStringUtils.randomAlphanumeric(10); + var workspaceId = UUID.randomUUID().toString(); + mockTargetWorkspace(apiKey, workspaceName, workspaceId); + + var id = generator.generate(); + + deleteAndAssert(id, workspaceName, apiKey); + + getAndAssertTraceNotFound(id, apiKey, workspaceName); + } + } + + @Nested + @TestInstance(TestInstance.Lifecycle.PER_CLASS) + class DeleteTraces { + + @Test + void deleteTraces() { + var apiKey = UUID.randomUUID().toString(); + var workspaceName = RandomStringUtils.randomAlphanumeric(10); + var workspaceId = UUID.randomUUID().toString(); + mockTargetWorkspace(apiKey, workspaceName, workspaceId); + + var projectName = RandomStringUtils.randomAlphanumeric(10); + + var traces = PodamFactoryUtils.manufacturePojoList(factory, Trace.class).stream() + .map(trace -> trace.toBuilder() + .projectName(projectName) + .build()) + .toList(); + batchCreateTracesAndAssert(traces, apiKey, workspaceName); + + var spans = traces.stream() + .flatMap(trace -> PodamFactoryUtils.manufacturePojoList(factory, Span.class).stream() + .map(span -> span.toBuilder() + .projectName(projectName) + .traceId(trace.id()) + .build())) + .toList(); + 
batchCreateSpansAndAssert(spans, apiKey, workspaceName); + + var traceScores = traces.stream() + .flatMap(trace -> trace.feedbackScores().stream() + .map(item -> FeedbackScoreMapper.INSTANCE.toFeedbackScoreBatchItem( + trace.id(), projectName, item))) + .toList(); + createAndAssertForTrace(FeedbackScoreBatch.builder().scores(traceScores).build(), workspaceName, apiKey); + + var spanScores = spans.stream() + .flatMap(span -> span.feedbackScores().stream() + .map(item -> FeedbackScoreMapper.INSTANCE.toFeedbackScoreBatchItem( + span.id(), projectName, item))) + .toList(); + createAndAssertForSpan(FeedbackScoreBatch.builder().scores(spanScores).build(), workspaceName, apiKey); + + getAndAssertPage(workspaceName, projectName, List.of(), traces, traces.reversed(), List.of(), apiKey); + getAndAssertPageSpans(workspaceName, projectName, List.of(), spans, spans.reversed(), List.of(), apiKey); + + var request = TracesDelete.builder() + .ids(traces.stream().map(Trace::id).collect(Collectors.toUnmodifiableSet())) + .build(); + deleteAndAssert(request, workspaceName, apiKey); + + getAndAssertPage(workspaceName, projectName, List.of(), traces, List.of(), List.of(), apiKey); + getAndAssertPageSpans(workspaceName, projectName, List.of(), spans, List.of(), List.of(), apiKey); + } + + @Test + void deleteTracesWithoutSpansScores() { + var apiKey = UUID.randomUUID().toString(); + var workspaceName = RandomStringUtils.randomAlphanumeric(10); + var workspaceId = UUID.randomUUID().toString(); + mockTargetWorkspace(apiKey, workspaceName, workspaceId); + + var projectName = RandomStringUtils.randomAlphanumeric(10); + + var traces = PodamFactoryUtils.manufacturePojoList(factory, Trace.class).stream() + .map(trace -> trace.toBuilder() + .projectName(projectName) + .build()) + .toList(); + batchCreateTracesAndAssert(traces, apiKey, workspaceName); + + var spans = traces.stream() + .flatMap(trace -> PodamFactoryUtils.manufacturePojoList(factory, Span.class).stream() + .map(span -> 
span.toBuilder() + .projectName(projectName) + .traceId(trace.id()) + .feedbackScores(null) + .build())) + .toList(); + batchCreateSpansAndAssert(spans, apiKey, workspaceName); + + var traceScores = traces.stream() + .flatMap(trace -> trace.feedbackScores().stream() + .map(item -> FeedbackScoreMapper.INSTANCE.toFeedbackScoreBatchItem( + trace.id(), projectName, item))) + .toList(); + createAndAssertForTrace(FeedbackScoreBatch.builder().scores(traceScores).build(), workspaceName, apiKey); + + getAndAssertPage(workspaceName, projectName, List.of(), traces, traces.reversed(), List.of(), apiKey); + getAndAssertPageSpans(workspaceName, projectName, List.of(), spans, spans.reversed(), List.of(), apiKey); + + var request = TracesDelete.builder() + .ids(traces.stream().map(Trace::id).collect(Collectors.toUnmodifiableSet())) + .build(); + deleteAndAssert(request, workspaceName, apiKey); + + getAndAssertPage(workspaceName, projectName, List.of(), traces, List.of(), List.of(), apiKey); + getAndAssertPageSpans(workspaceName, projectName, List.of(), spans, List.of(), List.of(), apiKey); + } + + @Test + void deleteTracesWithoutScores() { + var apiKey = UUID.randomUUID().toString(); + var workspaceName = RandomStringUtils.randomAlphanumeric(10); + var workspaceId = UUID.randomUUID().toString(); + mockTargetWorkspace(apiKey, workspaceName, workspaceId); + + var projectName = RandomStringUtils.randomAlphanumeric(10); + + var traces = PodamFactoryUtils.manufacturePojoList(factory, Trace.class).stream() + .map(trace -> trace.toBuilder() + .projectName(projectName) + .feedbackScores(null) + .build()) + .toList(); + batchCreateTracesAndAssert(traces, apiKey, workspaceName); + + var spans = traces.stream() + .flatMap(trace -> PodamFactoryUtils.manufacturePojoList(factory, Span.class).stream() + .map(span -> span.toBuilder() + .projectName(projectName) + .traceId(trace.id()) + .feedbackScores(null) + .build())) + .toList(); + batchCreateSpansAndAssert(spans, apiKey, workspaceName); + + 
getAndAssertPage(workspaceName, projectName, List.of(), traces, traces.reversed(), List.of(), apiKey); + getAndAssertPageSpans(workspaceName, projectName, List.of(), spans, spans.reversed(), List.of(), apiKey); + + var request = TracesDelete.builder() + .ids(traces.stream().map(Trace::id).collect(Collectors.toUnmodifiableSet())) + .build(); + deleteAndAssert(request, workspaceName, apiKey); + + getAndAssertPage(workspaceName, projectName, List.of(), traces, List.of(), List.of(), apiKey); + getAndAssertPageSpans(workspaceName, projectName, List.of(), spans, List.of(), List.of(), apiKey); + } + + @Test + void deleteTracesWithoutSpans() { + var apiKey = UUID.randomUUID().toString(); + var workspaceName = RandomStringUtils.randomAlphanumeric(10); + var workspaceId = UUID.randomUUID().toString(); + mockTargetWorkspace(apiKey, workspaceName, workspaceId); + + var projectName = RandomStringUtils.randomAlphanumeric(10); + + var traces = PodamFactoryUtils.manufacturePojoList(factory, Trace.class).stream() + .map(trace -> trace.toBuilder() + .projectName(projectName) + .feedbackScores(null) + .build()) + .toList(); + batchCreateTracesAndAssert(traces, apiKey, workspaceName); + + getAndAssertPage(workspaceName, projectName, List.of(), traces, traces.reversed(), List.of(), apiKey); + + var request = TracesDelete.builder() + .ids(traces.stream().map(Trace::id).collect(Collectors.toUnmodifiableSet())) + .build(); + deleteAndAssert(request, workspaceName, apiKey); + + getAndAssertPage(workspaceName, projectName, List.of(), traces, List.of(), List.of(), apiKey); + } + + @Test + void deleteTracesWithoutTraces() { + var apiKey = UUID.randomUUID().toString(); + var workspaceName = RandomStringUtils.randomAlphanumeric(10); + var workspaceId = UUID.randomUUID().toString(); + mockTargetWorkspace(apiKey, workspaceName, workspaceId); + + var request = factory.manufacturePojo(TracesDelete.class); + deleteAndAssert(request, workspaceName, apiKey); + } + } + + @Nested + 
@DisplayName("Update:") + @TestInstance(TestInstance.Lifecycle.PER_CLASS) + class UpdateTrace { + + private Trace trace; + private UUID id; @BeforeEach void setUp() { trace = factory.manufacturePojo(Trace.class) .toBuilder() - .id(null) .endTime(null) .output(null) .startTime(Instant.now().minusSeconds(10)) @@ -3311,9 +3679,7 @@ void when__traceUpdateAndInsertAreProcessedOutOfOther__thenReturnTrace() { .projectId(null) .build(); - var startCreation = Instant.now(); runPatchAndAssertStatus(id, traceUpdate, API_KEY, TEST_WORKSPACE); - var created = Instant.now(); var newTrace = factory.manufacturePojo(Trace.class).toBuilder() .projectName(traceUpdate.projectName()) @@ -3335,7 +3701,7 @@ void when__traceUpdateAndInsertAreProcessedOutOfOther__thenReturnTrace() { assertThat(actualEntity.name()).isEqualTo(newTrace.name()); assertThat(actualEntity.startTime()).isEqualTo(newTrace.startTime()); - assertThat(actualEntity.createdAt()).isBetween(startCreation, created); + assertThat(actualEntity.createdAt()).isBefore(newTrace.createdAt()); } @Test @@ -3411,8 +3777,7 @@ private void runPatchAndAssertStatus(UUID id, TraceUpdate traceUpdate3, String a @Test @DisplayName("Success") void update() { - - TraceUpdate traceUpdate = TraceUpdate.builder() + var traceUpdate = TraceUpdate.builder() .endTime(Instant.now()) .input(JsonUtils.getJsonNodeFromString("{ \"input\": \"data\"}")) .output(JsonUtils.getJsonNodeFromString("{ \"output\": \"data\"}")) @@ -3438,8 +3803,8 @@ void update() { assertThat(actualEntity.endTime()).isEqualTo(traceUpdate.endTime()); assertThat(actualEntity.startTime()).isEqualTo(trace.startTime()); - assertThat(actualEntity.createdAt().isBefore(traceUpdate.endTime())).isTrue(); - assertThat(actualEntity.lastUpdatedAt().isAfter(traceUpdate.endTime())).isTrue(); + assertThat(actualEntity.createdAt()).isAfter(trace.createdAt()); + assertThat(actualEntity.lastUpdatedAt()).isAfter(traceUpdate.endTime()); } @Test @@ -3515,7 +3880,7 @@ void 
update__whenTagsIsEmpty__thenAcceptUpdate() { UUID projectId = getProjectId(trace.projectName(), TEST_WORKSPACE, API_KEY); - Trace actualTrace = getAndAssert(trace, id, projectId, trace.createdAt().minusMillis(1), API_KEY, + Trace actualTrace = getAndAssert(trace, projectId, API_KEY, TEST_WORKSPACE); assertThat(actualTrace.tags()).isNull(); @@ -3536,8 +3901,8 @@ void update__whenMetadataIsEmpty__thenAcceptUpdate() { UUID projectId = getProjectId(trace.projectName(), TEST_WORKSPACE, API_KEY); - Trace actualTrace = getAndAssert(trace.toBuilder().metadata(metadata).build(), id, projectId, - trace.createdAt().minusMillis(1), API_KEY, TEST_WORKSPACE); + Trace actualTrace = getAndAssert(trace.toBuilder().metadata(metadata).build(), projectId, + API_KEY, TEST_WORKSPACE); assertThat(actualTrace.metadata()).isEqualTo(metadata); } @@ -3557,8 +3922,8 @@ void update__whenInputIsEmpty__thenAcceptUpdate() { UUID projectId = getProjectId(trace.projectName(), TEST_WORKSPACE, API_KEY); - Trace actualTrace = getAndAssert(trace.toBuilder().input(input).build(), id, projectId, - trace.createdAt().minusMillis(1), API_KEY, TEST_WORKSPACE); + Trace actualTrace = getAndAssert(trace.toBuilder().input(input).build(), projectId, + API_KEY, TEST_WORKSPACE); assertThat(actualTrace.input()).isEqualTo(input); } @@ -3578,8 +3943,8 @@ void update__whenOutputIsEmpty__thenAcceptUpdate() { UUID projectId = getProjectId(trace.projectName(), TEST_WORKSPACE, API_KEY); - Trace actualTrace = getAndAssert(trace.toBuilder().output(output).build(), id, projectId, - trace.createdAt().minusMillis(1), API_KEY, TEST_WORKSPACE); + Trace actualTrace = getAndAssert(trace.toBuilder().output(output).build(), projectId, + API_KEY, TEST_WORKSPACE); assertThat(actualTrace.output()).isEqualTo(output); } @@ -3606,7 +3971,7 @@ void update__whenUpdatingUsingProjectId__thenAcceptUpdate() { .tags(traceUpdate.tags()) .build(); - getAndAssert(updatedTrace, id, projectId, trace.createdAt().minusMillis(1), API_KEY, 
TEST_WORKSPACE); + getAndAssert(updatedTrace, projectId, API_KEY, TEST_WORKSPACE); } } @@ -3623,12 +3988,38 @@ private Response getById(UUID id, String workspaceName, String apiKey) { return response; } + private void deleteAndAssert(UUID id, String workspaceName, String apiKey) { + try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) + .path(id.toString()) + .request() + .header(HttpHeaders.AUTHORIZATION, apiKey) + .header(WORKSPACE_HEADER, workspaceName) + .delete()) { + + assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); + assertThat(actualResponse.hasEntity()).isFalse(); + } + } + + private void deleteAndAssert(TracesDelete request, String workspaceName, String apiKey) { + try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) + .path("delete") + .request() + .header(HttpHeaders.AUTHORIZATION, apiKey) + .header(WORKSPACE_HEADER, workspaceName) + .post(Entity.json(request))) { + + assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); + assertThat(actualResponse.hasEntity()).isFalse(); + } + } + @Nested @DisplayName("Feedback:") @TestInstance(TestInstance.Lifecycle.PER_CLASS) class TraceFeedback { - public Stream invalidRequestBodyParams() { + Stream invalidRequestBodyParams() { return Stream.of( arguments(factory.manufacturePojo(FeedbackScore.class).toBuilder().name(null).build(), "name must not be blank"), @@ -3650,7 +4041,7 @@ public Stream invalidRequestBodyParams() { @DisplayName("when trace does not exist, then return not found") void feedback__whenTraceDoesNotExist__thenReturnNotFound() { - UUID id = generator.generate(); + var id = generator.generate(); try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) .path(id.toString()) @@ -3694,18 +4085,14 @@ void feedback__whenFeedbackWithoutCategoryNameOrReason__thenReturnNoContent() { var trace = factory.manufacturePojo(Trace.class) .toBuilder() - .id(null) .projectName(DEFAULT_PROJECT) .endTime(null) 
.output(null) - .createdAt(null) - .lastUpdatedAt(null) .metadata(null) .tags(null) .feedbackScores(null) .build(); - var now = Instant.now(); var id = create(trace, API_KEY, TEST_WORKSPACE); FeedbackScore score = factory.manufacturePojo(FeedbackScore.class).toBuilder() @@ -3718,7 +4105,7 @@ void feedback__whenFeedbackWithoutCategoryNameOrReason__thenReturnNoContent() { UUID projectId = getProjectId(trace.projectName(), TEST_WORKSPACE, API_KEY); - var actualEntity = getAndAssert(trace, id, projectId, now, API_KEY, TEST_WORKSPACE); + var actualEntity = getAndAssert(trace, projectId, API_KEY, TEST_WORKSPACE); assertThat(actualEntity.feedbackScores()).hasSize(1); @@ -3733,18 +4120,14 @@ void feedback__whenFeedbackWithCategoryNameOrReason__thenReturnNoContent() { var trace = factory.manufacturePojo(Trace.class) .toBuilder() - .id(null) .projectName(DEFAULT_PROJECT) .endTime(null) .output(null) - .createdAt(null) - .lastUpdatedAt(null) .metadata(null) .tags(null) .feedbackScores(null) .build(); - var now = Instant.now(); var id = create(trace, API_KEY, TEST_WORKSPACE); var score = factory.manufacturePojo(FeedbackScore.class).toBuilder() @@ -3755,7 +4138,7 @@ void feedback__whenFeedbackWithCategoryNameOrReason__thenReturnNoContent() { UUID projectId = getProjectId(trace.projectName(), TEST_WORKSPACE, API_KEY); - Trace actualEntity = getAndAssert(trace, id, projectId, now, API_KEY, TEST_WORKSPACE); + Trace actualEntity = getAndAssert(trace, projectId, API_KEY, TEST_WORKSPACE); assertThat(actualEntity.feedbackScores()).hasSize(1); FeedbackScore actualScore = actualEntity.feedbackScores().getFirst(); @@ -3766,22 +4149,16 @@ void feedback__whenFeedbackWithCategoryNameOrReason__thenReturnNoContent() { @Test @DisplayName("when overriding feedback value, then return no content") void feedback__whenOverridingFeedbackValue__thenReturnNoContent() { - - String workspaceName = UUID.randomUUID().toString(); var trace = factory.manufacturePojo(Trace.class) .toBuilder() 
.projectName(DEFAULT_PROJECT) - .id(null) .endTime(null) .output(null) - .createdAt(null) - .lastUpdatedAt(null) .metadata(null) .tags(null) .feedbackScores(null) .build(); - var now = Instant.now(); var id = create(trace, API_KEY, TEST_WORKSPACE); var score = factory.manufacturePojo(FeedbackScore.class); @@ -3792,7 +4169,7 @@ void feedback__whenOverridingFeedbackValue__thenReturnNoContent() { create(id, newScore, TEST_WORKSPACE, API_KEY); UUID projectId = getProjectId(trace.projectName(), TEST_WORKSPACE, API_KEY); - var actualEntity = getAndAssert(trace, id, projectId, now, API_KEY, TEST_WORKSPACE); + var actualEntity = getAndAssert(trace, projectId, API_KEY, TEST_WORKSPACE); assertThat(actualEntity.feedbackScores()).hasSize(1); FeedbackScore actualScore = actualEntity.feedbackScores().getFirst(); @@ -3831,7 +4208,7 @@ void deleteFeedback__whenTraceDoesNotExist__thenReturnNoContent() { @DisplayName("Success") void deleteFeedback() { - Trace trace = factory.manufacturePojo(Trace.class); + var trace = factory.manufacturePojo(Trace.class); var id = create(trace, API_KEY, TEST_WORKSPACE); var score = FeedbackScore.builder() .name("name") @@ -3869,7 +4246,7 @@ void deleteFeedback() { @TestInstance(TestInstance.Lifecycle.PER_CLASS) class BatchTracesFeedback { - public Stream invalidRequestBodyParams() { + Stream invalidRequestBodyParams() { return Stream.of( arguments(FeedbackScoreBatch.builder().build(), "scores must not be null"), arguments(FeedbackScoreBatch.builder().scores(List.of()).build(), @@ -3919,17 +4296,11 @@ public Stream invalidRequestBodyParams() { @Test @DisplayName("Success") void feedback() { - - Instant now = Instant.now(); - var trace = factory.manufacturePojo(Trace.class) .toBuilder() .projectName(DEFAULT_PROJECT) - .id(null) .endTime(null) .output(null) - .createdAt(null) - .lastUpdatedAt(null) .metadata(null) .tags(null) .feedbackScores(null) @@ -3940,11 +4311,8 @@ void feedback() { var trace2 = factory.manufacturePojo(Trace.class) .toBuilder() 
.projectName(UUID.randomUUID().toString()) - .id(null) .endTime(null) .output(null) - .createdAt(null) - .lastUpdatedAt(null) .metadata(null) .tags(null) .feedbackScores(null) @@ -3972,23 +4340,14 @@ void feedback() { .value(factory.manufacturePojo(BigDecimal.class)) .build(); - try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) - .path("feedback-scores") - .request() - .header(HttpHeaders.AUTHORIZATION, API_KEY) - .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .put(Entity.json( - new FeedbackScoreBatch(List.of(score, score2, score3))))) { - - assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); - assertThat(actualResponse.hasEntity()).isFalse(); - } + var feedbackScoreBatch = FeedbackScoreBatch.builder().scores(List.of(score, score2, score3)).build(); + createAndAssertForTrace(feedbackScoreBatch, TEST_WORKSPACE, API_KEY); UUID projectId = getProjectId(trace.projectName(), TEST_WORKSPACE, API_KEY); UUID projectId2 = getProjectId(trace2.projectName(), TEST_WORKSPACE, API_KEY); - var actualTrace1 = getAndAssert(trace, id, projectId, now, API_KEY, TEST_WORKSPACE); - var actualTrace2 = getAndAssert(trace2, id2, projectId2, now, API_KEY, TEST_WORKSPACE); + var actualTrace1 = getAndAssert(trace, projectId, API_KEY, TEST_WORKSPACE); + var actualTrace2 = getAndAssert(trace2, projectId2, API_KEY, TEST_WORKSPACE); assertThat(actualTrace2.feedbackScores()).hasSize(1); assertThat(actualTrace1.feedbackScores()).hasSize(2); @@ -4000,11 +4359,9 @@ void feedback() { @Test @DisplayName("when workspace is specified, then return no content") void feedback__whenWorkspaceIsSpecified__thenReturnNoContent() { - - Instant now = Instant.now(); - String projectName = UUID.randomUUID().toString(); - String workspaceName = UUID.randomUUID().toString(); - String workspaceId = UUID.randomUUID().toString(); + var projectName = UUID.randomUUID().toString(); + var workspaceName = UUID.randomUUID().toString(); + var workspaceId = 
UUID.randomUUID().toString(); String apiKey = UUID.randomUUID().toString(); mockTargetWorkspace(apiKey, workspaceName, workspaceId); @@ -4044,22 +4401,14 @@ void feedback__whenWorkspaceIsSpecified__thenReturnNoContent() { .value(factory.manufacturePojo(BigDecimal.class)) .build(); - try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) - .path("feedback-scores") - .request() - .header(HttpHeaders.AUTHORIZATION, apiKey) - .header(WORKSPACE_HEADER, workspaceName) - .put(Entity.json(new FeedbackScoreBatch(List.of(score, score2, score3))))) { - - assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); - assertThat(actualResponse.hasEntity()).isFalse(); - } + var feedbackScoreBatch = FeedbackScoreBatch.builder().scores(List.of(score, score2, score3)).build(); + createAndAssertForTrace(feedbackScoreBatch, workspaceName, apiKey); UUID projectId = getProjectId(DEFAULT_PROJECT, workspaceName, apiKey); UUID projectId2 = getProjectId(projectName, workspaceName, apiKey); - var actualTrace1 = getAndAssert(expectedTrace1, id, projectId, now, apiKey, workspaceName); - var actualTrace2 = getAndAssert(expectedTrace2, id2, projectId2, now, apiKey, workspaceName); + var actualTrace1 = getAndAssert(expectedTrace1, projectId, apiKey, workspaceName); + var actualTrace2 = getAndAssert(expectedTrace2, projectId2, apiKey, workspaceName); assertThat(actualTrace2.feedbackScores()).hasSize(1); assertThat(actualTrace1.feedbackScores()).hasSize(2); @@ -4092,18 +4441,14 @@ void feedback__whenFeedbackWithoutCategoryNameOrReason__thenReturnNoContent() { var trace = factory.manufacturePojo(Trace.class) .toBuilder() - .id(null) .projectName(DEFAULT_PROJECT) .endTime(null) .output(null) - .createdAt(null) - .lastUpdatedAt(null) .metadata(null) .tags(null) .feedbackScores(null) .build(); - var now = Instant.now(); var id = create(trace, API_KEY, TEST_WORKSPACE); var score = factory.manufacturePojo(FeedbackScoreBatchItem.class).toBuilder() @@ -4114,20 +4459,12 @@ 
void feedback__whenFeedbackWithoutCategoryNameOrReason__thenReturnNoContent() { .reason(null) .build(); - try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) - .path("feedback-scores") - .request() - .header(HttpHeaders.AUTHORIZATION, API_KEY) - .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .put(Entity.json(new FeedbackScoreBatch(List.of(score))))) { - - assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); - assertThat(actualResponse.hasEntity()).isFalse(); - } + createAndAssertForTrace(FeedbackScoreBatch.builder().scores(List.of(score)).build(), TEST_WORKSPACE, + API_KEY); UUID projectId = getProjectId(trace.projectName(), TEST_WORKSPACE, API_KEY); - var actualEntity = getAndAssert(trace, id, projectId, now, API_KEY, TEST_WORKSPACE); + var actualEntity = getAndAssert(trace, projectId, API_KEY, TEST_WORKSPACE); assertThat(actualEntity.feedbackScores()).hasSize(1); @@ -4140,22 +4477,18 @@ void feedback__whenFeedbackWithoutCategoryNameOrReason__thenReturnNoContent() { @DisplayName("when feedback with category name or reason, then return no content") void feedback__whenFeedbackWithCategoryNameOrReason__thenReturnNoContent() { - String projectName = UUID.randomUUID().toString(); + var projectName = UUID.randomUUID().toString(); Trace expectedTrace = factory.manufacturePojo(Trace.class) .toBuilder() - .id(null) .projectName(projectName) .endTime(null) .output(null) - .createdAt(null) - .lastUpdatedAt(null) .metadata(null) .tags(null) .feedbackScores(null) .build(); - var now = Instant.now(); var id = create(expectedTrace, API_KEY, TEST_WORKSPACE); var score = factory.manufacturePojo(FeedbackScoreBatchItem.class).toBuilder() @@ -4164,19 +4497,11 @@ void feedback__whenFeedbackWithCategoryNameOrReason__thenReturnNoContent() { .value(factory.manufacturePojo(BigDecimal.class)) .build(); - try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) - .path("feedback-scores") - .request() - 
.header(HttpHeaders.AUTHORIZATION, API_KEY) - .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .put(Entity.json(new FeedbackScoreBatch(List.of(score))))) { - - assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); - assertThat(actualResponse.hasEntity()).isFalse(); - } + createAndAssertForTrace(FeedbackScoreBatch.builder().scores(List.of(score)).build(), TEST_WORKSPACE, + API_KEY); - var actualEntity = getAndAssert(expectedTrace, id, - getProjectId(expectedTrace.projectName(), TEST_WORKSPACE, API_KEY), now, API_KEY, + var actualEntity = getAndAssert(expectedTrace, + getProjectId(expectedTrace.projectName(), TEST_WORKSPACE, API_KEY), API_KEY, TEST_WORKSPACE); assertThat(actualEntity.feedbackScores()).hasSize(1); @@ -4189,23 +4514,18 @@ void feedback__whenFeedbackWithCategoryNameOrReason__thenReturnNoContent() { @DisplayName("when overriding feedback value, then return no content") void feedback__whenOverridingFeedbackValue__thenReturnNoContent() { - String projectName = UUID.randomUUID().toString(); + var projectName = UUID.randomUUID().toString(); var trace = factory.manufacturePojo(Trace.class) .toBuilder() - .id(null) .projectName(projectName) .endTime(null) .output(null) - .createdAt(null) - .lastUpdatedAt(null) .metadata(null) .tags(null) .feedbackScores(null) .feedbackScores(null) .build(); - Instant now = Instant.now(); - var id = create(trace, API_KEY, TEST_WORKSPACE); var score = factory.manufacturePojo(FeedbackScoreBatchItem.class).toBuilder() @@ -4213,34 +4533,17 @@ void feedback__whenOverridingFeedbackValue__thenReturnNoContent() { .projectName(trace.projectName()) .build(); - try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) - .path("feedback-scores") - .request() - .header(HttpHeaders.AUTHORIZATION, API_KEY) - .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .put(Entity.json(new FeedbackScoreBatch(List.of(score))))) { - - assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); - 
assertThat(actualResponse.hasEntity()).isFalse(); - } + createAndAssertForTrace(FeedbackScoreBatch.builder().scores(List.of(score)).build(), TEST_WORKSPACE, + API_KEY); FeedbackScoreBatchItem newItem = score.toBuilder().value(factory.manufacturePojo(BigDecimal.class)).build(); - try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) - .path("feedback-scores") - .request() - .header(HttpHeaders.AUTHORIZATION, API_KEY) - .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .put(Entity.json( - new FeedbackScoreBatch(List.of(newItem))))) { - - assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); - assertThat(actualResponse.hasEntity()).isFalse(); - } + createAndAssertForTrace(FeedbackScoreBatch.builder().scores(List.of(newItem)).build(), TEST_WORKSPACE, + API_KEY); UUID projectId = getProjectId(trace.projectName(), TEST_WORKSPACE, API_KEY); - var actualEntity = getAndAssert(trace, id, projectId, now, API_KEY, TEST_WORKSPACE); + var actualEntity = getAndAssert(trace, projectId, API_KEY, TEST_WORKSPACE); assertThat(actualEntity.feedbackScores()).hasSize(1); FeedbackScore actualScore = actualEntity.feedbackScores().getFirst(); @@ -4259,17 +4562,8 @@ void feedback__whenTraceDoesNotExist__thenReturnNoContentAndCreateScore() { .projectName(DEFAULT_PROJECT) .build(); - try (var actualResponse = client.target(URL_TEMPLATE.formatted(baseURI)) - .path("feedback-scores") - .request() - .header(HttpHeaders.AUTHORIZATION, API_KEY) - .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .put(Entity.json(new FeedbackScoreBatch(List.of(score))))) { - - assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); - assertThat(actualResponse.hasEntity()).isFalse(); - } - + createAndAssertForTrace(FeedbackScoreBatch.builder().scores(List.of(score)).build(), TEST_WORKSPACE, + API_KEY); } @Test @@ -4327,16 +4621,7 @@ void feedback__whenFeedbackSpanBatchHasMaxSize__thenReturnNoContentAndCreateScor .build()) .toList(); - try (var actualResponse = 
client.target(URL_TEMPLATE.formatted(baseURI)) - .path("feedback-scores") - .request() - .header(HttpHeaders.AUTHORIZATION, API_KEY) - .header(WORKSPACE_HEADER, TEST_WORKSPACE) - .put(Entity.json(new FeedbackScoreBatch(scores)))) { - - assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); - assertThat(actualResponse.hasEntity()).isFalse(); - } + createAndAssertForTrace(FeedbackScoreBatch.builder().scores(scores).build(), TEST_WORKSPACE, API_KEY); } @Test @@ -4363,11 +4648,32 @@ void feedback__whenFeedbackTraceIdIsNotValid__thenReturn400() { } } + private void createAndAssertForTrace(FeedbackScoreBatch request, String workspaceName, String apiKey) { + createAndAssert(URL_TEMPLATE.formatted(baseURI), request, workspaceName, apiKey); + } + + private void createAndAssertForSpan(FeedbackScoreBatch request, String workspaceName, String apiKey) { + createAndAssert(URL_TEMPLATE_SPANS.formatted(baseURI), request, workspaceName, apiKey); + } + + private void createAndAssert(String path, FeedbackScoreBatch request, String workspaceName, String apiKey) { + try (var actualResponse = client.target(path) + .path("feedback-scores") + .request() + .header(HttpHeaders.AUTHORIZATION, apiKey) + .header(WORKSPACE_HEADER, workspaceName) + .put(Entity.json(request))) { + + assertThat(actualResponse.getStatusInfo().getStatusCode()).isEqualTo(204); + assertThat(actualResponse.hasEntity()).isFalse(); + } + } + private void assertEqualsForScores(FeedbackScore actualScore, FeedbackScore expectedScore) { assertThat(actualScore) .usingRecursiveComparison() .withComparatorForType(BigDecimal::compareTo, BigDecimal.class) - .ignoringFields(IGNORED_FIELDS) + .ignoringFields(IGNORED_FIELDS_SCORES) .isEqualTo(expectedScore); } @@ -4375,7 +4681,7 @@ private void assertEqualsForScores(FeedbackScore actualScore, FeedbackScoreBatch assertThat(actualScore) .usingRecursiveComparison() .withComparatorForType(BigDecimal::compareTo, BigDecimal.class) - .ignoringFields(IGNORED_FIELDS) + 
.ignoringFields(IGNORED_FIELDS_SCORES) .isEqualTo(expectedScore); } @@ -4383,11 +4689,10 @@ private void assertEqualsForScores(List expected, List actual) { assertThat(actual) .usingRecursiveComparison( RecursiveComparisonConfiguration.builder() - .withIgnoredFields(IGNORED_FIELDS) + .withIgnoredFields(IGNORED_FIELDS_SCORES) .withComparatorForType(BigDecimal::compareTo, BigDecimal.class) .build()) .ignoringCollectionOrder() .isEqualTo(expected); } - } diff --git a/apps/opik-backend/src/test/java/com/comet/opik/podam/BigDecimalStrategy.java b/apps/opik-backend/src/test/java/com/comet/opik/podam/BigDecimalStrategy.java new file mode 100644 index 0000000000..11e1894614 --- /dev/null +++ b/apps/opik-backend/src/test/java/com/comet/opik/podam/BigDecimalStrategy.java @@ -0,0 +1,34 @@ +package com.comet.opik.podam; + +import com.comet.opik.utils.ValidationUtils; +import jakarta.validation.constraints.DecimalMax; +import jakarta.validation.constraints.DecimalMin; +import uk.co.jemos.podam.api.PodamUtils; +import uk.co.jemos.podam.common.AttributeStrategy; +import uk.co.jemos.podam.common.BeanValidationStrategy; + +import java.lang.annotation.Annotation; +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.util.List; + +public class BigDecimalStrategy implements AttributeStrategy { + + public static final BigDecimalStrategy INSTANCE = new BigDecimalStrategy(); + + @Override + public BigDecimal getValue(Class attrType, List annotations) { + var min = ValidationUtils.MIN_FEEDBACK_SCORE_VALUE; + var decimalMin = BeanValidationStrategy.findTypeFromList(annotations, DecimalMin.class); + if (null != decimalMin) { + min = decimalMin.value(); + } + var max = ValidationUtils.MAX_FEEDBACK_SCORE_VALUE; + var decimalMax = BeanValidationStrategy.findTypeFromList(annotations, DecimalMax.class); + if (null != decimalMax) { + max = decimalMax.value(); + } + var value = PodamUtils.getDoubleInRange(Double.parseDouble(min), Double.parseDouble(max)); + return new 
BigDecimal(value).setScale(ValidationUtils.SCALE, RoundingMode.HALF_EVEN); + } +} diff --git a/apps/opik-backend/src/test/java/com/comet/opik/podam/PodamFactoryUtils.java b/apps/opik-backend/src/test/java/com/comet/opik/podam/PodamFactoryUtils.java index d7cecf72d1..333da413ed 100644 --- a/apps/opik-backend/src/test/java/com/comet/opik/podam/PodamFactoryUtils.java +++ b/apps/opik-backend/src/test/java/com/comet/opik/podam/PodamFactoryUtils.java @@ -8,6 +8,8 @@ import com.comet.opik.podam.manufacturer.NumericalFeedbackDetailTypeManufacturer; import com.comet.opik.podam.manufacturer.UUIDTypeManufacturer; import com.fasterxml.jackson.databind.JsonNode; +import jakarta.validation.constraints.DecimalMax; +import jakarta.validation.constraints.DecimalMin; import jakarta.validation.constraints.Pattern; import uk.co.jemos.podam.api.PodamFactory; import uk.co.jemos.podam.api.PodamFactoryImpl; @@ -26,9 +28,11 @@ public class PodamFactoryUtils { public static PodamFactory newPodamFactory() { var podamFactory = new PodamFactoryImpl(); var strategy = ((RandomDataProviderStrategy) podamFactory.getStrategy()); + strategy.addOrReplaceAttributeStrategy(Pattern.class, PatternStrategy.INSTANCE); + strategy.addOrReplaceAttributeStrategy(DecimalMax.class, BigDecimalStrategy.INSTANCE); + strategy.addOrReplaceAttributeStrategy(DecimalMin.class, BigDecimalStrategy.INSTANCE); strategy.addOrReplaceTypeManufacturer(BigDecimal.class, BigDecimalTypeManufacturer.INSTANCE); strategy.addOrReplaceTypeManufacturer(UUID.class, UUIDTypeManufacturer.INSTANCE); - strategy.addOrReplaceAttributeStrategy(Pattern.class, PatternStrategy.INSTANCE); strategy.addOrReplaceTypeManufacturer( NumericalFeedbackDefinition.NumericalFeedbackDetail.class, new NumericalFeedbackDetailTypeManufacturer()); diff --git a/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.ipynb b/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.ipynb index 9982ed2ade..89903f3dd4 
100644 --- a/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.ipynb +++ b/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.ipynb @@ -17,7 +17,7 @@ "\n", "[Comet](https://www.comet.com/site) provides a hosted version of the Opik platform, [simply create an account](https://www.comet.com/signup?from=llm) and grab you API Key.\n", "\n", - "> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/self_hosting_opik/) for more information." + "> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/overview/) for more information." ] }, { @@ -97,10 +97,11 @@ "outputs": [], "source": [ "# Create dataset\n", - "from opik import Opik, DatasetItem\n", + "import opik\n", + "from opik import DatasetItem\n", "import pandas as pd\n", "\n", - "client = Opik()\n", + "client = opik.Opik()\n", "\n", "try:\n", " # Create dataset\n", @@ -124,8 +125,8 @@ " \n", " dataset.insert(dataset_records)\n", "\n", - "except Exception as e:\n", - " print(e)" + "except opik.rest_api.core.ApiError as e:\n", + " print(\"Dataset already exists\")" ] }, { @@ -151,6 +152,7 @@ "from opik.evaluation.metrics import Hallucination, Equals\n", "from opik.evaluation import evaluate\n", "from opik import Opik, DatasetItem\n", + "from opik.evaluation.metrics.llm_judges.hallucination.template import generate_query\n", "\n", "# Define the evaluation task\n", "def evaluation_task(x: DatasetItem):\n", @@ -181,11 +183,17 @@ "# Define the scoring metric\n", "check_hallucinated_metric = Equals(name=\"Correct hallucination score\")\n", "\n", + "# Add the prompt template as an experiment configuration\n", + "experiment_config = {\n", + " \"prompt_template\": generate_query(input=\"{input}\",context=\"{context}\",output=\"{output}\",few_shot_examples=[])\n", + "}\n", + "\n", "res = evaluate(\n", " experiment_name=\"Evaluate Opik 
hallucination metric\",\n", " dataset=dataset,\n", " task=evaluation_task,\n", - " scoring_metrics=[check_hallucinated_metric]\n", + " scoring_metrics=[check_hallucinated_metric],\n", + " experiment_config=experiment_config\n", ")" ] }, @@ -197,11 +205,6 @@ "\n", "![Hallucination Evaluation](https://raw.githubusercontent.com/comet-ml/opik/main/apps/opik-documentation/documentation/static/img/cookbook/hallucination_metric_cookbook.png)" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] } ], "metadata": { diff --git a/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.md b/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.md index 6bd4983301..d6e63d3359 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.md +++ b/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.md @@ -6,7 +6,7 @@ For this guide we will be evaluating the Hallucination metric included in the LL [Comet](https://www.comet.com/site) provides a hosted version of the Opik platform, [simply create an account](https://www.comet.com/signup?from=llm) and grab you API Key. -> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/self_hosting_opik/) for more information. +> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/overview/) for more information. 
```python @@ -50,10 +50,11 @@ We will be using the [HaluBench dataset](https://huggingface.co/datasets/Patronu ```python # Create dataset -from opik import Opik, DatasetItem +import opik +from opik import DatasetItem import pandas as pd -client = Opik() +client = opik.Opik() try: # Create dataset @@ -77,8 +78,8 @@ try: dataset.insert(dataset_records) -except Exception as e: - print(e) +except opik.rest_api.core.ApiError as e: + print("Dataset already exists") ``` ## Evaluating the hallucination metric @@ -95,6 +96,7 @@ By defining the evaluation task in this way, we will be able to understand how w from opik.evaluation.metrics import Hallucination, Equals from opik.evaluation import evaluate from opik import Opik, DatasetItem +from opik.evaluation.metrics.llm_judges.hallucination.template import generate_query # Define the evaluation task def evaluation_task(x: DatasetItem): @@ -125,16 +127,20 @@ dataset = client.get_dataset(name="HaluBench") # Define the scoring metric check_hallucinated_metric = Equals(name="Correct hallucination score") +# Add the prompt template as an experiment configuration +experiment_config = { + "prompt_template": generate_query(input="{input}",context="{context}",output="{output}",few_shot_examples=[]) +} + res = evaluate( experiment_name="Evaluate Opik hallucination metric", dataset=dataset, task=evaluation_task, - scoring_metrics=[check_hallucinated_metric] + scoring_metrics=[check_hallucinated_metric], + experiment_config=experiment_config ) ``` We can see that the hallucination metric is able to detect ~80% of the hallucinations contained in the dataset and we can see the specific items where hallucinations were not detected. 
![Hallucination Evaluation](https://raw.githubusercontent.com/comet-ml/opik/main/apps/opik-documentation/documentation/static/img/cookbook/hallucination_metric_cookbook.png) - - diff --git a/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.ipynb b/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.ipynb index 2976958537..4c63f1de9b 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.ipynb +++ b/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.ipynb @@ -17,7 +17,7 @@ "\n", "[Comet](https://www.comet.com/site) provides a hosted version of the Opik platform, [simply create an account](https://www.comet.com/signup?from=llm) and grab you API Key.\n", "\n", - "> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/self_hosting_opik/) for more information." + "> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/overview/) for more information." 
] }, { @@ -97,12 +97,13 @@ "outputs": [], "source": [ "# Create dataset\n", - "from opik import Opik, DatasetItem\n", + "import opik\n", + "from opik import DatasetItem\n", "import pandas as pd\n", "import requests\n", "from io import BytesIO\n", "\n", - "client = Opik()\n", + "client = opik.Opik()\n", "try:\n", " # Create dataset\n", " dataset = client.create_dataset(name=\"OpenAIModerationDataset\", description=\"OpenAI Moderation Dataset\")\n", @@ -133,8 +134,8 @@ " \n", " dataset.insert(dataset_records)\n", "\n", - "except Exception as e:\n", - " print(e)" + "except opik.rest_api.core.ApiError as e:\n", + " print(\"Dataset already exists\")" ] }, { @@ -162,7 +163,7 @@ "from opik.evaluation.metrics import Moderation, Equals\n", "from opik.evaluation import evaluate\n", "from opik import Opik, DatasetItem\n", - "\n", + "from opik.evaluation.metrics.llm_judges.moderation.template import generate_query\n", "# Define the evaluation task\n", "def evaluation_task(x: DatasetItem):\n", " metric = Moderation()\n", @@ -193,11 +194,17 @@ "# Define the scoring metric\n", "moderation_metric = Equals(name=\"Correct moderation score\")\n", "\n", + "# Add the prompt template as an experiment configuration\n", + "experiment_config = {\n", + " \"prompt_template\": generate_query(input=\"{input}\",context=\"{context}\",output=\"{output}\",few_shot_examples=[])\n", + "}\n", + "\n", "res = evaluate(\n", " experiment_name=\"Evaluate Opik moderation metric\",\n", " dataset=dataset,\n", " task=evaluation_task,\n", - " scoring_metrics=[moderation_metric]\n", + " scoring_metrics=[moderation_metric],\n", + " experiment_config=experiment_config\n", ")" ] }, diff --git a/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.md b/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.md index 35efae80a3..495b79e1f6 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.md +++ 
b/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.md @@ -6,7 +6,7 @@ For this guide we will be evaluating the Moderation metric included in the LLM E [Comet](https://www.comet.com/site) provides a hosted version of the Opik platform, [simply create an account](https://www.comet.com/signup?from=llm) and grab you API Key. -> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/self_hosting_opik/) for more information. +> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/overview/) for more information. ```python @@ -50,12 +50,13 @@ We will be using the [OpenAI Moderation API Release dataset](https://github.com/ ```python # Create dataset -from opik import Opik, DatasetItem +import opik +from opik import DatasetItem import pandas as pd import requests from io import BytesIO -client = Opik() +client = opik.Opik() try: # Create dataset dataset = client.create_dataset(name="OpenAIModerationDataset", description="OpenAI Moderation Dataset") @@ -86,8 +87,8 @@ try: dataset.insert(dataset_records) -except Exception as e: - print(e) +except opik.rest_api.core.ApiError as e: + print("Dataset already exists") ``` ## Evaluating the moderation metric @@ -106,7 +107,7 @@ We can use the Opik SDK to compute a moderation score for each item in the datas from opik.evaluation.metrics import Moderation, Equals from opik.evaluation import evaluate from opik import Opik, DatasetItem - +from opik.evaluation.metrics.llm_judges.moderation.template import generate_query # Define the evaluation task def evaluation_task(x: DatasetItem): metric = Moderation() @@ -137,11 +138,17 @@ dataset = client.get_dataset(name="OpenAIModerationDataset") # Define the scoring metric moderation_metric = Equals(name="Correct moderation score") +# Add the prompt template as an experiment configuration +experiment_config = { + "prompt_template": 
generate_query(input="{input}",context="{context}",output="{output}",few_shot_examples=[]) +} + res = evaluate( experiment_name="Evaluate Opik moderation metric", dataset=dataset, task=evaluation_task, - scoring_metrics=[moderation_metric] + scoring_metrics=[moderation_metric], + experiment_config=experiment_config ) ``` diff --git a/apps/opik-documentation/documentation/docs/cookbook/langchain.ipynb b/apps/opik-documentation/documentation/docs/cookbook/langchain.ipynb index ce37e7e378..18690d106b 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/langchain.ipynb +++ b/apps/opik-documentation/documentation/docs/cookbook/langchain.ipynb @@ -23,7 +23,7 @@ "\n", "[Comet](https://www.comet.com/site) provides a hosted version of the Opik platform, [simply create an account](https://www.comet.com/signup?from=llm) and grab you API Key.\n", "\n", - "> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/self_hosting_opik/) for more information." + "> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/overview/) for more information." 
] }, { @@ -177,19 +177,19 @@ "outputs": [], "source": [ "# Create the synthetic dataset\n", - "from opik import Opik\n", + "import opik\n", "from opik import DatasetItem\n", "\n", "synthetic_questions = json.loads(completion.choices[0].message.content)[\"result\"]\n", "\n", - "client = Opik()\n", + "client = opik.Opik()\n", "try:\n", " dataset = client.create_dataset(name=\"synthetic_questions\")\n", " dataset.insert([\n", " DatasetItem(input={\"question\": question}) for question in synthetic_questions\n", " ])\n", - "except Exception as e:\n", - " pass" + "except opik.rest_api.core.ApiError as e:\n", + " print(\"Dataset already exists\")" ] }, { diff --git a/apps/opik-documentation/documentation/docs/cookbook/langchain.md b/apps/opik-documentation/documentation/docs/cookbook/langchain.md index 7162456aca..30af79ac90 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/langchain.md +++ b/apps/opik-documentation/documentation/docs/cookbook/langchain.md @@ -12,7 +12,7 @@ We will highlight three different parts of the workflow: [Comet](https://www.comet.com/site) provides a hosted version of the Opik platform, [simply create an account](https://www.comet.com/signup?from=llm) and grab you API Key. -> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/self_hosting_opik/) for more information. +> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/overview/) for more information. 
```python @@ -117,19 +117,19 @@ Now that we have our synthetic dataset, we can create a dataset in Comet and ins ```python # Create the synthetic dataset -from opik import Opik +import opik from opik import DatasetItem synthetic_questions = json.loads(completion.choices[0].message.content)["result"] -client = Opik() +client = opik.Opik() try: dataset = client.create_dataset(name="synthetic_questions") dataset.insert([ DatasetItem(input={"question": question}) for question in synthetic_questions ]) -except Exception as e: - pass +except opik.rest_api.core.ApiError as e: + print("Dataset already exists") ``` ## Creating a LangChain chain diff --git a/apps/opik-documentation/documentation/docs/cookbook/llama-index.ipynb b/apps/opik-documentation/documentation/docs/cookbook/llama-index.ipynb index 0900a80171..5beba6faa7 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/llama-index.ipynb +++ b/apps/opik-documentation/documentation/docs/cookbook/llama-index.ipynb @@ -25,7 +25,7 @@ "\n", "[Comet](https://www.comet.com/site) provides a hosted version of the Opik platform, [simply create an account](https://www.comet.com/signup?from=llm) and grab you API Key.\n", "\n", - "> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/self_hosting_opik/) for more information." + "> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/overview/) for more information." 
] }, { diff --git a/apps/opik-documentation/documentation/docs/cookbook/llama-index.md b/apps/opik-documentation/documentation/docs/cookbook/llama-index.md index bf1218fd4a..df20f7cb7b 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/llama-index.md +++ b/apps/opik-documentation/documentation/docs/cookbook/llama-index.md @@ -14,7 +14,7 @@ For this guide we will be downloading the essays from Paul Graham and use them a [Comet](https://www.comet.com/site) provides a hosted version of the Opik platform, [simply create an account](https://www.comet.com/signup?from=llm) and grab you API Key. -> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/self_hosting_opik/) for more information. +> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/overview/) for more information. ```python diff --git a/apps/opik-documentation/documentation/docs/cookbook/openai.ipynb b/apps/opik-documentation/documentation/docs/cookbook/openai.ipynb index fb2d27c4c4..0abc5d1c39 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/openai.ipynb +++ b/apps/opik-documentation/documentation/docs/cookbook/openai.ipynb @@ -17,7 +17,7 @@ "\n", "[Comet](https://www.comet.com/site) provides a hosted version of the Opik platform, [simply create an account](https://www.comet.com/signup?from=llm) and grab you API Key.\n", "\n", - "> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/self_hosting_opik/) for more information." + "> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/overview/) for more information." 
] }, { @@ -126,7 +126,7 @@ "source": [ "The prompt and response messages are automatically logged to Opik and can be viewed in the UI.\n", "\n", - "![OpenAI Integration](/img/cookbook/openai_trace_cookbook.png)" + "![OpenAI Integration](https://raw.githubusercontent.com/comet-ml/opik/main/apps/opik-documentation/documentation/static/img/cookbook/openai_trace_cookbook.png)" ] }, { diff --git a/apps/opik-documentation/documentation/docs/cookbook/openai.md b/apps/opik-documentation/documentation/docs/cookbook/openai.md index eaabd50ffe..b9264db5e3 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/openai.md +++ b/apps/opik-documentation/documentation/docs/cookbook/openai.md @@ -7,7 +7,7 @@ Opik integrates with OpenAI to provide a simple way to log traces for all OpenAI [Comet](https://www.comet.com/site) provides a hosted version of the Opik platform, [simply create an account](https://www.comet.com/signup?from=llm) and grab you API Key. -> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/self_hosting_opik/) for more information. +> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/overview/) for more information. ```python @@ -76,7 +76,7 @@ print(completion.choices[0].message.content) The prompt and response messages are automatically logged to Opik and can be viewed in the UI. 
-![OpenAI Integration](/img/cookbook/openai_trace_cookbook.png) +![OpenAI Integration](https://raw.githubusercontent.com/comet-ml/opik/main/apps/opik-documentation/documentation/static/img/cookbook/openai_trace_cookbook.png) ## Using it with the `track` decorator diff --git a/apps/opik-documentation/documentation/docs/cookbook/ragas.ipynb b/apps/opik-documentation/documentation/docs/cookbook/ragas.ipynb index a2e20e60ce..8a135e32d6 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/ragas.ipynb +++ b/apps/opik-documentation/documentation/docs/cookbook/ragas.ipynb @@ -22,7 +22,7 @@ "\n", "[Comet](https://www.comet.com/site) provides a hosted version of the Opik platform, [simply create an account](https://www.comet.com/signup?from=llm) and grab you API Key.\n", "\n", - "> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/self_hosting_opik/) for more information." + "> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/overview/) for more information." ] }, { diff --git a/apps/opik-documentation/documentation/docs/cookbook/ragas.md b/apps/opik-documentation/documentation/docs/cookbook/ragas.md index 3288c74a24..748fd850af 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/ragas.md +++ b/apps/opik-documentation/documentation/docs/cookbook/ragas.md @@ -11,7 +11,7 @@ There are two main ways to use Opik with Ragas: [Comet](https://www.comet.com/site) provides a hosted version of the Opik platform, [simply create an account](https://www.comet.com/signup?from=llm) and grab you API Key. -> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/self_hosting_opik/) for more information. +> You can also run the Opik platform locally, see the [installation guide](https://www.comet.com/docs/opik/self-host/overview/) for more information. 
```python diff --git a/apps/opik-documentation/documentation/docs/evaluation/concepts.md b/apps/opik-documentation/documentation/docs/evaluation/concepts.md new file mode 100644 index 0000000000..cdfe2e1117 --- /dev/null +++ b/apps/opik-documentation/documentation/docs/evaluation/concepts.md @@ -0,0 +1,90 @@ +--- +sidebar_position: 1 +sidebar_label: Concepts +--- + +# Evaluation Concepts + +:::tip +If you want to jump straight to running evaluations, you can head to the [Evaluate your LLM application](/docs/evaluation/evaluate_your_llm.md) section. +::: + +When working with LLM applications, the bottleneck to iterating faster is often the evaluation process. While it is possible to manually review your LLM application's output, this process is slow and not scalable. Instead of manually reviewing your LLM application's output, Opik allows you to automate the evaluation of your LLM application. + +In order to understand how to run evaluations in Opik, it is important to first become familiar with the concepts of: + +1. **Dataset**: A dataset is a collection of samples that your LLM application will be evaluated on. Datasets only store the input and expected outputs for each sample, the output from your LLM application will be computed and scored during the evaluation process. +2. **Experiment**: An experiment is a single evaluation of your LLM application. During an experiment, we process each dataset item, compute the output based on your LLM application and then score the output. + +![Evaluation Concepts](/img/evaluation/evaluation_concepts.png) + +In this section, we will walk through all the concepts associated with Opik's evaluation framework. + +## Datasets + +The first step in automating the evaluation of your LLM application is to create a dataset which is a collection of samples that your LLM application will be evaluated on. Each dataset is made up of Dataset Items which store the input, expected output and other metadata for a single sample. 
+ +Given the importance of datasets in the evaluation process, teams often spend a significant amount of time curating and preparing their datasets. There are three main ways to create a dataset: + +1. **Manually curating examples**: As a first step, you can manually curate a set of examples based on your knowledge of the application you are building. You can also leverage subject matter experts to help in the creation of the dataset. + +2. **Using synthetic data**: If you don't have enough data to create a diverse set of examples, you can turn to synthetic data generation tools to help you create a dataset. The [LangChain cookbook](/docs/cookbook/langchain.md) has a great example of how to use synthetic data generation tools to create a dataset. + +3. **Leveraging production data**: If your application is in production, you can leverage the data that is being generated to augment your dataset. While this is often not the first step in creating a dataset, it can be a great way to enrich your dataset with real world data. + + If you are using Opik for production monitoring, you can easily add traces to your dataset by selecting them in the UI and selecting `Add to dataset` in the `Actions` dropdown. + + +:::tip +You can learn more about how to manage your datasets in Opik in the [Manage Datasets](/docs/evaluation/manage_datasets.md) section. +::: + +## Experiments + +Experiments are the core building block of the Opik evaluation framework. Each time you run a new evaluation, a new experiment is created. Each experiment is made up of two main components: + +1. **Experiment Configuration**: The configuration object associated with each experiment allows you to track some metadata, often you would use this field to store the prompt template used for a given experiment for example. +2. **Experiment Items**: Experiment items store the input, expected output, actual output and feedback scores for each dataset sample that was processed during an experiment. 
+ +In addition, for each experiment you will be able to see the average scores for each metric. + +### Experiment Configuration + +One of the main advantages of having an automated evaluation framework is the ability to iterate quickly. The main drawback is that it can become difficult to track what has changed between two different iterations of an experiment. + +The experiment configuration object allows you to store some metadata associated with a given experiment. This is useful for tracking things like the prompt template used for a given experiment, the model used, the temperature, etc. + +You can then compare the configuration of two different experiments from the Opik UI to see what has changed. + +![Experiment Configuration](/img/evaluation/compare_experiment_config.png) + +### Experiment Items + +Experiment items store the input, expected output, actual output and feedback scores for each dataset sample that was processed during an experiment. In addition, a trace is associated with each item to allow you to easily understand why a given item scored the way it did. + +![Experiment Items](/img/evaluation/experiment_items.png) + +## Running an evaluation + +When you run an evaluation, you will need to know the following: + +1. Dataset: The dataset you want to run the evaluation on. +2. Evaluation task: This maps the inputs stored in the dataset to the output you would like to score. The evaluation task is typically the LLM application you are building. +3. Metrics: The metrics you would like to use when scoring the outputs of your LLM + +You can then run the evaluation using the `evaluate` function: + +```python +from opik import evaluate + +evaluate( + dataset=dataset, + evaluation_task=evaluation_task, + metrics=metrics, + experiment_config={"prompt_template": "..."}, +) +``` + +:::tip +You can find a full tutorial on defining evaluations in the [Evaluate your LLM application](/docs/evaluation/evaluate_your_llm.md) section. 
+::: diff --git a/apps/opik-documentation/documentation/docs/evaluation/evaluate_your_llm.md b/apps/opik-documentation/documentation/docs/evaluation/evaluate_your_llm.md index 47a4ee3ca2..75facb7cc6 100644 --- a/apps/opik-documentation/documentation/docs/evaluation/evaluate_your_llm.md +++ b/apps/opik-documentation/documentation/docs/evaluation/evaluate_your_llm.md @@ -7,12 +7,13 @@ sidebar_label: Evaluate your LLM Application Evaluating your LLM application allows you to have confidence in the performance of your LLM application. This evaluation set is often performed both during the development and as part of the testing of an application. -The evaluation is done in three steps: +The evaluation is done in five steps: -1. Define the evaluation task -2. Choose the `Dataset` that you would like to evaluate your application on -3. Choose the metrics that you would like to evaluate your application with -4. Create and run the evaluation experiment. +1. Add tracing to your LLM application +2. Define the evaluation task +3. Choose the `Dataset` that you would like to evaluate your application on +4. Choose the metrics that you would like to evaluate your application with +5. Create and run the evaluation experiment. ## 1. Add tracking to your LLM application @@ -45,7 +46,24 @@ def your_context_retriever(input: str) -> str: We have added here the `track` decorator so that this traces and all it's nested steps are logged to the platform for further analysis. ::: -## 2. Choose the evaluation Dataset +## 2. Define the evaluation task + +Once you have added instrumentation to your LLM application, we can define the evaluation task. The evaluation task takes in as an input a dataset item and needs to return a dictionary with keys that match the parameters expected by the metrics you are using. 
In this example we can define the evaluation task as follows: + +```python +def evaluation_task(x: DatasetItem): + return { + "input": x.input['user_question'], + "output": your_llm_application(x.input['user_question']), + "context": your_context_retriever(x.input['user_question']) + } +``` + +:::warning +If the dictionary returned does not match with the parameters expected by the metrics, you will get inconsistent evaluation results. +::: + +## 3. Choose the evaluation Dataset In order to create an evaluation experiment, you will need to have a Dataset that includes all your test cases. @@ -73,7 +91,7 @@ dataset.insert([ ]) ``` -## 3. Choose evaluation metrics +## 4. Choose evaluation metrics Comet provides a set of built-in evaluation metrics that you can choose from. These are broken down into two main categories: @@ -86,16 +104,15 @@ In the same evaluation experiment, you can use multiple metrics to evaluate your from opik.evaluation.metrics import Equals, Hallucination equals_metric = Equals() -contains_metric = Hallucination() +hallucination_metric = Hallucination() ``` :::tip Each metric expects the data in a certain format, you will need to ensure that the task you have defined in step 1. returns the data in the correct format. ::: -## 4. Run the evaluation +## 5. 
Run the evaluation -In order to Now that we have the task we want to evaluate, the dataset to evaluate on, the metrics we want to evalation with, we can run the evaluation: ```python @@ -103,54 +120,63 @@ from opik import Opik, track, DatasetItem from opik.evaluation import evaluate from opik.evaluation.metrics import Equals, Hallucination from opik.integrations.openai import track_openai +import openai # Define the task to evaluate openai_client = track_openai(openai.OpenAI()) +MODEL = "gpt-3.5-turbo" @track def your_llm_application(input: str) -> str: response = openai_client.chat.completions.create( - model="gpt-3.5-turbo", + model=MODEL, messages=[{"role": "user", "content": input}], ) return response.choices[0].message.content +# Define the evaluation task +def evaluation_task(x: DatasetItem): + return { + "input": x.input['user_question'], + "output": your_llm_application(x.input['user_question']), + "context": your_context_retriever(x.input['user_question']) + } @track def your_context_retriever(input: str) -> str: return ["..."] -# Fetch the dataset +# Create a simple dataset client = Opik() -dataset = client.get_dataset(name="your-dataset-name") +try: + dataset = client.create_dataset(name="your-dataset-name") + dataset.insert([ + {"input": {"user_question": "What is the capital of France?"}}, + {"input": {"user_question": "What is the capital of Germany?"}}, + ]) +except: + dataset = client.get_dataset(name="your-dataset-name") # Define the metrics -equals_metric = Equals(search_key="expected_output") hallucination_metric = Hallucination() -# Define and run the evaluation -def evaluation_task(x: DatasetItem): - return { - "input": x.input['user_question'], - "output": your_llm_application(x.input['user_question']), - "context": your_context_retriever(x.input['user_question']) - } - - evaluation = evaluate( experiment_name="My experiment", dataset=dataset, task=evaluation_task, - scoring_metrics=[contains_metric, hallucination_metric], + 
scoring_metrics=[hallucination_metric], + experiment_config={ + "model": MODEL + } ) ``` :::tip -We will track the traces for all evaluations and will be logged to the `evaluation` project by default. To log it to a specific project, you can pass the `project_name` parameter to the `evaluate` function. +You can use the `experiment_config` parameter to store information about your evaluation task. Typically we see teams store information about the prompt template, the model used and model parameters used to evaluate the application. ::: ## Advanced usage diff --git a/apps/opik-documentation/documentation/docs/evaluation/manage_datasets.md b/apps/opik-documentation/documentation/docs/evaluation/manage_datasets.md index 03d71536b6..db25329f4d 100644 --- a/apps/opik-documentation/documentation/docs/evaluation/manage_datasets.md +++ b/apps/opik-documentation/documentation/docs/evaluation/manage_datasets.md @@ -1,5 +1,5 @@ --- -sidebar_position: 4 +sidebar_position: 2 sidebar_label: Manage Datasets --- @@ -42,13 +42,20 @@ except: # Add dataset items to it dataset.insert([ - DatasetItem(input={"input": "Hello, world!"}, expected_output={"output": "Hello, world!"}), - DatasetItem(input={"input": "What is the capital of France?"}, expected_output={"output": "Paris"}), + DatasetItem(input={"user_question": "Hello, world!"}, expected_output={"assistant_answer": "Hello, world!"}), + DatasetItem(input={"user_question": "What is the capital of France?"}, expected_output={"assistant_answer": "Paris"}), ]) ``` :::tip -Instead of using the `DatasetItem` class, you can also use a dictionary to insert items to a dataset. The dictionary should have the `input` key, `expected_output` and `metadata` are optional. +Instead of using the `DatasetItem` class, you can also use a dictionary to insert items to a dataset. 
The dictionary should have the `input` key while the `expected_output` and `metadata` are optional: + +```python +dataset.insert([ + {"input": {"user_question": "Hello, world!"}}, + {"input": {"user_question": "What is the capital of France?"}, "expected_output": {"assistant_answer": "Paris"}}, +]) +``` ::: Once the items have been inserted, you can view them them in the Opik UI: diff --git a/apps/opik-documentation/documentation/docs/evaluation/metrics/answer_relevance.md b/apps/opik-documentation/documentation/docs/evaluation/metrics/answer_relevance.md index f63e4d992e..706d32a9e0 100644 --- a/apps/opik-documentation/documentation/docs/evaluation/metrics/answer_relevance.md +++ b/apps/opik-documentation/documentation/docs/evaluation/metrics/answer_relevance.md @@ -20,10 +20,7 @@ metric.score( context=["France is a country in Western Europe. Its capital is Paris, which is known for landmarks like the Eiffel Tower."], ) ``` - -:::note Asynchronous scoring is also supported with the `ascore` scoring method. -::: ## Detecting answer relevance diff --git a/apps/opik-documentation/documentation/docs/evaluation/metrics/context_precision.md b/apps/opik-documentation/documentation/docs/evaluation/metrics/context_precision.md index 0d836155c9..d7c3df6f1c 100644 --- a/apps/opik-documentation/documentation/docs/evaluation/metrics/context_precision.md +++ b/apps/opik-documentation/documentation/docs/evaluation/metrics/context_precision.md @@ -23,10 +23,7 @@ metric.score( context=["France is a country in Western Europe. Its capital is Paris, which is known for landmarks like the Eiffel Tower."], ) ``` - -:::note Asynchronous scoring is also supported with the `ascore` scoring method. 
-::: ## ContextPrecision Prompt diff --git a/apps/opik-documentation/documentation/docs/evaluation/metrics/context_recall.md b/apps/opik-documentation/documentation/docs/evaluation/metrics/context_recall.md index ed53eae33f..86a3fc903b 100644 --- a/apps/opik-documentation/documentation/docs/evaluation/metrics/context_recall.md +++ b/apps/opik-documentation/documentation/docs/evaluation/metrics/context_recall.md @@ -23,10 +23,7 @@ metric.score( context=["France is a country in Western Europe. Its capital is Paris, which is known for landmarks like the Eiffel Tower."], ) ``` - -:::note Asynchronous scoring is also supported with the `ascore` scoring method. -::: ## ContextRecall Prompt diff --git a/apps/opik-documentation/documentation/docs/evaluation/metrics/hallucination.md b/apps/opik-documentation/documentation/docs/evaluation/metrics/hallucination.md index 6406ff6d97..ebc1a7cbfd 100644 --- a/apps/opik-documentation/documentation/docs/evaluation/metrics/hallucination.md +++ b/apps/opik-documentation/documentation/docs/evaluation/metrics/hallucination.md @@ -22,9 +22,10 @@ metric.score( context=["France is a country in Western Europe. Its capital is Paris, which is known for landmarks like the Eiffel Tower."], ) ``` - -:::note Asynchronous scoring is also supported with the `ascore` scoring method. + +:::tip +The hallucination score is either `0` or `1`. A score of `0` indicates that no hallucinations were detected, a score of `1` indicates that hallucinations were detected. ::: ## Hallucination Prompt diff --git a/apps/opik-documentation/documentation/docs/evaluation/metrics/moderation.md b/apps/opik-documentation/documentation/docs/evaluation/metrics/moderation.md index 1c8509a745..327abc1b7c 100644 --- a/apps/opik-documentation/documentation/docs/evaluation/metrics/moderation.md +++ b/apps/opik-documentation/documentation/docs/evaluation/metrics/moderation.md @@ -21,9 +21,10 @@ metric.score( context=["France is a country in Western Europe. 
Its capital is Paris, which is known for landmarks like the Eiffel Tower."], ) ``` - -:::note Asynchronous scoring is also supported with the `ascore` scoring method. + +:::tip +The moderation score is a float between `0` and `1`. A score of `0` indicates that the content was deemed safe, a score of `1` indicates that the content was deemed unsafe. ::: ## Moderation Prompt diff --git a/apps/opik-documentation/documentation/docs/home.md b/apps/opik-documentation/documentation/docs/home.md index 5e51227fff..9f1e91c159 100644 --- a/apps/opik-documentation/documentation/docs/home.md +++ b/apps/opik-documentation/documentation/docs/home.md @@ -37,4 +37,4 @@ Evaluating the output of your LLM calls is critical to ensure that your applicat [Comet](https://www.comet.com/site) provides a managed Cloud offering for Opik, simply [create an account](https://www.comet.com/signup?from=llm) to get started. -You can also run Opik locally using our [local installer](/self-host/self_hosting_opik.md#all-in-one-installation). If you are looking for a more production ready deployment, you can also use our [Kubernetes deployment option](/self-host/self_hosting_opik.md#kubernetes-installation). +You can also run Opik locally using our [local installer](/self-host/local_deployment.md). If you are looking for a more production ready deployment, you can also use our [Kubernetes deployment option](/self-host/kubernetes.md). diff --git a/apps/opik-documentation/documentation/docs/quickstart.md b/apps/opik-documentation/documentation/docs/quickstart.md index 270239aaff..6cc3cb5f89 100644 --- a/apps/opik-documentation/documentation/docs/quickstart.md +++ b/apps/opik-documentation/documentation/docs/quickstart.md @@ -9,7 +9,7 @@ This guide helps you integrate the Opik platform with your existing LLM applicat ## Set up -Getting started is as simple as creating an [account on Comet](https://www.comet.com/signup?from=llm) or [self-hosting the platform](/self-host/self_hosting_opik.md). 
+Getting started is as simple as creating an [account on Comet](https://www.comet.com/signup?from=llm) or [self-hosting the platform](/self-host/overview.md). Once your account is created, you can start logging traces by installing the Opik Python SDK: diff --git a/apps/opik-documentation/documentation/docs/self-host/kubernetes.md b/apps/opik-documentation/documentation/docs/self-host/kubernetes.md new file mode 100644 index 0000000000..de13807b73 --- /dev/null +++ b/apps/opik-documentation/documentation/docs/self-host/kubernetes.md @@ -0,0 +1,52 @@ +--- +sidebar_position: 1 +sidebar_label: Production (Kubernetes) +--- + +# Production ready Kubernetes deployment + +For production deployments, we recommend using our Kubernetes Helm chart. This chart is designed to be highly configurable and has been battle-tested in Comet's managed cloud offering. + + +## Prerequisites +In order to install Opik on a Kubernetes cluster, you will need to have the following tools installed: + +- [Docker](https://www.docker.com/) +- [Helm](https://helm.sh/) +- [kubectl](https://kubernetes.io/docs/tasks/tools/) +- [kubectx](https://github.com/ahmetb/kubectx) and [kubens](https://github.com/ahmetb/kubectx) to switch between Kubernetes clusters and namespaces. 
+ +## Installation + +You will then be able to install Opik using the helm chart defined in the `deployment/helm_chart/opik` directory of the [Opik repository](https://github.com/comet-ml/opik): + +```bash +# Navigate to the directory +cd deployment/helm_chart/opik + +# Define the version of the Opik server you want to install +VERSION=latest + +# Add helm dependencies +helm repo add bitnami https://charts.bitnami.com/bitnami +helm dependency build + +# Install Opik +helm upgrade --install opik -n llm --create-namespace -f values.yaml \ + --set registry=docker.dev.comet.com/comet-ml \ + --set component.backend.image.tag=$VERSION --set component.frontend.image.tag=$VERSION-os \ + --set component.backend.env.ANALYTICS_DB_MIGRATIONS_PASS=opik --set component.backend.env.ANALYTICS_DB_PASS=opik \ + --set component.backend.env.STATE_DB_PASS=opik . +``` + +To access the Opik UI, you will need to port-forward the frontend service: + +```bash +kubectl port-forward -n llm svc/opik-frontend 5173 +``` + +You can now open the Opik UI at `http://localhost:5173/llm`. + +## Configuration + +You can find a full list of the configuration options in the [helm chart documentation](https://github.com/comet-ml/opik/tree/main/deployment/helm_chart/opik). diff --git a/apps/opik-documentation/documentation/docs/self-host/local_deployment.md b/apps/opik-documentation/documentation/docs/self-host/local_deployment.md new file mode 100644 index 0000000000..73b678ef47 --- /dev/null +++ b/apps/opik-documentation/documentation/docs/self-host/local_deployment.md @@ -0,0 +1,112 @@ +--- +sidebar_position: 1 +sidebar_label: Local (Docker Compose) +--- + +# Local Deployments using Docker Compose + +To run Opik locally we recommend using [Docker Compose](https://docs.docker.com/compose/). It's easy to set up and allows you to get started in a couple of minutes **but** is not meant for production deployments. 
If you would like to run Opik in a production environment, we recommend using our [Kubernetes Helm chart](./kubernetes.md). + +Before running the installation, make sure you have Docker and Docker Compose installed: + +- [Docker](https://docs.docker.com/get-docker/) +- [Docker Compose](https://docs.docker.com/compose/install/) + +:::note +If you are using Mac or Windows, both `docker` and `docker compose` are included in the [Docker Desktop](https://docs.docker.com/desktop/) installation. +::: + +## Installation + +To install Opik, you will need to clone the Opik repository and run the `docker-compose.yaml` file: + +```bash +# Clone the Opik repository +git clone https://github.com/comet-ml/opik.git + +# Navigate to the opik/deployment/docker-compose directory +cd opik/deployment/docker-compose + +# Start the Opik platform +docker compose up --detach +``` + +Opik will now be available at `http://localhost:5173`. + +:::tip +You will need to make sure that the Opik Python SDK is configured to point to the Opik server you just started. For this, make sure you set the environment variable `OPIK_BASE_URL` to the URL of the Opik server: + +```bash +export OPIK_BASE_URL=http://localhost:5173/api +``` + +or in python: + +```python +import os + +os.environ["OPIK_BASE_URL"] = "http://localhost:5173/api" +``` +::: + +All the data logged to the Opik platform will be stored in the `~/opik` directory, which means that you can start and stop the Opik platform without losing any data. + +## Starting, stopping and upgrading Opik + +:::note +All the `docker compose` commands should be run from the `opik/deployment/docker-compose` directory. 
+::: + +The `docker compose up` command can be used to install, start and upgrade Opik: + +```bash +# Start, upgrade or restart the Opik platform +docker compose up --detach +``` + +To stop Opik, you can run: + +```bash +# Stop the Opik platform +docker compose down +``` + +## Removing Opik + +To remove Opik, you will need to remove the Opik containers and volumes: + +```bash +# Remove the Opik containers and volumes +docker compose down --volumes +``` + +:::warning +Removing the volumes will delete all the data stored in the Opik platform and cannot be recovered. We do not recommend this option unless you are sure that you will not need any of the data stored in the Opik platform. +::: + +## Advanced configuration + +### Running a specific version of Opik + +You can run a specific version of Opik by setting the `OPIK_VERSION` environment variable: + +```bash +OPIK_VERSION=latest docker compose up +``` + +### Building the Opik platform from source + +You can also build the Opik platform from source by running the following command: + +```bash +# Clone the Opik repository +git clone https://github.com/comet-ml/opik.git + +# Navigate to the opik directory +cd opik + +# Build the Opik platform from source +docker compose up --build +``` + +This will build the Frontend and Backend Docker images and start the Opik platform. diff --git a/apps/opik-documentation/documentation/docs/self-host/overview.md b/apps/opik-documentation/documentation/docs/self-host/overview.md new file mode 100644 index 0000000000..01aaf34de1 --- /dev/null +++ b/apps/opik-documentation/documentation/docs/self-host/overview.md @@ -0,0 +1,53 @@ +--- +sidebar_position: 1 +sidebar_label: Overview +--- + +# Self-hosting Opik + +You can use Opik through [Comet's Managed Cloud offering](https://comet.com/site) or you can self-host Opik on your own infrastructure. When choosing to self-host Opik, you get access to all Opik features including tracing, evaluation, etc but without user management features. 
+ +If you choose to self-host Opik, you can choose between two deployment options: + +1. [Local installation](./local_deployment.md): Perfect to get started but not production-ready. +2. [Kubernetes installation](./kubernetes.md): Production ready Opik platform that runs on a Kubernetes cluster. + +## Getting started + +If you would like to try out Opik locally, we recommend using our Local installation based on `docker compose`. Assuming you have `git` and `docker` installed, you can get started in a couple of minutes: + +```bash +# Clone the Opik repository +git clone https://github.com/comet-ml/opik.git + +# Run the Opik platform +cd opik/deployment/docker-compose +docker compose up --detach + +``` + + +Opik will now be available at `http://localhost:5173` and all traces logged from your local machine will be logged to this local Opik instance. In order for traces and other data to be logged to your Opik instance, you need to make sure that the Opik Python SDK is configured to point to the Opik server you just started. You can do this by running the following command: + +```bash +# Configure the Python SDK to point to the local Opik platform +export OPIK_BASE_URL=http://localhost:5173/api +``` + +or in Python: + +```python +import os + +os.environ["OPIK_BASE_URL"] = "http://localhost:5173/api" +``` + +To learn more about how to manage your local Opik deployment, you can refer to our [local deployment guide](./local_deployment.md). + +## Advanced deployment options + +If you would like to deploy Opik on a Kubernetes cluster, we recommend following our Kubernetes deployment guide [here](./kubernetes.md). + +## Comet managed deployments + +The Opik platform is being developed and maintained by the Comet team. If you are looking for a managed deployment solution, feel free to reach out to the Comet team at sales@comet.com or visit the [Comet website](https://comet.com/site) to learn more. 
diff --git a/apps/opik-documentation/documentation/docs/self-host/self_hosting_opik.md b/apps/opik-documentation/documentation/docs/self-host/self_hosting_opik.md deleted file mode 100644 index f3c5eb0427..0000000000 --- a/apps/opik-documentation/documentation/docs/self-host/self_hosting_opik.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -sidebar_position: 1 -sidebar_label: Overview ---- - -# Self-host - -You can use Opik through [Comet's Managed Cloud offering](https://comet.com/site) or you can self-host Opik on your own infrastructure. When choosing to self-host Opik, you get access to all Opik features including tracing, evaluation, etc but without user management features. - -If you choose to self-host Opik, you can choose between two deployment options: - -1. All-in-one installation: The Opik platform runs on a single server. -2. Kubernetes installation: The Opik platform runs on a Kubernetes cluster. - -If you are looking at just getting started, we recommend the all-in-one installation. For more advanced use cases, you can choose the Kubernetes installation. - -## All-in-one installation - -The all-in-one installer is the easiest way to get started with Opik. - -### Installation - -To install the Opik server, run the following command: - -```bash -opik-server install -``` - -You can also run the installer in debug mode to see the details of the -installation process: - -```bash -opik-server --debug install -``` - -:::tip -We recommend installing using the `--debug` flag as the installation can take a couple of minutes -::: - -The opik installer has been tested on the following operating systems: - -- Ubuntu 22.04 -- MacOS - -By default, the installer will install the same version of the Opik as its -own version (`opik-server -v`). 
If you want to install a specific version, you -can specify the version using the `--opik-version` flag: - -```bash -opik-server install --opik-version 0.1.0 -``` - -By default, the installer will setup a local port forward to the Opik server -using the port `5173`. If you want to use a different port, you can specify -the port using the `--local-port` flag: - -```bash -opik-server install --local-port 5174 -``` - -The installation process takes a couple of minutes and when complete, Opik will be available at `http://localhost:5173`. - -### Upgrading the Opik server - -To upgrade the Opik server, run the following command: - -```bash -pip install --upgrade opik-server -opik-server upgrade -``` - -Or upgrade to a specific version: - -```bash -opik-server upgrade --opik-version 0.1.1 -``` - -### Uninstalling the Opik server - -To uninstall the Opik server, you can run the following command: - -```bash -minikube delete -``` - -## Kubernetes installation - -If you are looking for a more customization options, you can choose to install Opik on a Kubernetes cluster. - -In order to install Opik on a Kubernetes cluster, you will need to have the following tools installed: - -- [Docker](https://www.docker.com/) -- [Helm](https://helm.sh/) -- [kubectl](https://kubernetes.io/docs/tasks/tools/) -- [kubectx](https://github.com/ahmetb/kubectx) and [kubens](https://github.com/ahmetb/kubectx) to switch between Kubernetes clusters and namespaces. 
- -To install Opik, you can use the helm chart defined in the `deployment/helm_chart/opik` directory of the [Opik repository](https://github.com/comet-ml/opik): - -```bash -# Navigate to the directory -cd deployment/helm_chart/opik - -# Define the version of the Opik server you want to install -VERSION=main - -# Add helm dependencies -helm repo add bitnami https://charts.bitnami.com/bitnami -helm dependency build - -# Install Opik -helm upgrade --install opik -n llm --create-namespace -f values.yaml \ - --set registry=docker.dev.comet.com/comet-ml \ - --set component.backend.image.tag=$VERSION --set component.frontend.image.tag=$VERSION-os \ - --set component.backend.env.ANALYTICS_DB_MIGRATIONS_PASS=opik --set component.backend.env.ANALYTICS_DB_PASS=opik \ - --set component.backend.env.STATE_DB_PASS=opik . -``` - -To access the Opik UI, you will need to port-forward the frontend service: - -```bash -kubectl port-forward -n llm svc/opik-frontend 5173 -``` - -You can now open the Opik UI at `http://localhost:5173/llm`. - -### Configuration - -You can find a full list the configuration options in the [helm chart documentation](https://github.com/comet-ml/opik/tree/main/deployment/helm_chart/opik). 
diff --git a/apps/opik-documentation/documentation/docusaurus.config.ts b/apps/opik-documentation/documentation/docusaurus.config.ts index 5fa11bc1ba..71d2aebee3 100644 --- a/apps/opik-documentation/documentation/docusaurus.config.ts +++ b/apps/opik-documentation/documentation/docusaurus.config.ts @@ -64,6 +64,17 @@ const config: Config = { searchResultLimits: 25, docsRouteBasePath: "/docs/opik" }, + ], + [ + '@docusaurus/plugin-client-redirects', + { + redirects: [ + { + to: '/self-host/overview', + from: ['/self-host/self_hosting_opik'], + }, + ] + }, ] ], @@ -93,6 +104,7 @@ const config: Config = { prism: { theme: prismThemes.github, darkTheme: prismThemes.dracula, + additionalLanguages: ['bash'], }, } satisfies Preset.ThemeConfig, diff --git a/apps/opik-documentation/documentation/package-lock.json b/apps/opik-documentation/documentation/package-lock.json index 8994e1d108..832a1fa383 100644 --- a/apps/opik-documentation/documentation/package-lock.json +++ b/apps/opik-documentation/documentation/package-lock.json @@ -8,8 +8,9 @@ "name": "documentation", "version": "0.0.0", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/preset-classic": "3.4.0", + "@docusaurus/core": "^3.5.2", + "@docusaurus/plugin-client-redirects": "^3.5.2", + "@docusaurus/preset-classic": "^3.5.2", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "docusaurus-plugin-sass": "^0.2.5", @@ -20,9 +21,9 @@ "sass": "^1.77.8" }, "devDependencies": { - "@docusaurus/module-type-aliases": "3.4.0", - "@docusaurus/tsconfig": "3.4.0", - "@docusaurus/types": "3.4.0", + "@docusaurus/module-type-aliases": "^3.5.2", + "@docusaurus/tsconfig": "^3.5.2", + "@docusaurus/types": "^3.5.2", "concurrently": "^8.2.0", "nodemon": "^2.0.22", "typescript": "~5.2.2" @@ -111,6 +112,27 @@ "@algolia/transporter": "4.24.0" } }, + "node_modules/@algolia/client-account/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": 
"https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "license": "MIT", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-account/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, "node_modules/@algolia/client-analytics": { "version": "4.24.0", "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.24.0.tgz", @@ -123,7 +145,7 @@ "@algolia/transporter": "4.24.0" } }, - "node_modules/@algolia/client-common": { + "node_modules/@algolia/client-analytics/node_modules/@algolia/client-common": { "version": "4.24.0", "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", @@ -133,6 +155,27 @@ "@algolia/transporter": "4.24.0" } }, + "node_modules/@algolia/client-analytics/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-common": { + "version": "5.4.1", + "resolved": 
"https://registry.npmjs.org/@algolia/client-common/-/client-common-5.4.1.tgz", + "integrity": "sha512-IffPD+CETiR8YJMVC1lcjnhETLpJ2L0ORZCbbRvwo/S11D1j/keR7AqKVMn4TseRJCfjmBFOcFrC+m4sXjyQWA==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">= 14.0.0" + } + }, "node_modules/@algolia/client-personalization": { "version": "4.24.0", "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.24.0.tgz", @@ -144,17 +187,32 @@ "@algolia/transporter": "4.24.0" } }, - "node_modules/@algolia/client-search": { + "node_modules/@algolia/client-personalization/node_modules/@algolia/client-common": { "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", - "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", "license": "MIT", "dependencies": { - "@algolia/client-common": "4.24.0", "@algolia/requester-common": "4.24.0", "@algolia/transporter": "4.24.0" } }, + "node_modules/@algolia/client-search": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-5.4.1.tgz", + "integrity": "sha512-nCgWY2p0tZgBqJKmA5E6B3VW+7uqxi1Orf88zNWOihJBRFeOV932pzG4vGrX9l0+p0o/vJabYxuomO35rEt5dw==", + "license": "MIT", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.4.1", + "@algolia/requester-browser-xhr": "5.4.1", + "@algolia/requester-fetch": "5.4.1", + "@algolia/requester-node-http": "5.4.1" + }, + "engines": { + "node": ">= 14.0.0" + } + }, "node_modules/@algolia/events": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz", @@ -195,7 +253,28 @@ "@algolia/transporter": "4.24.0" } }, - 
"node_modules/@algolia/requester-browser-xhr": { + "node_modules/@algolia/recommend/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "license": "MIT", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/requester-browser-xhr": { "version": "4.24.0", "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz", "integrity": "sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==", @@ -204,19 +283,58 @@ "@algolia/requester-common": "4.24.0" } }, + "node_modules/@algolia/recommend/node_modules/@algolia/requester-node-http": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", + "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", + "license": "MIT", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/@algolia/requester-browser-xhr": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.4.1.tgz", + "integrity": 
"sha512-J6+YfU+maR0nIbsYRHoq0UpneilX97hrZzPuuvSoBojQmPo8PeCXKGeT/F0D8uFI6G4CMTKEPGmQYrC9IpCbcQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.4.1" + }, + "engines": { + "node": ">= 14.0.0" + } + }, "node_modules/@algolia/requester-common": { "version": "4.24.0", "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.24.0.tgz", "integrity": "sha512-k3CXJ2OVnvgE3HMwcojpvY6d9kgKMPRxs/kVohrwF5WMr2fnqojnycZkxPoEg+bXm8fi5BBfFmOqgYztRtHsQA==", "license": "MIT" }, + "node_modules/@algolia/requester-fetch": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-fetch/-/requester-fetch-5.4.1.tgz", + "integrity": "sha512-AO/C1pqqpIS8p2IsfM5x92S+UBKkcIen5dHfMEh1rnV0ArWDreeqrtxMD2A+6AjQVwYeZNy56w7o7PVIm6mc8g==", + "license": "MIT", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.4.1" + }, + "engines": { + "node": ">= 14.0.0" + } + }, "node_modules/@algolia/requester-node-http": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", - "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.4.1.tgz", + "integrity": "sha512-2Y3vffc91egwFxz0SjXFEH4q8nvlNJHcz+0//NaWItRU68AvD+3aI/j66STPjkLQOC0Ku6ckA9ChhbOVfrv+Uw==", "license": "MIT", + "peer": true, "dependencies": { - "@algolia/requester-common": "4.24.0" + "@algolia/client-common": "5.4.1" + }, + "engines": { + "node": ">= 14.0.0" } }, "node_modules/@algolia/transporter": { @@ -2290,9 +2408,9 @@ } }, "node_modules/@docusaurus/core": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.4.0.tgz", - "integrity": "sha512-g+0wwmN2UJsBqy2fQRQ6fhXruoEa62JDeEa5d8IdTJlMoaDaEDfHh7WjwGRn4opuTQWpjAwP/fbcgyHKlE+64w==", + "version": 
"3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.5.2.tgz", + "integrity": "sha512-4Z1WkhCSkX4KO0Fw5m/Vuc7Q3NxBG53NE5u59Rs96fWkMPZVSrzEPP16/Nk6cWb/shK7xXPndTmalJtw7twL/w==", "license": "MIT", "dependencies": { "@babel/core": "^7.23.3", @@ -2305,12 +2423,12 @@ "@babel/runtime": "^7.22.6", "@babel/runtime-corejs3": "^7.22.6", "@babel/traverse": "^7.22.8", - "@docusaurus/cssnano-preset": "3.4.0", - "@docusaurus/logger": "3.4.0", - "@docusaurus/mdx-loader": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/cssnano-preset": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "autoprefixer": "^10.4.14", "babel-loader": "^9.1.3", "babel-plugin-dynamic-import-node": "^2.3.3", @@ -2371,14 +2489,15 @@ "node": ">=18.0" }, "peerDependencies": { + "@mdx-js/react": "^3.0.0", "react": "^18.0.0", "react-dom": "^18.0.0" } }, "node_modules/@docusaurus/cssnano-preset": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.4.0.tgz", - "integrity": "sha512-qwLFSz6v/pZHy/UP32IrprmH5ORce86BGtN0eBtG75PpzQJAzp9gefspox+s8IEOr0oZKuQ/nhzZ3xwyc3jYJQ==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.5.2.tgz", + "integrity": "sha512-D3KiQXOMA8+O0tqORBrTOEQyQxNIfPm9jEaJoALjjSjc2M/ZAWcUfPQEnwr2JB2TadHw2gqWgpZckQmrVWkytA==", "license": "MIT", "dependencies": { "cssnano-preset-advanced": "^6.1.2", @@ -2391,9 +2510,9 @@ } }, "node_modules/@docusaurus/logger": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.4.0.tgz", - "integrity": "sha512-bZwkX+9SJ8lB9kVRkXw+xvHYSMGG4bpYHKGXeXFvyVc79NMeeBSGgzd4TQLHH+DYeOJoCdl8flrFJVxlZ0wo/Q==", + "version": "3.5.2", + "resolved": 
"https://registry.npmjs.org/@docusaurus/logger/-/logger-3.5.2.tgz", + "integrity": "sha512-LHC540SGkeLfyT3RHK3gAMK6aS5TRqOD4R72BEU/DE2M/TY8WwEUAMY576UUc/oNJXv8pGhBmQB6N9p3pt8LQw==", "license": "MIT", "dependencies": { "chalk": "^4.1.2", @@ -2404,14 +2523,14 @@ } }, "node_modules/@docusaurus/mdx-loader": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.4.0.tgz", - "integrity": "sha512-kSSbrrk4nTjf4d+wtBA9H+FGauf2gCax89kV8SUSJu3qaTdSIKdWERlngsiHaCFgZ7laTJ8a67UFf+xlFPtuTw==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.5.2.tgz", + "integrity": "sha512-ku3xO9vZdwpiMIVd8BzWV0DCqGEbCP5zs1iHfKX50vw6jX8vQo0ylYo1YJMZyz6e+JFJ17HYHT5FzVidz2IflA==", "license": "MIT", "dependencies": { - "@docusaurus/logger": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "@mdx-js/mdx": "^3.0.0", "@slorber/remark-comment": "^1.0.0", "escape-html": "^1.0.3", @@ -2443,12 +2562,12 @@ } }, "node_modules/@docusaurus/module-type-aliases": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.4.0.tgz", - "integrity": "sha512-A1AyS8WF5Bkjnb8s+guTDuYmUiwJzNrtchebBHpc0gz0PyHJNMaybUlSrmJjHVcGrya0LKI4YcR3lBDQfXRYLw==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.5.2.tgz", + "integrity": "sha512-Z+Xu3+2rvKef/YKTMxZHsEXp1y92ac0ngjDiExRdqGTmEKtCUpkbNYH8v5eXo5Ls+dnW88n6WTa+Q54kLOkwPg==", "license": "MIT", "dependencies": { - "@docusaurus/types": "3.4.0", + "@docusaurus/types": "3.5.2", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2461,20 +2580,45 @@ "react-dom": "*" } }, + "node_modules/@docusaurus/plugin-client-redirects": { + "version": "3.5.2", + "resolved": 
"https://registry.npmjs.org/@docusaurus/plugin-client-redirects/-/plugin-client-redirects-3.5.2.tgz", + "integrity": "sha512-GMU0ZNoVG1DEsZlBbwLPdh0iwibrVZiRfmdppvX17SnByCVP74mb/Nne7Ss7ALgxQLtM4IHbXi8ij90VVjAJ+Q==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "eta": "^2.2.0", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, "node_modules/@docusaurus/plugin-content-blog": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.4.0.tgz", - "integrity": "sha512-vv6ZAj78ibR5Jh7XBUT4ndIjmlAxkijM3Sx5MAAzC1gyv0vupDQNhzuFg1USQmQVj3P5I6bquk12etPV3LJ+Xw==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/logger": "3.4.0", - "@docusaurus/mdx-loader": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", - "cheerio": "^1.0.0-rc.12", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.5.2.tgz", + "integrity": "sha512-R7ghWnMvjSf+aeNDH0K4fjyQnt5L0KzUEnUhmf1e3jZrv3wogeytZNN6n7X8yHcMsuZHPOrctQhXWnmxu+IRRg==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "cheerio": "1.0.0-rc.12", "feed": "^4.2.2", "fs-extra": "^11.1.1", "lodash": "^4.17.21", @@ -2489,24 +2633,26 @@ "node": ">=18.0" }, "peerDependencies": { + 
"@docusaurus/plugin-content-docs": "*", "react": "^18.0.0", "react-dom": "^18.0.0" } }, "node_modules/@docusaurus/plugin-content-docs": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.4.0.tgz", - "integrity": "sha512-HkUCZffhBo7ocYheD9oZvMcDloRnGhBMOZRyVcAQRFmZPmNqSyISlXA1tQCIxW+r478fty97XXAGjNYzBjpCsg==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/logger": "3.4.0", - "@docusaurus/mdx-loader": "3.4.0", - "@docusaurus/module-type-aliases": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.5.2.tgz", + "integrity": "sha512-Bt+OXn/CPtVqM3Di44vHjE7rPCEsRCB/DMo2qoOuozB9f7+lsdrHvD0QCHdBs0uhz6deYJDppAr2VgqybKPlVQ==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "@types/react-router-config": "^5.0.7", "combine-promises": "^1.1.0", "fs-extra": "^11.1.1", @@ -2525,16 +2671,16 @@ } }, "node_modules/@docusaurus/plugin-content-pages": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.4.0.tgz", - "integrity": "sha512-h2+VN/0JjpR8fIkDEAoadNjfR3oLzB+v1qSXbIAKjQ46JAHx3X22n9nqS+BWSQnTnp1AjkjSvZyJMekmcwxzxg==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.5.2.tgz", + "integrity": "sha512-WzhHjNpoQAUz/ueO10cnundRz+VUtkjFhhaQ9jApyv1a46FPURO4cef89pyNIOMny1fjDz/NUN2z6Yi+5WUrCw==", 
"license": "MIT", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/mdx-loader": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "fs-extra": "^11.1.1", "tslib": "^2.6.0", "webpack": "^5.88.1" @@ -2548,14 +2694,14 @@ } }, "node_modules/@docusaurus/plugin-debug": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.4.0.tgz", - "integrity": "sha512-uV7FDUNXGyDSD3PwUaf5YijX91T5/H9SX4ErEcshzwgzWwBtK37nUWPU3ZLJfeTavX3fycTOqk9TglpOLaWkCg==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.5.2.tgz", + "integrity": "sha512-kBK6GlN0itCkrmHuCS6aX1wmoWc5wpd5KJlqQ1FyrF0cLDnvsYSnh7+ftdwzt7G6lGBho8lrVwkkL9/iQvaSOA==", "license": "MIT", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", "fs-extra": "^11.1.1", "react-json-view-lite": "^1.2.0", "tslib": "^2.6.0" @@ -2569,14 +2715,14 @@ } }, "node_modules/@docusaurus/plugin-google-analytics": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.4.0.tgz", - "integrity": "sha512-mCArluxEGi3cmYHqsgpGGt3IyLCrFBxPsxNZ56Mpur0xSlInnIHoeLDH7FvVVcPJRPSQ9/MfRqLsainRw+BojA==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.5.2.tgz", + "integrity": "sha512-rjEkJH/tJ8OXRE9bwhV2mb/WP93V441rD6XnM6MIluu7rk8qg38iSxS43ga2V2Q/2ib53PcqbDEJDG/yWQRJhQ==", "license": "MIT", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/types": "3.4.0", - 
"@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "tslib": "^2.6.0" }, "engines": { @@ -2588,14 +2734,14 @@ } }, "node_modules/@docusaurus/plugin-google-gtag": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.4.0.tgz", - "integrity": "sha512-Dsgg6PLAqzZw5wZ4QjUYc8Z2KqJqXxHxq3vIoyoBWiLEEfigIs7wHR+oiWUQy3Zk9MIk6JTYj7tMoQU0Jm3nqA==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.5.2.tgz", + "integrity": "sha512-lm8XL3xLkTPHFKKjLjEEAHUrW0SZBSHBE1I+i/tmYMBsjCcUB5UJ52geS5PSiOCFVR74tbPGcPHEV/gaaxFeSA==", "license": "MIT", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "@types/gtag.js": "^0.0.12", "tslib": "^2.6.0" }, @@ -2608,14 +2754,14 @@ } }, "node_modules/@docusaurus/plugin-google-tag-manager": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.4.0.tgz", - "integrity": "sha512-O9tX1BTwxIhgXpOLpFDueYA9DWk69WCbDRrjYoMQtFHSkTyE7RhNgyjSPREUWJb9i+YUg3OrsvrBYRl64FCPCQ==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.5.2.tgz", + "integrity": "sha512-QkpX68PMOMu10Mvgvr5CfZAzZQFx8WLlOiUQ/Qmmcl6mjGK6H21WLT5x7xDmcpCoKA/3CegsqIqBR+nA137lQg==", "license": "MIT", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "tslib": "^2.6.0" }, "engines": { @@ -2627,17 +2773,17 @@ } }, "node_modules/@docusaurus/plugin-sitemap": { - 
"version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.4.0.tgz", - "integrity": "sha512-+0VDvx9SmNrFNgwPoeoCha+tRoAjopwT0+pYO1xAbyLcewXSemq+eLxEa46Q1/aoOaJQ0qqHELuQM7iS2gp33Q==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.5.2.tgz", + "integrity": "sha512-DnlqYyRAdQ4NHY28TfHuVk414ft2uruP4QWCH//jzpHjqvKyXjj2fmDtI8RPUBh9K8iZKFMHRnLtzJKySPWvFA==", "license": "MIT", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/logger": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "fs-extra": "^11.1.1", "sitemap": "^7.1.1", "tslib": "^2.6.0" @@ -2651,24 +2797,24 @@ } }, "node_modules/@docusaurus/preset-classic": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.4.0.tgz", - "integrity": "sha512-Ohj6KB7siKqZaQhNJVMBBUzT3Nnp6eTKqO+FXO3qu/n1hJl3YLwVKTWBg28LF7MWrKu46UuYavwMRxud0VyqHg==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/plugin-content-blog": "3.4.0", - "@docusaurus/plugin-content-docs": "3.4.0", - "@docusaurus/plugin-content-pages": "3.4.0", - "@docusaurus/plugin-debug": "3.4.0", - "@docusaurus/plugin-google-analytics": "3.4.0", - "@docusaurus/plugin-google-gtag": "3.4.0", - "@docusaurus/plugin-google-tag-manager": "3.4.0", - "@docusaurus/plugin-sitemap": "3.4.0", - "@docusaurus/theme-classic": "3.4.0", - "@docusaurus/theme-common": "3.4.0", - "@docusaurus/theme-search-algolia": "3.4.0", - "@docusaurus/types": "3.4.0" + "version": "3.5.2", + "resolved": 
"https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.5.2.tgz", + "integrity": "sha512-3ihfXQ95aOHiLB5uCu+9PRy2gZCeSZoDcqpnDvf3B+sTrMvMTr8qRUzBvWkoIqc82yG5prCboRjk1SVILKx6sg==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/plugin-content-blog": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/plugin-content-pages": "3.5.2", + "@docusaurus/plugin-debug": "3.5.2", + "@docusaurus/plugin-google-analytics": "3.5.2", + "@docusaurus/plugin-google-gtag": "3.5.2", + "@docusaurus/plugin-google-tag-manager": "3.5.2", + "@docusaurus/plugin-sitemap": "3.5.2", + "@docusaurus/theme-classic": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-search-algolia": "3.5.2", + "@docusaurus/types": "3.5.2" }, "engines": { "node": ">=18.0" @@ -2679,27 +2825,27 @@ } }, "node_modules/@docusaurus/theme-classic": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.4.0.tgz", - "integrity": "sha512-0IPtmxsBYv2adr1GnZRdMkEQt1YW6tpzrUPj02YxNpvJ5+ju4E13J5tB4nfdaen/tfR1hmpSPlTFPvTf4kwy8Q==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/mdx-loader": "3.4.0", - "@docusaurus/module-type-aliases": "3.4.0", - "@docusaurus/plugin-content-blog": "3.4.0", - "@docusaurus/plugin-content-docs": "3.4.0", - "@docusaurus/plugin-content-pages": "3.4.0", - "@docusaurus/theme-common": "3.4.0", - "@docusaurus/theme-translations": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.5.2.tgz", + "integrity": "sha512-XRpinSix3NBv95Rk7xeMF9k4safMkwnpSgThn0UNQNumKvmcIYjfkwfh2BhwYh/BxMXQHJ/PdmNh22TQFpIaYg==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + 
"@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/plugin-content-blog": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/plugin-content-pages": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-translations": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "copy-text-to-clipboard": "^3.2.0", - "infima": "0.2.0-alpha.43", + "infima": "0.2.0-alpha.44", "lodash": "^4.17.21", "nprogress": "^0.2.0", "postcss": "^8.4.26", @@ -2719,18 +2865,15 @@ } }, "node_modules/@docusaurus/theme-common": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.4.0.tgz", - "integrity": "sha512-0A27alXuv7ZdCg28oPE8nH/Iz73/IUejVaCazqu9elS4ypjiLhK3KfzdSQBnL/g7YfHSlymZKdiOHEo8fJ0qMA==", - "license": "MIT", - "dependencies": { - "@docusaurus/mdx-loader": "3.4.0", - "@docusaurus/module-type-aliases": "3.4.0", - "@docusaurus/plugin-content-blog": "3.4.0", - "@docusaurus/plugin-content-docs": "3.4.0", - "@docusaurus/plugin-content-pages": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.5.2.tgz", + "integrity": "sha512-QXqlm9S6x9Ibwjs7I2yEDgsCocp708DrCrgHgKwg2n2AY0YQ6IjU0gAK35lHRLOvAoJUfCKpQAwUykB0R7+Eew==", + "license": "MIT", + "dependencies": { + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2744,24 +2887,25 @@ "node": ">=18.0" }, "peerDependencies": { + "@docusaurus/plugin-content-docs": "*", "react": "^18.0.0", "react-dom": "^18.0.0" } }, "node_modules/@docusaurus/theme-search-algolia": { - "version": "3.4.0", - 
"resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.4.0.tgz", - "integrity": "sha512-aiHFx7OCw4Wck1z6IoShVdUWIjntC8FHCw9c5dR8r3q4Ynh+zkS8y2eFFunN/DL6RXPzpnvKCg3vhLQYJDmT9Q==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.5.2.tgz", + "integrity": "sha512-qW53kp3VzMnEqZGjakaV90sst3iN1o32PH+nawv1uepROO8aEGxptcq2R5rsv7aBShSRbZwIobdvSYKsZ5pqvA==", "license": "MIT", "dependencies": { "@docsearch/react": "^3.5.2", - "@docusaurus/core": "3.4.0", - "@docusaurus/logger": "3.4.0", - "@docusaurus/plugin-content-docs": "3.4.0", - "@docusaurus/theme-common": "3.4.0", - "@docusaurus/theme-translations": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-translations": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "algoliasearch": "^4.18.0", "algoliasearch-helper": "^3.13.3", "clsx": "^2.0.0", @@ -2780,9 +2924,9 @@ } }, "node_modules/@docusaurus/theme-translations": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.4.0.tgz", - "integrity": "sha512-zSxCSpmQCCdQU5Q4CnX/ID8CSUUI3fvmq4hU/GNP/XoAWtXo9SAVnM3TzpU8Gb//H3WCsT8mJcTfyOk3d9ftNg==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.5.2.tgz", + "integrity": "sha512-GPZLcu4aT1EmqSTmbdpVrDENGR2yObFEX8ssEFYTCiAIVc0EihNSdOIBTazUvgNqwvnoU1A8vIs1xyzc3LITTw==", "license": "MIT", "dependencies": { "fs-extra": "^11.1.1", @@ -2793,16 +2937,16 @@ } }, "node_modules/@docusaurus/tsconfig": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/tsconfig/-/tsconfig-3.4.0.tgz", - "integrity": 
"sha512-0qENiJ+TRaeTzcg4olrnh0BQ7eCxTgbYWBnWUeQDc84UYkt/T3pDNnm3SiQkqPb+YQ1qtYFlC0RriAElclo8Dg==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/tsconfig/-/tsconfig-3.5.2.tgz", + "integrity": "sha512-rQ7toURCFnWAIn8ubcquDs0ewhPwviMzxh6WpRjBW7sJVCXb6yzwUaY3HMNa0VXCFw+qkIbFywrMTf+Pb4uHWQ==", "dev": true, "license": "MIT" }, "node_modules/@docusaurus/types": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.4.0.tgz", - "integrity": "sha512-4jcDO8kXi5Cf9TcyikB/yKmz14f2RZ2qTRerbHAsS+5InE9ZgSLBNLsewtFTcTOXSVcbU3FoGOzcNWAmU1TR0A==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.5.2.tgz", + "integrity": "sha512-N6GntLXoLVUwkZw7zCxwy9QiuEXIcTVzA9AkmNw16oc0AP3SXLrMmDMMBIfgqwuKWa6Ox6epHol9kMtJqekACw==", "license": "MIT", "dependencies": { "@mdx-js/mdx": "^3.0.0", @@ -2821,13 +2965,13 @@ } }, "node_modules/@docusaurus/utils": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.4.0.tgz", - "integrity": "sha512-fRwnu3L3nnWaXOgs88BVBmG1yGjcQqZNHG+vInhEa2Sz2oQB+ZjbEMO5Rh9ePFpZ0YDiDUhpaVjwmS+AU2F14g==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.5.2.tgz", + "integrity": "sha512-33QvcNFh+Gv+C2dP9Y9xWEzMgf3JzrpL2nW9PopidiohS1nDcyknKRx2DWaFvyVTTYIkkABVSr073VTj/NITNA==", "license": "MIT", "dependencies": { - "@docusaurus/logger": "3.4.0", - "@docusaurus/utils-common": "3.4.0", + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils-common": "3.5.2", "@svgr/webpack": "^8.1.0", "escape-string-regexp": "^4.0.0", "file-loader": "^6.2.0", @@ -2860,9 +3004,9 @@ } }, "node_modules/@docusaurus/utils-common": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.4.0.tgz", - "integrity": "sha512-NVx54Wr4rCEKsjOH5QEVvxIqVvm+9kh7q8aYTU5WzUU9/Hctd6aTrcZ3G0Id4zYJ+AeaG5K5qHA4CY5Kcm2iyQ==", + "version": "3.5.2", + "resolved": 
"https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.5.2.tgz", + "integrity": "sha512-i0AZjHiRgJU6d7faQngIhuHKNrszpL/SHQPgF1zH4H+Ij6E9NBYGy6pkcGWToIv7IVPbs+pQLh1P3whn0gWXVg==", "license": "MIT", "dependencies": { "tslib": "^2.6.0" @@ -2880,14 +3024,14 @@ } }, "node_modules/@docusaurus/utils-validation": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.4.0.tgz", - "integrity": "sha512-hYQ9fM+AXYVTWxJOT1EuNaRnrR2WGpRdLDQG07O8UOpsvCPWUVOeo26Rbm0JWY2sGLfzAb+tvJ62yF+8F+TV0g==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.5.2.tgz", + "integrity": "sha512-m+Foq7augzXqB6HufdS139PFxDC5d5q2QKZy8q0qYYvGdI6nnlNsGH4cIGsgBnV7smz+mopl3g4asbSDvMV0jA==", "license": "MIT", "dependencies": { - "@docusaurus/logger": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", "fs-extra": "^11.2.0", "joi": "^17.9.2", "js-yaml": "^4.1.0", @@ -4173,9 +4317,9 @@ } }, "node_modules/algoliasearch-helper": { - "version": "3.22.3", - "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.22.3.tgz", - "integrity": "sha512-2eoEz8mG4KHE+DzfrBTrCmDPxVXv7aZZWPojAJFtARpxxMO6lkos1dJ+XDCXdPvq7q3tpYWRi6xXmVQikejtpA==", + "version": "3.22.5", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.22.5.tgz", + "integrity": "sha512-lWvhdnc+aKOKx8jyA3bsdEgHzm/sglC4cYdMG4xSQyRiPLJVJtH/IVYZG3Hp6PkTEhQqhyVYkeP9z2IlcHJsWw==", "license": "MIT", "dependencies": { "@algolia/events": "^4.0.1" @@ -4184,6 +4328,45 @@ "algoliasearch": ">= 3.1 < 6" } }, + "node_modules/algoliasearch/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": 
"sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "license": "MIT", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/requester-browser-xhr": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz", + "integrity": "sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==", + "license": "MIT", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/requester-node-http": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", + "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", + "license": "MIT", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, "node_modules/ansi-align": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", @@ -6289,9 +6472,9 @@ } }, "node_modules/emoticon": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.0.1.tgz", - "integrity": "sha512-dqx7eA9YaqyvYtUhJwT4rC1HIp82j5ybS1/vQ42ur+jBe17dJMwZE4+gvL1XadSFfxaPFFGt3Xsw+Y8akThDlw==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.1.0.tgz", + 
"integrity": "sha512-VWZfnxqwNcc51hIy/sbOdEem6D+cVtpPzEEtVAFdaas30+1dgkyaOQ4sQ6Bp0tOMqWO1v+HQfYaoodOkdhK6SQ==", "license": "MIT", "funding": { "type": "github", @@ -8239,9 +8422,9 @@ } }, "node_modules/infima": { - "version": "0.2.0-alpha.43", - "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.43.tgz", - "integrity": "sha512-2uw57LvUqW0rK/SWYnd/2rRfxNA5DDNOh33jxF7fy46VWoNhGxiUQyVZHbBMjQ33mQem0cjdDVwgWVAmlRfgyQ==", + "version": "0.2.0-alpha.44", + "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.44.tgz", + "integrity": "sha512-tuRkUSO/lB3rEhLJk25atwAjgLuzq070+pOW8XcvpHky/YbENnRRdPd85IBkyeTgttmOy5ah+yHYsK1HhUd4lQ==", "license": "MIT", "engines": { "node": ">=12" @@ -9145,9 +9328,9 @@ } }, "node_modules/mdast-util-gfm-autolink-literal": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.0.tgz", - "integrity": "sha512-FyzMsduZZHSc3i0Px3PQcBT4WJY/X/RCtEJKuybiC6sjPqLv7h1yqAkmILZtuxMSsUyaLUWNp71+vQH2zqp5cg==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", "license": "MIT", "dependencies": { "@types/mdast": "^4.0.0", @@ -13159,9 +13342,9 @@ "license": "MIT" }, "node_modules/react-json-view-lite": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/react-json-view-lite/-/react-json-view-lite-1.4.0.tgz", - "integrity": "sha512-wh6F6uJyYAmQ4fK0e8dSQMEWuvTs2Wr3el3sLD9bambX1+pSWUVXIz1RFaoy3TI1mZ0FqdpKq9YgbgTTgyrmXA==", + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/react-json-view-lite/-/react-json-view-lite-1.5.0.tgz", + "integrity": "sha512-nWqA1E4jKPklL2jvHWs6s+7Na0qNgw9HCP6xehdQJeg6nPBTFZgGwyko9Q0oj+jQWKTTVRS30u0toM5wiuL3iw==", "license": "MIT", "engines": { "node": ">=14" @@ -13794,9 +13977,9 @@ "license": 
"BSD-3-Clause" }, "node_modules/rtlcss": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-4.2.0.tgz", - "integrity": "sha512-AV+V3oOVvCrqyH5Q/6RuT1IDH1Xy5kJTkEWTWZPN5rdQ3HCFOd8SrbC7c6N5Y8bPpCfZSR6yYbUATXslvfvu5g==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-4.3.0.tgz", + "integrity": "sha512-FI+pHEn7Wc4NqKXMXFM+VAYKEj/mRIcW4h24YVwVtyjI+EqGrLc2Hx/Ny0lrZ21cBWU2goLy36eqMcNj3AQJig==", "license": "MIT", "dependencies": { "escalade": "^3.1.1", @@ -14008,9 +14191,9 @@ } }, "node_modules/search-insights": { - "version": "2.15.0", - "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.15.0.tgz", - "integrity": "sha512-ch2sPCUDD4sbPQdknVl9ALSi9H7VyoeVbsxznYz6QV55jJ8CI3EtwpO1i84keN4+hF5IeHWIeGvc08530JkVXQ==", + "version": "2.17.2", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.17.2.tgz", + "integrity": "sha512-zFNpOpUO+tY2D85KrxJ+aqwnIfdEGi06UH2+xEb+Bp9Mwznmauqc9djbnBibJO5mpfUPPa8st6Sx65+vbeO45g==", "license": "MIT", "peer": true }, diff --git a/apps/opik-documentation/documentation/package.json b/apps/opik-documentation/documentation/package.json index 736b9c4ec8..53d49ef88b 100644 --- a/apps/opik-documentation/documentation/package.json +++ b/apps/opik-documentation/documentation/package.json @@ -16,8 +16,9 @@ "typecheck": "tsc" }, "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/preset-classic": "3.4.0", + "@docusaurus/core": "^3.5.2", + "@docusaurus/plugin-client-redirects": "^3.5.2", + "@docusaurus/preset-classic": "^3.5.2", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "docusaurus-plugin-sass": "^0.2.5", @@ -28,9 +29,9 @@ "sass": "^1.77.8" }, "devDependencies": { - "@docusaurus/module-type-aliases": "3.4.0", - "@docusaurus/tsconfig": "3.4.0", - "@docusaurus/types": "3.4.0", + "@docusaurus/module-type-aliases": "^3.5.2", + "@docusaurus/tsconfig": "^3.5.2", + "@docusaurus/types": "^3.5.2", "concurrently": "^8.2.0", "nodemon": 
"^2.0.22", "typescript": "~5.2.2" diff --git a/apps/opik-documentation/documentation/sidebars.ts b/apps/opik-documentation/documentation/sidebars.ts index 39b7d75d9b..42c6a743e0 100644 --- a/apps/opik-documentation/documentation/sidebars.ts +++ b/apps/opik-documentation/documentation/sidebars.ts @@ -18,7 +18,7 @@ const sidebars: SidebarsConfig = { type: 'category', label: 'Self-host', collapsed: false, - items: ['self-host/self_hosting_opik'] + items: ['self-host/overview', 'self-host/local_deployment', 'self-host/kubernetes'] }, { type: 'category', @@ -35,7 +35,7 @@ const sidebars: SidebarsConfig = { type: 'category', label: 'Evaluation', collapsed: false, - items: ['evaluation/manage_datasets', 'evaluation/evaluate_your_llm', { + items: ['evaluation/concepts', 'evaluation/manage_datasets', 'evaluation/evaluate_your_llm', { type: 'category', label: 'Metrics', items: ['evaluation/metrics/overview', 'evaluation/metrics/heuristic_metrics', 'evaluation/metrics/hallucination', diff --git a/apps/opik-documentation/documentation/static/img/evaluation/compare_experiment_config.png b/apps/opik-documentation/documentation/static/img/evaluation/compare_experiment_config.png new file mode 100644 index 0000000000..e57ba5221d Binary files /dev/null and b/apps/opik-documentation/documentation/static/img/evaluation/compare_experiment_config.png differ diff --git a/apps/opik-documentation/documentation/static/img/evaluation/evaluation_concepts.png b/apps/opik-documentation/documentation/static/img/evaluation/evaluation_concepts.png new file mode 100644 index 0000000000..60464d3c38 Binary files /dev/null and b/apps/opik-documentation/documentation/static/img/evaluation/evaluation_concepts.png differ diff --git a/apps/opik-documentation/documentation/static/img/evaluation/experiment_items.png b/apps/opik-documentation/documentation/static/img/evaluation/experiment_items.png new file mode 100644 index 0000000000..e4159f1b26 Binary files /dev/null and 
b/apps/opik-documentation/documentation/static/img/evaluation/experiment_items.png differ diff --git a/apps/opik-documentation/python-sdk-docs/source/index.rst b/apps/opik-documentation/python-sdk-docs/source/index.rst index 496c73f9eb..694a405dc5 100644 --- a/apps/opik-documentation/python-sdk-docs/source/index.rst +++ b/apps/opik-documentation/python-sdk-docs/source/index.rst @@ -23,8 +23,15 @@ To get start with the package, you can install it using pip:: pip install opik -By default, all traces, datasets and experiments will be logged to the Comet Cloud platform. If you -would like to self-host the platform, you can refer to our `self-serve documentation `_. +To finish configuring the Opik Python SDK, you will need to set the environment variables: + +- If you are using the Comet managed Opik platform: + + - `OPIK_API_KEY`: The API key to the Opik platform. + - `OPIK_WORKSPACE`: The workspace to log traces to, this is often the same as your Opik username. +- If you are using a self-hosted Opik platform: + + - `OPIK_BASE_URL`: The base URL of the Opik platform. ============= Using the SDK diff --git a/apps/opik-frontend/src/components/pages/CompareExperimentsPage/ConfigurationTab/CompareConfigCell.tsx b/apps/opik-frontend/src/components/pages/CompareExperimentsPage/ConfigurationTab/CompareConfigCell.tsx index 51edbb7e36..6a43265e02 100644 --- a/apps/opik-frontend/src/components/pages/CompareExperimentsPage/ConfigurationTab/CompareConfigCell.tsx +++ b/apps/opik-frontend/src/components/pages/CompareExperimentsPage/ConfigurationTab/CompareConfigCell.tsx @@ -27,7 +27,7 @@ const CompareConfigCell: React.FunctionComponent< }} className="px-3" > -

+
{String(data)}
diff --git a/sdks/python/src/opik/api_objects/opik_client.py b/sdks/python/src/opik/api_objects/opik_client.py index df62e5db2c..47900b7334 100644 --- a/sdks/python/src/opik/api_objects/opik_client.py +++ b/sdks/python/src/opik/api_objects/opik_client.py @@ -168,11 +168,13 @@ def span( start_time if start_time is not None else datetime_helpers.local_timestamp() ) - validated_usage = ( - validation_helpers.extract_supported_usage_data_and_print_result( - usage, LOGGER + parsed_usage = validation_helpers.validate_and_parse_usage(usage, LOGGER) + if parsed_usage.full_usage is not None: + metadata = ( + {"usage": parsed_usage.full_usage} + if metadata is None + else {"usage": parsed_usage.full_usage, **metadata} ) - ) if trace_id is None: trace_id = helpers.generate_id() @@ -204,7 +206,7 @@ def span( output=output, metadata=metadata, tags=tags, - usage=validated_usage, + usage=parsed_usage.supported_usage, ) self._streamer.put(create_span_message) @@ -236,10 +238,7 @@ def log_spans_feedback_scores(self, scores: List[FeedbackScoreDict]) -> None: valid_scores = [ score for score in scores - if validation_helpers.validate_feedback_score_and_print_result( - score, LOGGER - ) - is not None + if validation_helpers.validate_feedback_score(score, LOGGER) is not None ] if len(valid_scores) == 0: @@ -277,10 +276,7 @@ def log_traces_feedback_scores(self, scores: List[FeedbackScoreDict]) -> None: valid_scores = [ score for score in scores - if validation_helpers.validate_feedback_score_and_print_result( - score, LOGGER - ) - is not None + if validation_helpers.validate_feedback_score(score, LOGGER) is not None ] if len(valid_scores) == 0: @@ -367,9 +363,10 @@ def create_experiment( if isinstance(experiment_config, Mapping): metadata = jsonable_encoder.jsonable_encoder(experiment_config) - else: + elif experiment_config is not None: LOGGER.error( - "Experiment config must be dictionary, but %s was provided. Config will not be logged." 
+ "Experiment config must be dictionary, but %s was provided. Config will not be logged.", + experiment_config, ) metadata = None diff --git a/sdks/python/src/opik/api_objects/span.py b/sdks/python/src/opik/api_objects/span.py index 10ef32f0e9..e955016738 100644 --- a/sdks/python/src/opik/api_objects/span.py +++ b/sdks/python/src/opik/api_objects/span.py @@ -92,9 +92,13 @@ def update( Returns: None """ - usage = validation_helpers.extract_supported_usage_data_and_print_result( - usage, LOGGER - ) + parsed_usage = validation_helpers.validate_and_parse_usage(usage, LOGGER) + if parsed_usage.full_usage is not None: + metadata = ( + {"usage": parsed_usage.full_usage} + if metadata is None + else {"usage": parsed_usage.full_usage, **metadata} + ) end_span_message = messages.UpdateSpanMessage( span_id=self.id, @@ -106,7 +110,7 @@ def update( input=input, output=output, tags=tags, - usage=usage, + usage=parsed_usage.supported_usage, ) self._streamer.put(end_span_message) @@ -145,11 +149,13 @@ def span( start_time = ( start_time if start_time is not None else datetime_helpers.local_timestamp() ) - validated_usage = ( - validation_helpers.extract_supported_usage_data_and_print_result( - usage, LOGGER + parsed_usage = validation_helpers.validate_and_parse_usage(usage, LOGGER) + if parsed_usage.full_usage is not None: + metadata = ( + {"usage": parsed_usage.full_usage} + if metadata is None + else {"usage": parsed_usage.full_usage, **metadata} ) - ) create_span_message = messages.CreateSpanMessage( span_id=span_id, @@ -164,7 +170,7 @@ def span( output=output, metadata=metadata, tags=tags, - usage=validated_usage, + usage=parsed_usage.supported_usage, ) self._streamer.put(create_span_message) diff --git a/sdks/python/src/opik/api_objects/trace.py b/sdks/python/src/opik/api_objects/trace.py index c8a081c510..86e75f20cf 100644 --- a/sdks/python/src/opik/api_objects/trace.py +++ b/sdks/python/src/opik/api_objects/trace.py @@ -131,11 +131,13 @@ def span( start_time = ( start_time 
if start_time is not None else datetime_helpers.local_timestamp() ) - validated_usage = ( - validation_helpers.extract_supported_usage_data_and_print_result( - usage, LOGGER + parsed_usage = validation_helpers.validate_and_parse_usage(usage, LOGGER) + if parsed_usage.full_usage is not None: + metadata = ( + {"usage": parsed_usage.full_usage} + if metadata is None + else {"usage": parsed_usage.full_usage, **metadata} ) - ) create_span_message = messages.CreateSpanMessage( span_id=span_id, @@ -150,7 +152,7 @@ def span( output=output, metadata=metadata, tags=tags, - usage=validated_usage, + usage=parsed_usage.supported_usage, ) self._streamer.put(create_span_message) diff --git a/sdks/python/src/opik/api_objects/validation_helpers.py b/sdks/python/src/opik/api_objects/validation_helpers.py index 9c25523d6a..6b1ff55f59 100644 --- a/sdks/python/src/opik/api_objects/validation_helpers.py +++ b/sdks/python/src/opik/api_objects/validation_helpers.py @@ -1,33 +1,30 @@ import logging from typing import Any, Optional, cast -from ..types import UsageDict, FeedbackScoreDict +from ..types import FeedbackScoreDict from ..validation import usage as usage_validator from ..validation import feedback_score as feedback_score_validator from .. 
import logging_messages -def extract_supported_usage_data_and_print_result( +def validate_and_parse_usage( usage: Any, logger: logging.Logger -) -> Optional[UsageDict]: +) -> usage_validator.ParsedUsage: if usage is None: - return None - - usage_with_supported_keys = usage_validator.keep_supported_keys(usage) + return usage_validator.ParsedUsage() - usage_validator_ = usage_validator.UsageValidator(usage_with_supported_keys) + usage_validator_ = usage_validator.UsageValidator(usage) if usage_validator_.validate().failed(): logger.warning( logging_messages.INVALID_USAGE_WILL_NOT_BE_LOGGED, usage, usage_validator_.failure_reason_message(), ) - return None - return cast(UsageDict, usage_with_supported_keys) + return usage_validator_.parsed_usage -def validate_feedback_score_and_print_result( +def validate_feedback_score( feedback_score: Any, logger: logging.Logger ) -> Optional[FeedbackScoreDict]: feedback_score_validator_ = feedback_score_validator.FeedbackScoreValidator( diff --git a/sdks/python/src/opik/cli.py b/sdks/python/src/opik/cli.py index 0a1a9e62e1..b51509c4b6 100644 --- a/sdks/python/src/opik/cli.py +++ b/sdks/python/src/opik/cli.py @@ -4,8 +4,6 @@ import click -from opik_installer import opik_server - __version__: str = "0.0.0+dev" if __package__: __version__ = metadata.version(__package__) @@ -19,8 +17,5 @@ def cli() -> None: """CLI tool for Opik.""" -cli.add_command(opik_server, name="server") - - if __name__ == "__main__": cli() diff --git a/sdks/python/src/opik/message_processing/jsonable_encoder.py b/sdks/python/src/opik/message_processing/jsonable_encoder.py index 0e1a672f30..cf89c5e80c 100644 --- a/sdks/python/src/opik/message_processing/jsonable_encoder.py +++ b/sdks/python/src/opik/message_processing/jsonable_encoder.py @@ -49,6 +49,13 @@ def jsonable_encoder(obj: Any) -> Any: if isinstance(obj, np.ndarray): return jsonable_encoder(obj.tolist()) + + if hasattr(obj, "to_string"): # langchain internal data objects + try: + return 
jsonable_encoder(obj.to_string()) + except Exception: + pass + except Exception: LOGGER.debug("Failed to serialize object.", exc_info=True) diff --git a/sdks/python/src/opik/validation/usage.py b/sdks/python/src/opik/validation/usage.py index 43d083c39b..cc43f31d24 100644 --- a/sdks/python/src/opik/validation/usage.py +++ b/sdks/python/src/opik/validation/usage.py @@ -1,6 +1,7 @@ import pydantic +import dataclasses -from typing import Any, Dict +from typing import Any, Dict, Optional from ..types import UsageDict from . import validator, result @@ -10,6 +11,12 @@ class PydanticWrapper(pydantic.BaseModel): usage: UsageDict +@dataclasses.dataclass +class ParsedUsage: + full_usage: Optional[Dict[str, Any]] = None + supported_usage: Optional[UsageDict] = None + + EXPECTED_TYPES = "{'completion_tokens': int, 'prompt_tokens': int, 'total_tokens': int}" @@ -21,9 +28,21 @@ class UsageValidator(validator.Validator): def __init__(self, usage: Any): self.usage = usage + self.parsed_usage = ParsedUsage() + def validate(self) -> result.ValidationResult: try: - PydanticWrapper(usage=self.usage) + if isinstance(self.usage, dict): + filtered_usage = _keep_supported_keys(self.usage) + PydanticWrapper(usage=filtered_usage) + supported_usage = UsageDict(**filtered_usage) # type: ignore + self.parsed_usage = ParsedUsage( + full_usage=self.usage, supported_usage=supported_usage + ) + else: + # we already know that usage is invalid but want pydantic to trigger validation error + PydanticWrapper(usage=self.usage) + self.validation_result = result.ValidationResult(failed=False) except pydantic.ValidationError as exception: failure_reasons = [] @@ -31,9 +50,9 @@ def validate(self) -> result.ValidationResult: component_name: str = ".".join(e["loc"]) component_value: str = e["input"] msg: str = ( - f"{component_name} has incorrect type.\n" - f"Value {repr(component_value)} of type {type(component_value)} was passed.\n" - f"Expected types: {EXPECTED_TYPES}." 
+ f"{component_name} is invalid or missing.\n" + f"Expected keys to have in a dictionary: {EXPECTED_TYPES}.\n" + f"Value {repr(component_value)} of type {type(component_value)} was passed." ) failure_reasons.append(msg) self.validation_result = result.ValidationResult( @@ -49,7 +68,7 @@ def failure_reason_message(self) -> str: return self.validation_result.failure_reasons[0] -def keep_supported_keys(usage: Dict[str, Any]) -> Dict[str, Any]: +def _keep_supported_keys(usage: Dict[str, Any]) -> Dict[str, Any]: supported_keys = UsageDict.__annotations__.keys() filtered_usage = {} diff --git a/sdks/python/tests/e2e/conftest.py b/sdks/python/tests/e2e/conftest.py index 312b9a740c..fac770dabf 100644 --- a/sdks/python/tests/e2e/conftest.py +++ b/sdks/python/tests/e2e/conftest.py @@ -15,7 +15,6 @@ def _random_chars(n: int = 6) -> str: @pytest.fixture(scope="session") def configure_e2e_tests_env(): os.environ["OPIK_PROJECT_NAME"] = "e2e-tests" - os.environ["OPIK_URL_OVERRIDE"] = "http://localhost:5173/api" @pytest.fixture() diff --git a/sdks/python/tests/library_integration/langchain/test_langchain.py b/sdks/python/tests/library_integration/langchain/test_langchain.py index 4563ec9aab..2780f35358 100644 --- a/sdks/python/tests/library_integration/langchain/test_langchain.py +++ b/sdks/python/tests/library_integration/langchain/test_langchain.py @@ -6,6 +6,7 @@ SpanModel, TraceModel, ANY_BUT_NONE, + ANY_DICT, assert_equal, ) import pytest @@ -78,9 +79,7 @@ def test_langchain__happyflow( id=ANY_BUT_NONE, name="RunnableSequence", input={"title": "Documentary about Bigfoot in Paris"}, - output={ - "output": "I'm sorry, I don't think I'm talented enough to write a synopsis" - }, + output=ANY_DICT, tags=["tag1", "tag2"], metadata={"a": "b"}, start_time=ANY_BUT_NONE, @@ -91,12 +90,7 @@ def test_langchain__happyflow( type="general", name="PromptTemplate", input={"title": "Documentary about Bigfoot in Paris"}, - output={ - "output": { - "text": "Given the title of play, right a 
synopsys for that. Title: Documentary about Bigfoot in Paris.", - "type": "StringPromptValue", - } - }, + output=ANY_DICT, metadata={}, start_time=ANY_BUT_NONE, end_time=ANY_BUT_NONE, @@ -111,19 +105,7 @@ def test_langchain__happyflow( "Given the title of play, right a synopsys for that. Title: Documentary about Bigfoot in Paris." ] }, - output={ - "generations": [ - [ - { - "text": "I'm sorry, I don't think I'm talented enough to write a synopsis", - "generation_info": None, - "type": "Generation", - } - ] - ], - "llm_output": None, - "run": None, - }, + output=ANY_DICT, metadata={ "invocation_params": { "responses": [ @@ -206,12 +188,7 @@ def test_langchain__openai_llm_is_used__token_usage_is_logged__happyflow( type="general", name="PromptTemplate", input={"title": "Documentary about Bigfoot in Paris"}, - output={ - "output": { - "text": "Given the title of play, right a synopsys for that. Title: Documentary about Bigfoot in Paris.", - "type": "StringPromptValue", - } - }, + output={"output": ANY_BUT_NONE}, metadata={}, start_time=ANY_BUT_NONE, end_time=ANY_BUT_NONE, @@ -328,12 +305,7 @@ def f(x): input={ "title": "Documentary about Bigfoot in Paris" }, - output={ - "output": { - "text": "Given the title of play, right a synopsys for that. Title: Documentary about Bigfoot in Paris.", - "type": "StringPromptValue", - } - }, + output={"output": ANY_BUT_NONE}, metadata={}, start_time=ANY_BUT_NONE, end_time=ANY_BUT_NONE, @@ -348,19 +320,7 @@ def f(x): "Given the title of play, right a synopsys for that. Title: Documentary about Bigfoot in Paris." 
] }, - output={ - "generations": [ - [ - { - "text": "I'm sorry, I don't think I'm talented enough to write a synopsis", - "generation_info": None, - "type": "Generation", - } - ] - ], - "llm_output": None, - "run": None, - }, + output=ANY_DICT, metadata={ "invocation_params": { "responses": [ @@ -474,12 +434,7 @@ def f(): type="general", name="PromptTemplate", input={"title": "Documentary about Bigfoot in Paris"}, - output={ - "output": { - "text": "Given the title of play, right a synopsys for that. Title: Documentary about Bigfoot in Paris.", - "type": "StringPromptValue", - } - }, + output=ANY_DICT, metadata={}, start_time=ANY_BUT_NONE, end_time=ANY_BUT_NONE, @@ -494,19 +449,7 @@ def f(): "Given the title of play, right a synopsys for that. Title: Documentary about Bigfoot in Paris." ] }, - output={ - "generations": [ - [ - { - "text": "I'm sorry, I don't think I'm talented enough to write a synopsis", - "generation_info": None, - "type": "Generation", - } - ] - ], - "llm_output": None, - "run": None, - }, + output=ANY_DICT, metadata={ "invocation_params": { "responses": [ @@ -616,12 +559,7 @@ def f(): type="general", name="PromptTemplate", input={"title": "Documentary about Bigfoot in Paris"}, - output={ - "output": { - "text": "Given the title of play, right a synopsys for that. Title: Documentary about Bigfoot in Paris.", - "type": "StringPromptValue", - } - }, + output={"output": ANY_BUT_NONE}, metadata={}, start_time=ANY_BUT_NONE, end_time=ANY_BUT_NONE, @@ -636,19 +574,7 @@ def f(): "Given the title of play, right a synopsys for that. Title: Documentary about Bigfoot in Paris." 
] }, - output={ - "generations": [ - [ - { - "text": "I'm sorry, I don't think I'm talented enough to write a synopsis", - "generation_info": None, - "type": "Generation", - } - ] - ], - "llm_output": None, - "run": None, - }, + output=ANY_DICT, metadata={ "invocation_params": { "responses": [ diff --git a/sdks/python/tests/library_integration/openai/test_openai.py b/sdks/python/tests/library_integration/openai/test_openai.py index 82fa7b9347..244cf3cd93 100644 --- a/sdks/python/tests/library_integration/openai/test_openai.py +++ b/sdks/python/tests/library_integration/openai/test_openai.py @@ -85,6 +85,7 @@ def test_openai_client_chat_completions_create__happyflow(fake_streamer): "type": "openai_chat", "model": "gpt-3.5-turbo", "max_tokens": 10, + "usage": ANY_BUT_NONE, }, usage=ANY_BUT_NONE, start_time=ANY_BUT_NONE, @@ -230,6 +231,7 @@ def f(): "type": "openai_chat", "model": "gpt-3.5-turbo", "max_tokens": 10, + "usage": ANY_BUT_NONE, }, usage=ANY_BUT_NONE, start_time=ANY_BUT_NONE, @@ -310,6 +312,7 @@ async def async_f(): "type": "openai_chat", "model": "gpt-3.5-turbo", "max_tokens": 10, + "usage": ANY_BUT_NONE, }, usage=ANY_BUT_NONE, start_time=ANY_BUT_NONE, @@ -394,6 +397,7 @@ def test_openai_client_chat_completions_create__stream_mode_is_on__generator_tra "max_tokens": 10, "stream": True, "stream_options": {"include_usage": True}, + "usage": ANY_BUT_NONE, }, usage=ANY_BUT_NONE, start_time=ANY_BUT_NONE, @@ -478,6 +482,7 @@ async def async_f(): "max_tokens": 10, "stream": True, "stream_options": {"include_usage": True}, + "usage": ANY_BUT_NONE, }, usage=ANY_BUT_NONE, start_time=ANY_BUT_NONE, diff --git a/sdks/python/tests/testlib/__init__.py b/sdks/python/tests/testlib/__init__.py index ab5734b7c1..d88507baf6 100644 --- a/sdks/python/tests/testlib/__init__.py +++ b/sdks/python/tests/testlib/__init__.py @@ -1,13 +1,14 @@ from .backend_emulator_message_processor import BackendEmulatorMessageProcessor from .models import SpanModel, TraceModel, FeedbackScoreModel 
from .assert_helpers import assert_dicts_equal, prepare_difference_report, assert_equal -from .any_but_none import ANY_BUT_NONE +from .any_compare_helpers import ANY_BUT_NONE, ANY_DICT __all__ = [ "SpanModel", "TraceModel", "FeedbackScoreModel", "ANY_BUT_NONE", + "ANY_DICT", "assert_equal", "assert_dicts_equal", "prepare_difference_report", diff --git a/sdks/python/tests/testlib/any_but_none.py b/sdks/python/tests/testlib/any_compare_helpers.py similarity index 51% rename from sdks/python/tests/testlib/any_but_none.py rename to sdks/python/tests/testlib/any_compare_helpers.py index babbe715a8..6bb0f628b0 100644 --- a/sdks/python/tests/testlib/any_but_none.py +++ b/sdks/python/tests/testlib/any_compare_helpers.py @@ -14,4 +14,21 @@ def __repr__(self): return "" +class AnyDict: + "A helper object that compares equal to all dicts." + + def __eq__(self, other): + if isinstance(other, dict): + return True + + return False + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "" + + ANY_BUT_NONE = AnyButNone() +ANY_DICT = AnyDict() diff --git a/sdks/python/tests/testlib/assert_helpers.py b/sdks/python/tests/testlib/assert_helpers.py index 8abc1d321c..0108641d1e 100644 --- a/sdks/python/tests/testlib/assert_helpers.py +++ b/sdks/python/tests/testlib/assert_helpers.py @@ -13,7 +13,23 @@ def prepare_difference_report(expected: Any, actual: Any) -> str: diff_report = deepdiff.DeepDiff( expected, actual, exclude_types=[mock.mock._ANY] ).pretty() - return diff_report + + # Remove from report lines like that "X type changed from int to ANY_BUT_NONE" + # But keep the lines like "X type changed from NoneType to ANY_BUT_NONE" + # The rest of the lines remain. 
+ diff_report_lines = diff_report.split("\n") + diff_report_cleaned_lines = [ + diff_report_line + for diff_report_line in diff_report_lines + if ( + "NoneType to AnyButNone" in diff_report_line + or "AnyButNone to NoneType" in diff_report_line + or "AnyButNone" not in diff_report_line + ) + ] + diff_report_clean = "\n".join(diff_report_cleaned_lines) + + return diff_report_clean except Exception: LOGGER.debug("Failed to prepare difference report", exc_info=True) return "" diff --git a/sdks/python/tests/unit/validation/test_usage_validator.py b/sdks/python/tests/unit/validation/test_usage_validator.py index b45e680e71..91e509c127 100644 --- a/sdks/python/tests/unit/validation/test_usage_validator.py +++ b/sdks/python/tests/unit/validation/test_usage_validator.py @@ -1,5 +1,6 @@ import pytest from opik.validation import usage +from opik.types import UsageDict @pytest.mark.parametrize( @@ -43,7 +44,7 @@ "prompt_tokens": 32, "unknown_key": "anything", }, - False, + True, ), ({}, False), ("not-even-a-dict", False), @@ -55,3 +56,9 @@ def test_usage_validator(usage_dict, is_valid): tested = usage.UsageValidator(usage_dict) assert tested.validate().ok() is is_valid, f"Failed with {usage_dict}" + + if tested.validate().ok(): + assert tested.parsed_usage.full_usage == usage_dict + assert set(tested.parsed_usage.supported_usage.keys()) == set( + UsageDict.__annotations__.keys() + ) diff --git a/version.txt b/version.txt index 0e24a92ffa..7ac4e5e38f 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.1.12 +0.1.13