It would be useful to have a FileExporter in addition to the current CollectorExporter and ConsoleExporter. This would let users of the library sink telemetry locally when there is no connectivity. The spec can be found at: https://opentelemetry.io/docs/specs/otel/protocol/file-exporter
I was able to hack together a rough prototype that worked. To get to a proof of concept, I copy/pasted the internal code needed to get my hands on the protobufs so I could use their built-in toProto3Json method.
// External dependencies used by the prototype.
import 'dart:io';

import 'package:file/local.dart';
import 'package:fixnum/fixnum.dart';
import 'package:opentelemetry/api.dart' as api;
import 'package:opentelemetry/sdk.dart' as sdk;
import 'package:path/path.dart' as p;
import 'package:path_provider/path_provider.dart';

// NOTE: pb_trace, pb_common, and pb_resource refer to the package-internal
// generated protobuf bindings that were copy/pasted in to get this prototype
// working; their import paths are omitted here.

class FileExporter implements sdk.SpanExporter {
  const FileExporter._internal({required IOSink sink}) : _sink = sink;

  final IOSink _sink;

  static Future<FileExporter?> init({
    required String fileName,
  }) async {
    try {
      final dir = await getApplicationSupportDirectory();
      final fullPath = p.join(dir.path, fileName);
      final file = LocalFileSystem().file(fullPath);
      file.createSync(recursive: true); // ensure the file exists
      final sink = File(file.absolute.path).openWrite();
      final instance = FileExporter._internal(sink: sink);
      return instance;
    } on Object catch (obj) {
      // never crash the caller!
      print(obj);
      return null;
    }
  }

  @override
  void export(List<sdk.ReadOnlySpan> spans) {
    if (spans.isEmpty) {
      return;
    }
    final pbs = _spansToProtobuf(spans);
    // write one proto3 JSON document per line
    _sink.writeAll(pbs.map((e) => "${e.toProto3Json()}\n"));
  }

  @override
  void forceFlush() {
    _sink.flush();
  }

  @override
  void shutdown() {
    forceFlush();
    _sink.close();
  }

  Iterable<pb_trace.ResourceSpans> _spansToProtobuf(
      List<sdk.ReadOnlySpan> spans) {
    // use a map of maps to group spans by resource and instrumentation library
    final rsm =
        <sdk.Resource, Map<sdk.InstrumentationScope, List<pb_trace.Span>>>{};

    for (final span in spans) {
      final il = rsm[span.resource] ??
          <sdk.InstrumentationScope, List<pb_trace.Span>>{};
      il[span.instrumentationScope] =
          il[span.instrumentationScope] ?? <pb_trace.Span>[]
            ..add(_spanToProtobuf(span));
      rsm[span.resource] = il;
    }

    final rss = <pb_trace.ResourceSpans>[];
    for (final il in rsm.entries) {
      // for each distinct resource, construct the protobuf equivalent
      final attrs = <pb_common.KeyValue>[];
      for (final attr in il.key.attributes.keys) {
        attrs.add(pb_common.KeyValue(
            key: attr,
            value: _attributeValueToProtobuf(il.key.attributes.get(attr)!)));
      }

      final rs = pb_trace.ResourceSpans(
          resource: pb_resource.Resource(attributes: attrs), scopeSpans: []);

      // for each distinct instrumentation library, construct the protobuf
      // equivalent
      for (final ils in il.value.entries) {
        rs.scopeSpans.add(pb_trace.ScopeSpans(
            spans: ils.value,
            scope: pb_common.InstrumentationScope(
                name: ils.key.name, version: ils.key.version)));
      }
      rss.add(rs);
    }
    return rss;
  }

  pb_trace.Span _spanToProtobuf(sdk.ReadOnlySpan span) {
    pb_trace.Status_StatusCode statusCode;
    switch (span.status.code) {
      case api.StatusCode.unset:
        statusCode = pb_trace.Status_StatusCode.STATUS_CODE_UNSET;
        break;
      case api.StatusCode.error:
        statusCode = pb_trace.Status_StatusCode.STATUS_CODE_ERROR;
        break;
      case api.StatusCode.ok:
        statusCode = pb_trace.Status_StatusCode.STATUS_CODE_OK;
        break;
    }

    pb_trace.Span_SpanKind spanKind;
    switch (span.kind) {
      case api.SpanKind.client:
        spanKind = pb_trace.Span_SpanKind.SPAN_KIND_CLIENT;
        break;
      case api.SpanKind.consumer:
        spanKind = pb_trace.Span_SpanKind.SPAN_KIND_CONSUMER;
        break;
      case api.SpanKind.internal:
        spanKind = pb_trace.Span_SpanKind.SPAN_KIND_INTERNAL;
        break;
      case api.SpanKind.producer:
        spanKind = pb_trace.Span_SpanKind.SPAN_KIND_PRODUCER;
        break;
      case api.SpanKind.server:
        spanKind = pb_trace.Span_SpanKind.SPAN_KIND_SERVER;
        break;
      default:
        spanKind = pb_trace.Span_SpanKind.SPAN_KIND_UNSPECIFIED;
    }

    return pb_trace.Span(
        traceId: span.spanContext.traceId.get(),
        spanId: span.spanContext.spanId.get(),
        parentSpanId: span.parentSpanId.get(),
        name: span.name,
        startTimeUnixNano: span.startTime,
        endTimeUnixNano: span.endTime,
        attributes: span.attributes.keys.map((key) => pb_common.KeyValue(
            key: key,
            value: _attributeValueToProtobuf(span.attributes.get(key)!))),
        status: pb_trace.Status(
            code: statusCode, message: span.status.description),
        kind: spanKind,
        links: _spanLinksToProtobuf(span.links),
        events: _spanEventsToPb(span.events));
  }

  Iterable<pb_trace.Span_Link> _spanLinksToProtobuf(List<api.SpanLink> links) {
    final pbLinks = <pb_trace.Span_Link>[];
    for (final link in links) {
      final attrs = <pb_common.KeyValue>[];
      for (final attr in link.attributes) {
        attrs.add(pb_common.KeyValue(
            key: attr.key, value: _attributeValueToProtobuf(attr.value)));
      }
      pbLinks.add(pb_trace.Span_Link(
          traceId: link.context.traceId.get(),
          spanId: link.context.spanId.get(),
          traceState: link.context.traceState.toString(),
          attributes: attrs));
    }
    return pbLinks;
  }

  Iterable<pb_trace.Span_Event> _spanEventsToPb(List<SpanEvent> events) {
    final List<pb_trace.Span_Event> pbEvents = [];
    for (final event in events) {
      final attrs = <pb_common.KeyValue>[];
      for (final attr in event.attributes) {
        attrs.add(pb_common.KeyValue(
            key: attr.key, value: _attributeValueToProtobuf(attr.value)));
      }
      final e = pb_trace.Span_Event(
          timeUnixNano: event.eventTime, name: event.name, attributes: attrs);
      pbEvents.add(e);
    }
    return pbEvents;
  }

  pb_common.AnyValue _attributeValueToProtobuf(Object value) {
    switch (value.runtimeType) {
      case String:
        return pb_common.AnyValue(stringValue: value as String);
      case bool:
        return pb_common.AnyValue(boolValue: value as bool);
      case double:
        return pb_common.AnyValue(doubleValue: value as double);
      case int:
        return pb_common.AnyValue(intValue: Int64(value as int));
      case List:
        final list = value as List;
        if (list.isNotEmpty) {
          switch (list[0].runtimeType) {
            case String:
              final values = <pb_common.AnyValue>[];
              for (final str in list) {
                values.add(pb_common.AnyValue(stringValue: str));
              }
              return pb_common.AnyValue(
                  arrayValue: pb_common.ArrayValue(values: values));
            case bool:
              final values = <pb_common.AnyValue>[];
              for (final b in list) {
                values.add(pb_common.AnyValue(boolValue: b));
              }
              return pb_common.AnyValue(
                  arrayValue: pb_common.ArrayValue(values: values));
            case double:
              final values = <pb_common.AnyValue>[];
              for (final d in list) {
                values.add(pb_common.AnyValue(doubleValue: d));
              }
              return pb_common.AnyValue(
                  arrayValue: pb_common.ArrayValue(values: values));
            case int:
              final values = <pb_common.AnyValue>[];
              for (final i in list) {
                values.add(pb_common.AnyValue(intValue: Int64(i)));
              }
              return pb_common.AnyValue(
                  arrayValue: pb_common.ArrayValue(values: values));
          }
        }
    }
    return pb_common.AnyValue();
  }
}
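For completeness, here is a rough sketch of how such an exporter might be wired into the SDK next to the existing exporters. The provider and processor names used below (TracerProviderBase, SimpleSpanProcessor, registerGlobalTracerProvider) and the spans.jsonl file name are my assumptions about the current API surface, not part of the prototype above.

import 'package:opentelemetry/api.dart' as api;
import 'package:opentelemetry/sdk.dart' as sdk;

Future<void> setUpTracing() async {
  // FileExporter.init returns null if the file could not be opened, so fall
  // back to the ConsoleExporter rather than failing outright.
  final fileExporter = await FileExporter.init(fileName: 'spans.jsonl');
  final exporter = fileExporter ?? sdk.ConsoleExporter();

  final provider = sdk.TracerProviderBase(
    processors: [sdk.SimpleSpanProcessor(exporter)],
  );
  api.registerGlobalTracerProvider(provider);

  // emit a test span; it should show up as one proto3 JSON line in the file
  final tracer = api.globalTracerProvider.getTracer('file-exporter-demo');
  tracer.startSpan('demo-span').end();
}

If the wiring above is roughly right, each exported batch lands as newline-delimited proto3 JSON under the application support directory.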