Bump OTLP to 1.5.0 (#3912)
* Bump OTLP proto files to 1.5.0

* v1experimental -> v1development

* Adjust exporter to OTLP proto 1.5.0
Kielek authored Jan 7, 2025
1 parent 1788df0 commit 849dd58
Showing 15 changed files with 347 additions and 428 deletions.
16 changes: 8 additions & 8 deletions test/IntegrationTests/ContinuousProfilerTests.cs
@@ -4,7 +4,7 @@
#if NET

using IntegrationTests.Helpers;
-using OpenTelemetry.Proto.Profiles.V1Experimental;
+using OpenTelemetry.Proto.Profiles.V1Development;
using Xunit.Abstractions;

namespace IntegrationTests;
@@ -27,7 +27,7 @@ public void ExportAllocationSamples()
SetEnvironmentVariable("OTEL_DOTNET_AUTO_TRACES_ADDITIONAL_SOURCES", "TestApplication.ContinuousProfiler");
RunTestApplication();

-collector.Expect(profileData => profileData.ResourceProfiles.Any(resourceProfiles => resourceProfiles.ScopeProfiles.Any(scopeProfile => scopeProfile.Profiles.Any(profileContainer => ContainAttributes(profileContainer, "allocation") && profileContainer.Profile.Sample[0].Value[0] != 0.0))));
+collector.Expect(profileData => profileData.ResourceProfiles.Any(resourceProfiles => resourceProfiles.ScopeProfiles.Any(scopeProfile => scopeProfile.Profiles.Any(profile => ContainAttributes(profile, "allocation") && profile.Sample[0].Value[0] != 0.0))));
collector.ResourceExpector.Expect("todo.resource.detector.key", "todo.resource.detector.value");

collector.AssertExpectations();
@@ -47,16 +47,16 @@ public void ExportThreadSamples()

var expectedStackTrace = string.Join("\n", CreateExpectedStackTrace());

-collector.Expect(profileData => profileData.ResourceProfiles.Any(resourceProfiles => resourceProfiles.ScopeProfiles.Any(scopeProfile => scopeProfile.Profiles.Any(profileContainer => ContainStackTraceForClassHierarchy(profileContainer.Profile, expectedStackTrace) && ContainAttributes(profileContainer, "cpu")))));
+collector.Expect(profileData => profileData.ResourceProfiles.Any(resourceProfiles => resourceProfiles.ScopeProfiles.Any(scopeProfile => scopeProfile.Profiles.Any(profile => ContainStackTraceForClassHierarchy(profile, expectedStackTrace) && ContainAttributes(profile, "cpu")))));
collector.ResourceExpector.Expect("todo.resource.detector.key", "todo.resource.detector.value");

collector.AssertExpectations();
collector.ResourceExpector.AssertExpectations();
}

-private static bool ContainAttributes(ProfileContainer profileContainer, string profilingDataType)
+private static bool ContainAttributes(Profile profileContainer, string profilingDataType)
{
-return profileContainer.Attributes.Any(x => x.Key == "todo.profiling.data.type" && x.Value.StringValue == profilingDataType);
+return profileContainer.AttributeTable.Any(x => x.Key == "todo.profiling.data.type" && x.Value.StringValue == profilingDataType);
}

private static List<string> CreateExpectedStackTrace()
@@ -96,11 +96,11 @@ private static List<string> CreateExpectedStackTrace()

private bool ContainStackTraceForClassHierarchy(Profile profile, string expectedStackTrace)
{
-var frames = profile.Location
+var frames = profile.LocationTable
.SelectMany(location => location.Line)
.Select(line => line.FunctionIndex)
-.Select(functionId => profile.Function[(int)functionId - 1])
-.Select(function => profile.StringTable[(int)function.Name]);
+.Select(functionId => profile.FunctionTable[functionId - 1])
+.Select(function => profile.StringTable[function.NameStrindex]);

var stackTrace = string.Join("\n", frames);

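In OTLP 1.5.0 the Profile message keeps locations, functions, and strings in flat lookup tables (LocationTable, FunctionTable, StringTable) that samples reference by index, which is why the test above now resolves frame names through those tables instead of the old ProfileContainer wrapper. A minimal sketch of resolving a single frame name that way, assuming the generated OpenTelemetry.Proto.Profiles.V1Development types and the same 1-based FunctionIndex handling the test uses:

// Sketch only: resolve one stack frame's function name from the
// OTLP 1.5.0 profile lookup tables, mirroring the LINQ query above.
// Assumes the generated OpenTelemetry.Proto.Profiles.V1Development types
// and the 1-based FunctionIndex convention used by the test.
using OpenTelemetry.Proto.Profiles.V1Development;

internal static class ProfileFrameResolver
{
    public static string ResolveFrameName(Profile profile, int locationIndex)
    {
        var location = profile.LocationTable[locationIndex];          // flat table of locations
        var line = location.Line[0];                                  // first line record for that location
        var function = profile.FunctionTable[line.FunctionIndex - 1]; // index into the function table
        return profile.StringTable[function.NameStrindex];            // function name lives in the string table
    }
}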
4 changes: 2 additions & 2 deletions test/IntegrationTests/Helpers/MockProfilesCollector.cs
@@ -6,8 +6,8 @@
using System.Collections.Concurrent;
using System.Text;
using Microsoft.AspNetCore.Http;
-using OpenTelemetry.Proto.Collector.Profiles.V1Experimental;
-using OpenTelemetry.Proto.Profiles.V1Experimental;
+using OpenTelemetry.Proto.Collector.Profiles.V1Development;
+using OpenTelemetry.Proto.Profiles.V1Development;
using Xunit.Abstractions;

namespace IntegrationTests.Helpers;
2 changes: 0 additions & 2 deletions test/IntegrationTests/SmokeTests.cs
@@ -4,9 +4,7 @@
using System.Reflection;
using FluentAssertions;
using IntegrationTests.Helpers;
-using OpenTelemetry.Logs;
using Xunit.Abstractions;
-using LogRecord = OpenTelemetry.Proto.Logs.V1.LogRecord;

#if NETFRAMEWORK
using System.Net;
@@ -14,15 +14,15 @@

syntax = "proto3";

-package opentelemetry.proto.collector.profiles.v1experimental;
+package opentelemetry.proto.collector.profiles.v1development;

import "opentelemetry/proto/profiles/v1experimental/profiles.proto";
import "opentelemetry/proto/profiles/v1development/profiles.proto";

-option csharp_namespace = "OpenTelemetry.Proto.Collector.Profiles.V1Experimental";
+option csharp_namespace = "OpenTelemetry.Proto.Collector.Profiles.V1Development";
option java_multiple_files = true;
-option java_package = "io.opentelemetry.proto.collector.profiles.v1experimental";
+option java_package = "io.opentelemetry.proto.collector.profiles.v1development";
option java_outer_classname = "ProfilesServiceProto";
-option go_package = "go.opentelemetry.io/proto/otlp/collector/profiles/v1experimental";
+option go_package = "go.opentelemetry.io/proto/otlp/collector/profiles/v1development";

// Service that can be used to push profiles between one Application instrumented with
// OpenTelemetry and a collector, or between a collector and a central collector.
@@ -38,7 +38,7 @@ message ExportProfilesServiceRequest {
// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
// data from multiple origins typically batch the data before forwarding further and
// in that case this array will contain multiple elements.
-repeated opentelemetry.proto.profiles.v1experimental.ResourceProfiles resource_profiles = 1;
+repeated opentelemetry.proto.profiles.v1development.ResourceProfiles resource_profiles = 1;
}
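As the comment above describes, a single ExportProfilesServiceRequest carries an array of ResourceProfiles, and intermediaries such as the Collector may batch several origins into one request. A minimal sketch of building such a request with the classes generated from this proto (namespaces follow the csharp_namespace options above; how the ResourceProfiles payloads are produced is assumed, not shown in the commit):

// Sketch only: batching several ResourceProfiles into one export request.
// Uses the classes generated from the proto above; the individual
// ResourceProfiles payloads are assumed to already exist.
using System.Collections.Generic;
using OpenTelemetry.Proto.Collector.Profiles.V1Development;
using OpenTelemetry.Proto.Profiles.V1Development;

internal static class ProfilesRequestBuilder
{
    public static ExportProfilesServiceRequest Build(IEnumerable<ResourceProfiles> batches)
    {
        var request = new ExportProfilesServiceRequest();
        request.ResourceProfiles.AddRange(batches); // one element per origin when batching
        return request;
    }
}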

message ExportProfilesServiceResponse {
@@ -4,6 +4,6 @@ type: google.api.Service
config_version: 3
http:
rules:
-- selector: opentelemetry.proto.collector.profiles.v1experimental.ProfilesService.Export
-post: /v1experimental/profiles
+- selector: opentelemetry.proto.collector.profiles.v1development.ProfilesService.Export
+post: /v1development/profiles
body: "*"
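This http rule means the OTLP/HTTP endpoint for profiles moves from /v1experimental/profiles to /v1development/profiles. A hedged sketch of what a raw export over HTTP could look like against that path (the client setup and base address are illustrative and not part of this commit):

// Sketch only: POSTing a protobuf-encoded ExportProfilesServiceRequest to the
// new path. Assumes client.BaseAddress points at an OTLP/HTTP endpoint;
// OTLP/HTTP uses the application/x-protobuf content type for binary payloads.
using System.Net.Http;
using System.Net.Http.Headers;
using System.Threading.Tasks;
using Google.Protobuf;
using OpenTelemetry.Proto.Collector.Profiles.V1Development;

internal static class ProfilesHttpExporter
{
    public static async Task<HttpResponseMessage> ExportAsync(HttpClient client, ExportProfilesServiceRequest request)
    {
        var content = new ByteArrayContent(request.ToByteArray());
        content.Headers.ContentType = new MediaTypeHeaderValue("application/x-protobuf");

        // Matches the google.api.http rule above: POST /v1development/profiles.
        return await client.PostAsync("/v1development/profiles", content);
    }
}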
20 changes: 18 additions & 2 deletions test/IntegrationTests/opentelemetry/proto/logs/v1/logs.proto
@@ -56,7 +56,8 @@ message ResourceLogs {
repeated ScopeLogs scope_logs = 2;

// The Schema URL, if known. This is the identifier of the Schema that the resource data
-// is recorded in. To learn more about Schema URL see
+// is recorded in. Notably, the last part of the URL path is the version number of the
+// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "scope_logs" field which have their own schema_url field.
@@ -74,7 +75,8 @@ message ScopeLogs {
repeated LogRecord log_records = 2;

// The Schema URL, if known. This is the identifier of the Schema that the log data
-// is recorded in. To learn more about Schema URL see
+// is recorded in. Notably, the last part of the URL path is the version number of the
+// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to all logs in the "logs" field.
string schema_url = 3;
@@ -208,4 +210,18 @@ message LogRecord {
// - the field is not present,
// - the field contains an invalid value.
bytes span_id = 10;

+// A unique identifier of event category/type.
+// All events with the same event_name are expected to conform to the same
+// schema for both their attributes and their body.
+//
+// Recommended to be fully qualified and short (no longer than 256 characters).
+//
+// Presence of event_name on the log record identifies this record
+// as an event.
+//
+// [Optional].
+//
+// Status: [Development]
+string event_name = 12;
}
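The new event_name field is what marks a log record as an event. A minimal sketch of setting it through the generated C# class (the property name EventName and the example value are assumptions based on standard protobuf C# naming, not taken from the commit):

// Sketch only: marking a LogRecord as an event via the new field. Assumes the
// generated property for event_name is EventName (standard protobuf naming);
// the event name value here is purely illustrative.
using OpenTelemetry.Proto.Logs.V1;

var record = new LogRecord
{
    // Records sharing this name are expected to share one schema for
    // their attributes and body.
    EventName = "my_company.order.completed",
};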
39 changes: 31 additions & 8 deletions test/IntegrationTests/opentelemetry/proto/metrics/v1/metrics.proto
@@ -29,6 +29,24 @@ option go_package = "go.opentelemetry.io/proto/otlp/metrics/v1";
// storage, OR can be embedded by other protocols that transfer OTLP metrics
// data but do not implement the OTLP protocol.
//
+// MetricsData
+// └─── ResourceMetrics
+//   ├── Resource
+//   ├── SchemaURL
+//   └── ScopeMetrics
+//      ├── Scope
+//      ├── SchemaURL
+//      └── Metric
+//         ├── Name
+//         ├── Description
+//         ├── Unit
+//         └── data
+//            ├── Gauge
+//            ├── Sum
+//            ├── Histogram
+//            ├── ExponentialHistogram
+//            └── Summary
+//
// The main difference between this message and collector protocol is that
// in this message there will not be any "control" or "metadata" specific to
// OTLP protocol.
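The tree added above documents the containment hierarchy of MetricsData. A minimal sketch of building that hierarchy with the generated C# classes (metric name, unit, and values are illustrative, not taken from the commit):

// Sketch only: the MetricsData -> ResourceMetrics -> ScopeMetrics -> Metric
// hierarchy from the diagram above, built with the generated classes.
// Names, units, and values are illustrative.
using OpenTelemetry.Proto.Common.V1;
using OpenTelemetry.Proto.Metrics.V1;

var metricsData = new MetricsData
{
    ResourceMetrics =
    {
        new ResourceMetrics
        {
            ScopeMetrics =
            {
                new ScopeMetrics
                {
                    Scope = new InstrumentationScope { Name = "example.scope" },
                    Metrics =
                    {
                        new Metric
                        {
                            Name = "http.server.request.count",
                            Unit = "{request}",
                            Sum = new Sum
                            {
                                IsMonotonic = true,
                                AggregationTemporality = AggregationTemporality.Cumulative,
                                DataPoints = { new NumberDataPoint { AsInt = 42 } },
                            },
                        },
                    },
                },
            },
        },
    },
};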
@@ -56,7 +74,8 @@ message ResourceMetrics {
repeated ScopeMetrics scope_metrics = 2;

// The Schema URL, if known. This is the identifier of the Schema that the resource data
-// is recorded in. To learn more about Schema URL see
+// is recorded in. Notably, the last part of the URL path is the version number of the
+// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "scope_metrics" field which have their own schema_url field.
@@ -74,7 +93,8 @@ message ScopeMetrics {
repeated Metric metrics = 2;

// The Schema URL, if known. This is the identifier of the Schema that the metric data
-// is recorded in. To learn more about Schema URL see
+// is recorded in. Notably, the last part of the URL path is the version number of the
+// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to all metrics in the "metrics" field.
string schema_url = 3;
@@ -85,7 +105,6 @@ message ScopeMetrics {
//
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
//
-//
// The data model and relation between entities is shown in the
// diagram below. Here, "DataPoint" is the term used to refer to any
// one of the specific data point value types, and "points" is the term used
@@ -97,7 +116,7 @@ message ScopeMetrics {
// - DataPoint contains timestamps, attributes, and one of the possible value type
// fields.
//
-//    Metric
+// Metric
// +------------+
// |name |
// |description |
@@ -251,6 +270,9 @@ message ExponentialHistogram {
// data type. These data points cannot always be merged in a meaningful way.
// While they can be useful in some applications, histogram data points are
// recommended for new applications.
+// Summary metrics do not have an aggregation temporality field. This is
+// because the count and sum fields of a SummaryDataPoint are assumed to be
+// cumulative values.
message Summary {
repeated SummaryDataPoint data_points = 1;
}
@@ -430,7 +452,7 @@ message HistogramDataPoint {
// events, and is assumed to be monotonic over the values of these events.
// Negative events *can* be recorded, but sum should not be filled out when
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
-// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
+// see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram
optional double sum = 5;

// bucket_counts is an optional field contains the count values of histogram
@@ -509,7 +531,7 @@ message ExponentialHistogramDataPoint {
// events, and is assumed to be monotonic over the values of these events.
// Negative events *can* be recorded, but sum should not be filled out when
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
-// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
+// see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram
optional double sum = 5;

// scale describes the resolution of the histogram. Boundaries are
@@ -589,7 +611,8 @@ message ExponentialHistogramDataPoint {
}

// SummaryDataPoint is a single data point in a timeseries that describes the
-// time-varying values of a Summary metric.
+// time-varying values of a Summary metric. The count and sum fields represent
+// cumulative values.
message SummaryDataPoint {
reserved 1;

@@ -622,7 +645,7 @@ message SummaryDataPoint {
// events, and is assumed to be monotonic over the values of these events.
// Negative events *can* be recorded, but sum should not be filled out when
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
-// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
+// see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary
double sum = 5;

// Represents the value at a given quantile of a distribution.
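Tying the Summary changes together: count and sum on a SummaryDataPoint are cumulative, and each quantile is carried as a ValueAtQuantile entry. A minimal sketch with the generated classes (all numbers are illustrative):

// Sketch only: a SummaryDataPoint with cumulative count/sum and one quantile,
// as the comments above describe. All numbers are illustrative.
using OpenTelemetry.Proto.Metrics.V1;

var point = new SummaryDataPoint
{
    Count = 1024,  // cumulative number of observed events
    Sum = 512.5,   // cumulative sum of observed values
    QuantileValues =
    {
        new SummaryDataPoint.Types.ValueAtQuantile { Quantile = 0.99, Value = 3.7 },
    },
};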