add proto files

Simon Larsen 2023-10-10 11:16:22 +01:00
parent ff0314ae9b
commit f1b4214379
No known key found for this signature in database
GPG Key ID: AB45983AA9C81CDE
11 changed files with 1507 additions and 4 deletions


@ -0,0 +1,43 @@
import Express, {
    ExpressRequest,
    ExpressResponse,
    ExpressRouter,
    NextFunction,
} from 'CommonServer/Utils/Express';
import Response from 'CommonServer/Utils/Response';
import logger from 'CommonServer/Utils/Logger';
import protobuf from 'protobufjs';

// Load the OTel proto definitions into a protobuf root namespace.
const root: protobuf.Root = protobuf.loadSync(
    'CommonServer/ProtoFiles/otel.proto'
);

const router: ExpressRouter = Express.getRouter();

router.post(
    '/otel/*',
    async (
        req: ExpressRequest,
        res: ExpressResponse,
        next: NextFunction
    ): Promise<void> => {
        try {
            logger.info('OTelIngest URL: ', req.url);

            if (req.url === '/otel/v1/traces') {
                // TODO: decode and persist the traces payload.
            }

            // Acknowledge the request with an empty 200 response for now.
            return Response.sendEmptyResponse(req, res);
        } catch (err) {
            return next(err);
        }
    }
);

export default router;
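
A self-contained sketch of where this handler could go next (not part of this commit): the loaded protobuf root can look up the TracesData message defined in trace.proto below and decode a raw OTLP payload, assuming the export payload matches that layout and the request body is available as a Buffer (for example via a raw body parser). The decodeTraces helper name is hypothetical.

import protobuf from 'protobufjs';

const root: protobuf.Root = protobuf.loadSync(
    'CommonServer/ProtoFiles/otel.proto'
);

// Hypothetical helper: decode a raw OTLP traces payload into a plain object.
const decodeTraces = (body: Buffer): Record<string, unknown> => {
    const TracesData: protobuf.Type = root.lookupType(
        'opentelemetry.proto.trace.v1.TracesData'
    );
    const message: protobuf.Message = TracesData.decode(body);
    // Render 64-bit and bytes fields as strings so the result is JSON-friendly.
    return TracesData.toObject(message, {
        longs: String,
        bytes: String,
        defaults: true,
    });
};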


@ -8,6 +8,7 @@ import RegisterAPI from './API/Register';
import MonitorAPI from './API/Monitor';
import Ingestor from './API/Probe';
import IncomingRequestAPI from './API/IncomingRequest';
import OTelIngestAPI from './API/OTelIngest';
import { ClickhouseAppInstance } from 'CommonServer/Infrastructure/ClickhouseDatabase';
import Redis from 'CommonServer/Infrastructure/Redis';
@ -21,6 +22,7 @@ app.use([`/${APP_NAME}`, '/'], RegisterAPI);
app.use([`/${APP_NAME}`, '/'], MonitorAPI);
app.use([`/${APP_NAME}`, '/'], Ingestor);
app.use([`/${APP_NAME}`, '/'], IncomingRequestAPI);
app.use([`/${APP_NAME}`, '/'], OTelIngestAPI);
const init: () => Promise<void> = async (): Promise<void> => {
try {


@ -0,0 +1,81 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package opentelemetry.proto.common.v1;
option csharp_namespace = "OpenTelemetry.Proto.Common.V1";
option java_multiple_files = true;
option java_package = "io.opentelemetry.proto.common.v1";
option java_outer_classname = "CommonProto";
option go_package = "go.opentelemetry.io/proto/otlp/common/v1";
// AnyValue is used to represent any type of attribute value. AnyValue may contain a
// primitive value such as a string or integer or it may contain an arbitrary nested
// object containing arrays, key-value lists and primitives.
message AnyValue {
// The value is one of the listed fields. It is valid for all values to be unspecified
// in which case this AnyValue is considered to be "empty".
oneof value {
string string_value = 1;
bool bool_value = 2;
int64 int_value = 3;
double double_value = 4;
ArrayValue array_value = 5;
KeyValueList kvlist_value = 6;
bytes bytes_value = 7;
}
}
// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
// since oneof in AnyValue does not allow repeated fields.
message ArrayValue {
// Array of values. The array may be empty (contain 0 elements).
repeated AnyValue values = 1;
}
// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
// are semantically equivalent.
message KeyValueList {
// A collection of key/value pairs. The list may be empty (may
// contain 0 elements).
// The keys MUST be unique (it is not allowed to have more than one
// value with the same key).
repeated KeyValue values = 1;
}
// KeyValue is a key-value pair that is used to store Span attributes, Link
// attributes, etc.
message KeyValue {
string key = 1;
AnyValue value = 2;
}
// InstrumentationScope is a message representing the instrumentation scope information
// such as the fully qualified name and version.
message InstrumentationScope {
// An empty instrumentation scope name means the name is unknown.
string name = 1;
string version = 2;
// Additional attributes that describe the scope. [Optional].
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
repeated KeyValue attributes = 3;
uint32 dropped_attributes_count = 4;
}
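
For orientation, a sketch (not part of this file) of how the KeyValue/AnyValue pairs above could be flattened into an ordinary map once decoded — assuming protobufjs with its default camelCase field names (stringValue, kvlistValue, and so on); the helper names are hypothetical.

// Sketch: flatten decoded KeyValue[] attributes into a plain record.
type DecodedAnyValue = {
    stringValue?: string;
    boolValue?: boolean;
    intValue?: string | number;
    doubleValue?: number;
    arrayValue?: { values?: DecodedAnyValue[] };
    kvlistValue?: { values?: DecodedKeyValue[] };
    bytesValue?: string;
};

type DecodedKeyValue = { key?: string; value?: DecodedAnyValue };

const anyValueToJs = (v?: DecodedAnyValue): unknown => {
    if (!v) return null; // an "empty" AnyValue
    if (v.stringValue !== undefined) return v.stringValue;
    if (v.boolValue !== undefined) return v.boolValue;
    if (v.intValue !== undefined) return v.intValue;
    if (v.doubleValue !== undefined) return v.doubleValue;
    if (v.arrayValue) return (v.arrayValue.values || []).map(anyValueToJs);
    if (v.kvlistValue) return attributesToJs(v.kvlistValue.values || []);
    if (v.bytesValue !== undefined) return v.bytesValue;
    return null;
};

const attributesToJs = (
    attributes: DecodedKeyValue[]
): Record<string, unknown> => {
    const result: Record<string, unknown> = {};
    for (const kv of attributes) {
        if (kv.key) {
            result[kv.key] = anyValueToJs(kv.value);
        }
    }
    return result;
};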


@ -0,0 +1,211 @@
// Copyright 2020, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package opentelemetry.proto.logs.v1;
import "opentelemetry/proto/common/v1/common.proto";
import "opentelemetry/proto/resource/v1/resource.proto";
option csharp_namespace = "OpenTelemetry.Proto.Logs.V1";
option java_multiple_files = true;
option java_package = "io.opentelemetry.proto.logs.v1";
option java_outer_classname = "LogsProto";
option go_package = "go.opentelemetry.io/proto/otlp/logs/v1";
// LogsData represents the logs data that can be stored in a persistent storage,
// OR can be embedded by other protocols that transfer OTLP logs data but do not
// implement the OTLP protocol.
//
// The main difference between this message and collector protocol is that
// in this message there will not be any "control" or "metadata" specific to
// OTLP protocol.
//
// When new fields are added into this message, the OTLP request MUST be updated
// as well.
message LogsData {
// An array of ResourceLogs.
// For data coming from a single resource this array will typically contain
// one element. Intermediary nodes that receive data from multiple origins
// typically batch the data before forwarding further and in that case this
// array will contain multiple elements.
repeated ResourceLogs resource_logs = 1;
}
// A collection of ScopeLogs from a Resource.
message ResourceLogs {
reserved 1000;
// The resource for the logs in this message.
// If this field is not set then resource info is unknown.
opentelemetry.proto.resource.v1.Resource resource = 1;
// A list of ScopeLogs that originate from a resource.
repeated ScopeLogs scope_logs = 2;
// The Schema URL, if known. This is the identifier of the Schema that the resource data
// is recorded in. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "scope_logs" field which have their own schema_url field.
string schema_url = 3;
}
// A collection of Logs produced by a Scope.
message ScopeLogs {
// The instrumentation scope information for the logs in this message.
// Semantically when InstrumentationScope isn't set, it is equivalent to
// an empty instrumentation scope name (unknown).
opentelemetry.proto.common.v1.InstrumentationScope scope = 1;
// A list of log records.
repeated LogRecord log_records = 2;
// The Schema URL, if known. This is the identifier of the Schema that the log data
// is recorded in. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to all logs in the "logs" field.
string schema_url = 3;
}
// Possible values for LogRecord.SeverityNumber.
enum SeverityNumber {
// UNSPECIFIED is the default SeverityNumber, it MUST NOT be used.
SEVERITY_NUMBER_UNSPECIFIED = 0;
SEVERITY_NUMBER_TRACE = 1;
SEVERITY_NUMBER_TRACE2 = 2;
SEVERITY_NUMBER_TRACE3 = 3;
SEVERITY_NUMBER_TRACE4 = 4;
SEVERITY_NUMBER_DEBUG = 5;
SEVERITY_NUMBER_DEBUG2 = 6;
SEVERITY_NUMBER_DEBUG3 = 7;
SEVERITY_NUMBER_DEBUG4 = 8;
SEVERITY_NUMBER_INFO = 9;
SEVERITY_NUMBER_INFO2 = 10;
SEVERITY_NUMBER_INFO3 = 11;
SEVERITY_NUMBER_INFO4 = 12;
SEVERITY_NUMBER_WARN = 13;
SEVERITY_NUMBER_WARN2 = 14;
SEVERITY_NUMBER_WARN3 = 15;
SEVERITY_NUMBER_WARN4 = 16;
SEVERITY_NUMBER_ERROR = 17;
SEVERITY_NUMBER_ERROR2 = 18;
SEVERITY_NUMBER_ERROR3 = 19;
SEVERITY_NUMBER_ERROR4 = 20;
SEVERITY_NUMBER_FATAL = 21;
SEVERITY_NUMBER_FATAL2 = 22;
SEVERITY_NUMBER_FATAL3 = 23;
SEVERITY_NUMBER_FATAL4 = 24;
}
// LogRecordFlags represents constants used to interpret the
// LogRecord.flags field, which is protobuf 'fixed32' type and is to
// be used as bit-fields. Each non-zero value defined in this enum is
// a bit-mask. To extract the bit-field, for example, use an
// expression like:
//
// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK)
//
enum LogRecordFlags {
// The zero value for the enum. Should not be used for comparisons.
// Instead use bitwise "and" with the appropriate mask as shown above.
LOG_RECORD_FLAGS_DO_NOT_USE = 0;
// Bits 0-7 are used for trace flags.
LOG_RECORD_FLAGS_TRACE_FLAGS_MASK = 0x000000FF;
// Bits 8-31 are reserved for future use.
}
// A log record according to OpenTelemetry Log Data Model:
// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md
message LogRecord {
reserved 4;
// time_unix_nano is the time when the event occurred.
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
// Value of 0 indicates unknown or missing timestamp.
fixed64 time_unix_nano = 1;
// Time when the event was observed by the collection system.
// For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK)
// this timestamp is typically set at the generation time and is equal to Timestamp.
// For events originating externally and collected by OpenTelemetry (e.g. using
// Collector) this is the time when OpenTelemetry's code observed the event measured
// by the clock of the OpenTelemetry code. This field MUST be set once the event is
// observed by OpenTelemetry.
//
// For converting OpenTelemetry log data to formats that support only one timestamp or
// when receiving OpenTelemetry log data by recipients that support only one timestamp
// internally the following logic is recommended:
// - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
// Value of 0 indicates unknown or missing timestamp.
fixed64 observed_time_unix_nano = 11;
// Numerical value of the severity, normalized to values described in Log Data Model.
// [Optional].
SeverityNumber severity_number = 2;
// The severity text (also known as log level). The original string representation as
// it is known at the source. [Optional].
string severity_text = 3;
// A value containing the body of the log record. Can be for example a human-readable
// string message (including multi-line) describing the event in a free form or it can
// be a structured data composed of arrays and maps of other values. [Optional].
opentelemetry.proto.common.v1.AnyValue body = 5;
// Additional attributes that describe the specific event occurrence. [Optional].
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
repeated opentelemetry.proto.common.v1.KeyValue attributes = 6;
uint32 dropped_attributes_count = 7;
// Flags, a bit field. 8 least significant bits are the trace flags as
// defined in W3C Trace Context specification. 24 most significant bits are reserved
// and must be set to 0. Readers must not assume that 24 most significant bits
// will be zero and must correctly mask the bits when reading 8-bit trace flag (use
// flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional].
fixed32 flags = 8;
// A unique identifier for a trace. All logs from the same trace share
// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
// is zero-length and thus is also invalid).
//
// This field is optional.
//
// The receivers SHOULD assume that the log record is not associated with a
// trace if any of the following is true:
// - the field is not present,
// - the field contains an invalid value.
bytes trace_id = 9;
// A unique identifier for a span within a trace, assigned when the span
// is created. The ID is an 8-byte array. An ID with all zeroes OR of length
// other than 8 bytes is considered invalid (empty string in OTLP/JSON
// is zero-length and thus is also invalid).
//
// This field is optional. If the sender specifies a valid span_id then it SHOULD also
// specify a valid trace_id.
//
// The receivers SHOULD assume that the log record is not associated with a
// span if any of the following is true:
// - the field is not present,
// - the field contains an invalid value.
bytes span_id = 10;
}
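
A sketch of how a consumer might read these fields (assumptions: log records decoded to plain objects, helper names hypothetical): SeverityNumber groups into blocks of four per level name, and the 8-bit W3C trace flags come out of LogRecord.flags via LOG_RECORD_FLAGS_TRACE_FLAGS_MASK.

// Sketch: derive a level name from SeverityNumber (1-4 TRACE, 5-8 DEBUG,
// 9-12 INFO, 13-16 WARN, 17-20 ERROR, 21-24 FATAL, 0 UNSPECIFIED).
const severityLevel = (severityNumber: number): string => {
    if (severityNumber >= 1 && severityNumber <= 24) {
        const levels: string[] = ['TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL'];
        return levels[Math.floor((severityNumber - 1) / 4)] as string;
    }
    return 'UNSPECIFIED';
};

// Sketch: extract the W3C trace flags from LogRecord.flags.
const LOG_RECORD_FLAGS_TRACE_FLAGS_MASK: number = 0x000000ff;
const logTraceFlags = (flags: number): number => {
    return flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK;
};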


@ -0,0 +1,682 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package opentelemetry.proto.metrics.v1;
import "opentelemetry/proto/common/v1/common.proto";
import "opentelemetry/proto/resource/v1/resource.proto";
option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1";
option java_multiple_files = true;
option java_package = "io.opentelemetry.proto.metrics.v1";
option java_outer_classname = "MetricsProto";
option go_package = "go.opentelemetry.io/proto/otlp/metrics/v1";
// MetricsData represents the metrics data that can be stored in a persistent
// storage, OR can be embedded by other protocols that transfer OTLP metrics
// data but do not implement the OTLP protocol.
//
// The main difference between this message and collector protocol is that
// in this message there will not be any "control" or "metadata" specific to
// OTLP protocol.
//
// When new fields are added into this message, the OTLP request MUST be updated
// as well.
message MetricsData {
// An array of ResourceMetrics.
// For data coming from a single resource this array will typically contain
// one element. Intermediary nodes that receive data from multiple origins
// typically batch the data before forwarding further and in that case this
// array will contain multiple elements.
repeated ResourceMetrics resource_metrics = 1;
}
// A collection of ScopeMetrics from a Resource.
message ResourceMetrics {
reserved 1000;
// The resource for the metrics in this message.
// If this field is not set then no resource info is known.
opentelemetry.proto.resource.v1.Resource resource = 1;
// A list of metrics that originate from a resource.
repeated ScopeMetrics scope_metrics = 2;
// The Schema URL, if known. This is the identifier of the Schema that the resource data
// is recorded in. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "scope_metrics" field which have their own schema_url field.
string schema_url = 3;
}
// A collection of Metrics produced by a Scope.
message ScopeMetrics {
// The instrumentation scope information for the metrics in this message.
// Semantically when InstrumentationScope isn't set, it is equivalent to
// an empty instrumentation scope name (unknown).
opentelemetry.proto.common.v1.InstrumentationScope scope = 1;
// A list of metrics that originate from an instrumentation library.
repeated Metric metrics = 2;
// The Schema URL, if known. This is the identifier of the Schema that the metric data
// is recorded in. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to all metrics in the "metrics" field.
string schema_url = 3;
}
// Defines a Metric which has one or more timeseries. The following is a
// brief summary of the Metric data model. For more details, see:
//
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
//
//
// The data model and relation between entities is shown in the
// diagram below. Here, "DataPoint" is the term used to refer to any
// one of the specific data point value types, and "points" is the term used
// to refer to any one of the lists of points contained in the Metric.
//
// - Metric is composed of a metadata and data.
// - Metadata part contains a name, description, unit.
// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
// - DataPoint contains timestamps, attributes, and one of the possible value type
// fields.
//
// Metric
// +------------+
// |name |
// |description |
// |unit | +------------------------------------+
// |data |---> |Gauge, Sum, Histogram, Summary, ... |
// +------------+ +------------------------------------+
//
// Data [One of Gauge, Sum, Histogram, Summary, ...]
// +-----------+
// |... | // Metadata about the Data.
// |points |--+
// +-----------+ |
// | +---------------------------+
// | |DataPoint 1 |
// v |+------+------+ +------+ |
// +-----+ ||label |label |...|label | |
// | 1 |-->||value1|value2|...|valueN| |
// +-----+ |+------+------+ +------+ |
// | . | |+-----+ |
// | . | ||value| |
// | . | |+-----+ |
// | . | +---------------------------+
// | . | .
// | . | .
// | . | .
// | . | +---------------------------+
// | . | |DataPoint M |
// +-----+ |+------+------+ +------+ |
// | M |-->||label |label |...|label | |
// +-----+ ||value1|value2|...|valueN| |
// |+------+------+ +------+ |
// |+-----+ |
// ||value| |
// |+-----+ |
// +---------------------------+
//
// Each distinct type of DataPoint represents the output of a specific
// aggregation function, the result of applying the DataPoint's
// associated function to one or more measurements.
//
// All DataPoint types have three common fields:
// - Attributes includes key-value pairs associated with the data point
// - TimeUnixNano is required, set to the end time of the aggregation
// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
// having an AggregationTemporality field, as discussed below.
//
// Both TimeUnixNano and StartTimeUnixNano values are expressed as
// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
//
// # TimeUnixNano
//
// This field is required, having consistent interpretation across
// DataPoint types. TimeUnixNano is the moment corresponding to when
// the data point's aggregate value was captured.
//
// Data points with the 0 value for TimeUnixNano SHOULD be rejected
// by consumers.
//
// # StartTimeUnixNano
//
// StartTimeUnixNano in general allows detecting when a sequence of
// observations is unbroken. This field indicates to consumers the
// start time for points with cumulative and delta
// AggregationTemporality, and it should be included whenever possible
// to support correct rate calculation. Although it may be omitted
// when the start time is truly unknown, setting StartTimeUnixNano is
// strongly encouraged.
message Metric {
reserved 4, 6, 8;
// name of the metric, including its DNS name prefix. It must be unique.
string name = 1;
// description of the metric, which can be used in documentation.
string description = 2;
// unit in which the metric value is reported. Follows the format
// described by http://unitsofmeasure.org/ucum.html.
string unit = 3;
// Data determines the aggregation type (if any) of the metric, what is the
// reported value type for the data points, as well as the relationship to
// the time interval over which they are reported.
oneof data {
Gauge gauge = 5;
Sum sum = 7;
Histogram histogram = 9;
ExponentialHistogram exponential_histogram = 10;
Summary summary = 11;
}
}
// Gauge represents the type of a scalar metric that always exports the
// "current value" for every data point. It should be used for an "unknown"
// aggregation.
//
// A Gauge does not support different aggregation temporalities. Given the
// aggregation is unknown, points cannot be combined using the same
// aggregation, regardless of aggregation temporalities. Therefore,
// AggregationTemporality is not included. Consequently, this also means
// "StartTimeUnixNano" is ignored for all data points.
message Gauge {
repeated NumberDataPoint data_points = 1;
}
// Sum represents the type of a scalar metric that is calculated as a sum of all
// reported measurements over a time interval.
message Sum {
repeated NumberDataPoint data_points = 1;
// aggregation_temporality describes if the aggregator reports delta changes
// since last report time, or cumulative changes since a fixed start time.
AggregationTemporality aggregation_temporality = 2;
// If "true" means that the sum is monotonic.
bool is_monotonic = 3;
}
// Histogram represents the type of a metric that is calculated by aggregating
// as a Histogram of all reported measurements over a time interval.
message Histogram {
repeated HistogramDataPoint data_points = 1;
// aggregation_temporality describes if the aggregator reports delta changes
// since last report time, or cumulative changes since a fixed start time.
AggregationTemporality aggregation_temporality = 2;
}
// ExponentialHistogram represents the type of a metric that is calculated by aggregating
// as an ExponentialHistogram of all reported double measurements over a time interval.
message ExponentialHistogram {
repeated ExponentialHistogramDataPoint data_points = 1;
// aggregation_temporality describes if the aggregator reports delta changes
// since last report time, or cumulative changes since a fixed start time.
AggregationTemporality aggregation_temporality = 2;
}
// Summary metric data are used to convey quantile summaries,
// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
// data type. These data points cannot always be merged in a meaningful way.
// While they can be useful in some applications, histogram data points are
// recommended for new applications.
message Summary {
repeated SummaryDataPoint data_points = 1;
}
// AggregationTemporality defines how a metric aggregator reports aggregated
// values. It describes how those values relate to the time interval over
// which they are aggregated.
enum AggregationTemporality {
// UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
AGGREGATION_TEMPORALITY_UNSPECIFIED = 0;
// DELTA is an AggregationTemporality for a metric aggregator which reports
// changes since last report time. Successive metrics contain aggregation of
// values from continuous and non-overlapping intervals.
//
// The values for a DELTA metric are based only on the time interval
// associated with one measurement cycle. There is no dependency on
// previous measurements like is the case for CUMULATIVE metrics.
//
// For example, consider a system measuring the number of requests that
// it receives and reports the sum of these requests every second as a
// DELTA metric:
//
// 1. The system starts receiving at time=t_0.
// 2. A request is received, the system measures 1 request.
// 3. A request is received, the system measures 1 request.
// 4. A request is received, the system measures 1 request.
// 5. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_0 to
// t_0+1 with a value of 3.
// 6. A request is received, the system measures 1 request.
// 7. A request is received, the system measures 1 request.
// 8. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_0+1 to
// t_0+2 with a value of 2.
AGGREGATION_TEMPORALITY_DELTA = 1;
// CUMULATIVE is an AggregationTemporality for a metric aggregator which
// reports changes since a fixed start time. This means that current values
// of a CUMULATIVE metric depend on all previous measurements since the
// start time. Because of this, the sender is required to retain this state
// in some form. If this state is lost or invalidated, the CUMULATIVE metric
// values MUST be reset and a new fixed start time following the last
// reported measurement time sent MUST be used.
//
// For example, consider a system measuring the number of requests that
// it receives and reports the sum of these requests every second as a
// CUMULATIVE metric:
//
// 1. The system starts receiving at time=t_0.
// 2. A request is received, the system measures 1 request.
// 3. A request is received, the system measures 1 request.
// 4. A request is received, the system measures 1 request.
// 5. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_0 to
// t_0+1 with a value of 3.
// 6. A request is received, the system measures 1 request.
// 7. A request is received, the system measures 1 request.
// 8. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_0 to
// t_0+2 with a value of 5.
// 9. The system experiences a fault and loses state.
// 10. The system recovers and resumes receiving at time=t_1.
// 11. A request is received, the system measures 1 request.
// 12. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_1 to
// t_0+1 with a value of 1.
//
// Note: Even though, when reporting changes since last report time, using
// CUMULATIVE is valid, it is not recommended. This may cause problems for
// systems that do not use start_time to determine when the aggregation
// value was reset (e.g. Prometheus).
AGGREGATION_TEMPORALITY_CUMULATIVE = 2;
}
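
A minimal sketch of what this temporality distinction means for a consumer (assumptions: points already decoded, values as plain numbers, types and names hypothetical): CUMULATIVE points are differenced against the previous point of the same series, and a drop in value or a changed start time is treated as a reset, per the comments above.

// Sketch: convert successive CUMULATIVE points of one series into deltas.
type CumulativePoint = {
    startTimeUnixNano: string;
    timeUnixNano: string;
    value: number;
};

const cumulativeToDelta = (points: CumulativePoint[]): number[] => {
    const deltas: number[] = [];
    let previous: CumulativePoint | null = null;
    for (const point of points) {
        if (
            previous === null ||
            point.value < previous.value || // counter reset (state lost)
            point.startTimeUnixNano !== previous.startTimeUnixNano // new start time
        ) {
            deltas.push(point.value);
        } else {
            deltas.push(point.value - previous.value);
        }
        previous = point;
    }
    return deltas;
};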
// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
// bit-field representing 32 distinct boolean flags. Each flag defined in this
// enum is a bit-mask. To test the presence of a single flag in the flags of
// a data point, for example, use an expression like:
//
// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
//
enum DataPointFlags {
// The zero value for the enum. Should not be used for comparisons.
// Instead use bitwise "and" with the appropriate mask as shown above.
DATA_POINT_FLAGS_DO_NOT_USE = 0;
// This DataPoint is valid but has no recorded value. This value
// SHOULD be used to reflect explicitly missing data in a series, as
// for an equivalent to the Prometheus "staleness marker".
DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1;
// Bits 2-31 are reserved for future use.
}
// NumberDataPoint is a single data point in a timeseries that describes the
// time-varying scalar value of a metric.
message NumberDataPoint {
reserved 1;
// The set of key/value pairs that uniquely identify the timeseries from
// where this point belongs. The list may be empty (may contain 0 elements).
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
repeated opentelemetry.proto.common.v1.KeyValue attributes = 7;
// StartTimeUnixNano is optional but strongly encouraged, see the
// detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
fixed64 start_time_unix_nano = 2;
// TimeUnixNano is required, see the detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
fixed64 time_unix_nano = 3;
// The value itself. A point is considered invalid when one of the recognized
// value fields is not present inside this oneof.
oneof value {
double as_double = 4;
sfixed64 as_int = 6;
}
// (Optional) List of exemplars collected from
// measurements that were used to form the data point
repeated Exemplar exemplars = 5;
// Flags that apply to this specific data point. See DataPointFlags
// for the available flags and their meaning.
uint32 flags = 8;
}
// HistogramDataPoint is a single data point in a timeseries that describes the
// time-varying values of a Histogram. A Histogram contains summary statistics
// for a population of values, it may optionally contain the distribution of
// those values across a set of buckets.
//
// If the histogram contains the distribution of values, then both
// "explicit_bounds" and "bucket counts" fields must be defined.
// If the histogram does not contain the distribution of values, then both
// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
// "sum" are known.
message HistogramDataPoint {
reserved 1;
// The set of key/value pairs that uniquely identify the timeseries from
// where this point belongs. The list may be empty (may contain 0 elements).
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
repeated opentelemetry.proto.common.v1.KeyValue attributes = 9;
// StartTimeUnixNano is optional but strongly encouraged, see the
// detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
fixed64 start_time_unix_nano = 2;
// TimeUnixNano is required, see the detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
fixed64 time_unix_nano = 3;
// count is the number of values in the population. Must be non-negative. This
// value must be equal to the sum of the "count" fields in buckets if a
// histogram is provided.
fixed64 count = 4;
// sum of the values in the population. If count is zero then this field
// must be zero.
//
// Note: Sum should only be filled out when measuring non-negative discrete
// events, and is assumed to be monotonic over the values of these events.
// Negative events *can* be recorded, but sum should not be filled out when
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
optional double sum = 5;
// bucket_counts is an optional field that contains the count values of the histogram
// for each bucket.
//
// The sum of the bucket_counts must equal the value in the count field.
//
// The number of elements in the bucket_counts array must be one greater than
// the number of elements in explicit_bounds array.
repeated fixed64 bucket_counts = 6;
// explicit_bounds specifies buckets with explicitly defined bounds for values.
//
// The boundaries for bucket at index i are:
//
// (-infinity, explicit_bounds[i]] for i == 0
// (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
// (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
//
// The values in the explicit_bounds array must be strictly increasing.
//
// Histogram buckets are inclusive of their upper boundary, except the last
// bucket where the boundary is at infinity. This format is intentionally
// compatible with the OpenMetrics histogram definition.
repeated double explicit_bounds = 7;
// (Optional) List of exemplars collected from
// measurements that were used to form the data point
repeated Exemplar exemplars = 8;
// Flags that apply to this specific data point. See DataPointFlags
// for the available flags and their meaning.
uint32 flags = 10;
// min is the minimum value over (start_time, end_time].
optional double min = 11;
// max is the maximum value over (start_time, end_time].
optional double max = 12;
}
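
A sketch of the bucket layout the explicit_bounds and bucket_counts comments above describe (no assumptions beyond those comments; the helper name is hypothetical): bucket_counts has one more element than explicit_bounds, and a value lands in the first bucket whose upper bound is greater than or equal to it, with the last bucket catching everything above the final bound.

// Sketch: find the bucket index for a value, given explicit_bounds.
// Buckets are (-inf, b0], (b0, b1], ..., (b[n-1], +inf) — one more bucket
// than there are bounds, matching the bucket_counts length rule above.
const bucketIndexFor = (value: number, explicitBounds: number[]): number => {
    for (let i = 0; i < explicitBounds.length; i++) {
        if (value <= (explicitBounds[i] as number)) {
            return i;
        }
    }
    return explicitBounds.length; // overflow bucket
};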
// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
// summary statistics for a population of values, it may optionally contain the
// distribution of those values across a set of buckets.
//
message ExponentialHistogramDataPoint {
// The set of key/value pairs that uniquely identify the timeseries from
// where this point belongs. The list may be empty (may contain 0 elements).
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
repeated opentelemetry.proto.common.v1.KeyValue attributes = 1;
// StartTimeUnixNano is optional but strongly encouraged, see the
// detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
fixed64 start_time_unix_nano = 2;
// TimeUnixNano is required, see the detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
fixed64 time_unix_nano = 3;
// count is the number of values in the population. Must be
// non-negative. This value must be equal to the sum of the "bucket_counts"
// values in the positive and negative Buckets plus the "zero_count" field.
fixed64 count = 4;
// sum of the values in the population. If count is zero then this field
// must be zero.
//
// Note: Sum should only be filled out when measuring non-negative discrete
// events, and is assumed to be monotonic over the values of these events.
// Negative events *can* be recorded, but sum should not be filled out when
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
optional double sum = 5;
// scale describes the resolution of the histogram. Boundaries are
// located at powers of the base, where:
//
// base = (2^(2^-scale))
//
// The histogram bucket identified by `index`, a signed integer,
// contains values that are greater than (base^index) and
// less than or equal to (base^(index+1)).
//
// The positive and negative ranges of the histogram are expressed
// separately. Negative values are mapped by their absolute value
// into the negative range using the same scale as the positive range.
//
// scale is not restricted by the protocol, as the permissible
// values depend on the range of the data.
sint32 scale = 6;
// zero_count is the count of values that are either exactly zero or
// within the region considered zero by the instrumentation at the
// tolerated degree of precision. This bucket stores values that
// cannot be expressed using the standard exponential formula as
// well as values that have been rounded to zero.
//
// Implementations MAY consider the zero bucket to have probability
// mass equal to (zero_count / count).
fixed64 zero_count = 7;
// positive carries the positive range of exponential bucket counts.
Buckets positive = 8;
// negative carries the negative range of exponential bucket counts.
Buckets negative = 9;
// Buckets are a set of bucket counts, encoded in a contiguous array
// of counts.
message Buckets {
// Offset is the bucket index of the first entry in the bucket_counts array.
//
// Note: This uses a varint encoding as a simple form of compression.
sint32 offset = 1;
// bucket_counts is an array of count values, where bucket_counts[i] carries
// the count of the bucket at index (offset+i). bucket_counts[i] is the count
// of values greater than base^(offset+i) and less than or equal to
// base^(offset+i+1).
//
// Note: By contrast, the explicit HistogramDataPoint uses
// fixed64. This field is expected to have many buckets,
// especially zeros, so uint64 has been selected to ensure
// varint encoding.
repeated uint64 bucket_counts = 2;
}
// Flags that apply to this specific data point. See DataPointFlags
// for the available flags and their meaning.
uint32 flags = 10;
// (Optional) List of exemplars collected from
// measurements that were used to form the data point
repeated Exemplar exemplars = 11;
// min is the minimum value over (start_time, end_time].
optional double min = 12;
// max is the maximum value over (start_time, end_time].
optional double max = 13;
// ZeroThreshold may be optionally set to convey the width of the zero
// region. Where the zero region is defined as the closed interval
// [-ZeroThreshold, ZeroThreshold].
// When ZeroThreshold is 0, zero count bucket stores values that cannot be
// expressed using the standard exponential formula as well as values that
// have been rounded to zero.
double zero_threshold = 14;
}
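
A sketch of the scale arithmetic documented on the scale field above (pure math, no library assumptions; helper names hypothetical, floating-point edge cases ignored): base = 2^(2^-scale), and bucket index i covers (base^i, base^(i+1)].

// Sketch: exponential-histogram bucket boundaries derived from `scale`.
const histogramBase = (scale: number): number => {
    return Math.pow(2, Math.pow(2, -scale));
};

// Bucket `index` covers (base^index, base^(index + 1)].
const bucketBounds = (scale: number, index: number): [number, number] => {
    const base: number = histogramBase(scale);
    return [Math.pow(base, index), Math.pow(base, index + 1)];
};

// Index of the bucket containing a positive value: the smallest integer i
// with value <= base^(i + 1), i.e. ceil(log_base(value)) - 1, where
// log_base(value) = log2(value) * 2^scale.
const bucketIndexOf = (scale: number, value: number): number => {
    return Math.ceil(Math.log2(value) * Math.pow(2, scale)) - 1;
};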
// SummaryDataPoint is a single data point in a timeseries that describes the
// time-varying values of a Summary metric.
message SummaryDataPoint {
reserved 1;
// The set of key/value pairs that uniquely identify the timeseries from
// where this point belongs. The list may be empty (may contain 0 elements).
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
repeated opentelemetry.proto.common.v1.KeyValue attributes = 7;
// StartTimeUnixNano is optional but strongly encouraged, see the
// detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
fixed64 start_time_unix_nano = 2;
// TimeUnixNano is required, see the detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
fixed64 time_unix_nano = 3;
// count is the number of values in the population. Must be non-negative.
fixed64 count = 4;
// sum of the values in the population. If count is zero then this field
// must be zero.
//
// Note: Sum should only be filled out when measuring non-negative discrete
// events, and is assumed to be monotonic over the values of these events.
// Negative events *can* be recorded, but sum should not be filled out when
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
double sum = 5;
// Represents the value at a given quantile of a distribution.
//
// To record Min and Max values following conventions are used:
// - The 1.0 quantile is equivalent to the maximum value observed.
// - The 0.0 quantile is equivalent to the minimum value observed.
//
// See the following issue for more context:
// https://github.com/open-telemetry/opentelemetry-proto/issues/125
message ValueAtQuantile {
// The quantile of a distribution. Must be in the interval
// [0.0, 1.0].
double quantile = 1;
// The value at the given quantile of a distribution.
//
// Quantile values must NOT be negative.
double value = 2;
}
// (Optional) list of values at different quantiles of the distribution calculated
// from the current snapshot. The quantiles must be strictly increasing.
repeated ValueAtQuantile quantile_values = 6;
// Flags that apply to this specific data point. See DataPointFlags
// for the available flags and their meaning.
uint32 flags = 8;
}
// A representation of an exemplar, which is a sample input measurement.
// Exemplars also hold information about the environment when the measurement
// was recorded, for example the span and trace ID of the active span when the
// exemplar was recorded.
message Exemplar {
reserved 1;
// The set of key/value pairs that were filtered out by the aggregator, but
// recorded alongside the original measurement. Only key/value pairs that were
// filtered out by the aggregator should be included
repeated opentelemetry.proto.common.v1.KeyValue filtered_attributes = 7;
// time_unix_nano is the exact time when this exemplar was recorded
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
fixed64 time_unix_nano = 2;
// The value of the measurement that was recorded. An exemplar is
// considered invalid when one of the recognized value fields is not present
// inside this oneof.
oneof value {
double as_double = 3;
sfixed64 as_int = 6;
}
// (Optional) Span ID of the exemplar trace.
// span_id may be missing if the measurement is not recorded inside a trace
// or if the trace is not sampled.
bytes span_id = 4;
// (Optional) Trace ID of the exemplar trace.
// trace_id may be missing if the measurement is not recorded inside a trace
// or if the trace is not sampled.
bytes trace_id = 5;
}


@ -0,0 +1,37 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package opentelemetry.proto.resource.v1;
import "opentelemetry/proto/common/v1/common.proto";
option csharp_namespace = "OpenTelemetry.Proto.Resource.V1";
option java_multiple_files = true;
option java_package = "io.opentelemetry.proto.resource.v1";
option java_outer_classname = "ResourceProto";
option go_package = "go.opentelemetry.io/proto/otlp/resource/v1";
// Resource information.
message Resource {
// Set of attributes that describe the resource.
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
repeated opentelemetry.proto.common.v1.KeyValue attributes = 1;
// dropped_attributes_count is the number of dropped attributes. If the value is 0, then
// no attributes were dropped.
uint32 dropped_attributes_count = 2;
}


@ -0,0 +1,333 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package opentelemetry.proto.trace.v1;
import "opentelemetry/proto/common/v1/common.proto";
import "opentelemetry/proto/resource/v1/resource.proto";
option csharp_namespace = "OpenTelemetry.Proto.Trace.V1";
option java_multiple_files = true;
option java_package = "io.opentelemetry.proto.trace.v1";
option java_outer_classname = "TraceProto";
option go_package = "go.opentelemetry.io/proto/otlp/trace/v1";
// TracesData represents the traces data that can be stored in a persistent storage,
// OR can be embedded by other protocols that transfer OTLP traces data but do
// not implement the OTLP protocol.
//
// The main difference between this message and collector protocol is that
// in this message there will not be any "control" or "metadata" specific to
// OTLP protocol.
//
// When new fields are added into this message, the OTLP request MUST be updated
// as well.
message TracesData {
// An array of ResourceSpans.
// For data coming from a single resource this array will typically contain
// one element. Intermediary nodes that receive data from multiple origins
// typically batch the data before forwarding further and in that case this
// array will contain multiple elements.
repeated ResourceSpans resource_spans = 1;
}
// A collection of ScopeSpans from a Resource.
message ResourceSpans {
reserved 1000;
// The resource for the spans in this message.
// If this field is not set then no resource info is known.
opentelemetry.proto.resource.v1.Resource resource = 1;
// A list of ScopeSpans that originate from a resource.
repeated ScopeSpans scope_spans = 2;
// The Schema URL, if known. This is the identifier of the Schema that the resource data
// is recorded in. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "scope_spans" field which have their own schema_url field.
string schema_url = 3;
}
// A collection of Spans produced by an InstrumentationScope.
message ScopeSpans {
// The instrumentation scope information for the spans in this message.
// Semantically when InstrumentationScope isn't set, it is equivalent to
// an empty instrumentation scope name (unknown).
opentelemetry.proto.common.v1.InstrumentationScope scope = 1;
// A list of Spans that originate from an instrumentation scope.
repeated Span spans = 2;
// The Schema URL, if known. This is the identifier of the Schema that the span data
// is recorded in. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to all spans and span events in the "spans" field.
string schema_url = 3;
}
// A Span represents a single operation performed by a single component of the system.
//
// The next available field id is 17.
message Span {
// A unique identifier for a trace. All spans from the same trace share
// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
// is zero-length and thus is also invalid).
//
// This field is required.
bytes trace_id = 1;
// A unique identifier for a span within a trace, assigned when the span
// is created. The ID is an 8-byte array. An ID with all zeroes OR of length
// other than 8 bytes is considered invalid (empty string in OTLP/JSON
// is zero-length and thus is also invalid).
//
// This field is required.
bytes span_id = 2;
// trace_state conveys information about request position in multiple distributed tracing graphs.
// It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
// See also https://github.com/w3c/distributed-tracing for more details about this field.
string trace_state = 3;
// The `span_id` of this span's parent span. If this is a root span, then this
// field must be empty. The ID is an 8-byte array.
bytes parent_span_id = 4;
// Flags, a bit field. 8 least significant bits are the trace
// flags as defined in W3C Trace Context specification. Readers
// MUST not assume that 24 most significant bits will be zero.
// To read the 8-bit W3C trace flag, use `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
//
// When creating span messages, if the message is logically forwarded from another source
// with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
// be copied as-is. If creating from a source that does not have an equivalent flags field
// (such as a runtime representation of an OpenTelemetry span), the high 24 bits MUST
// be set to zero.
//
// [Optional].
//
// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
fixed32 flags = 16;
// A description of the span's operation.
//
// For example, the name can be a qualified method name or a file name
// and a line number where the operation is called. A best practice is to use
// the same display name at the same call point in an application.
// This makes it easier to correlate spans in different traces.
//
// This field is semantically required to be set to non-empty string.
// Empty value is equivalent to an unknown span name.
//
// This field is required.
string name = 5;
// SpanKind is the type of span. Can be used to specify additional relationships between spans
// in addition to a parent/child relationship.
enum SpanKind {
// Unspecified. Do NOT use as default.
// Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED.
SPAN_KIND_UNSPECIFIED = 0;
// Indicates that the span represents an internal operation within an application,
// as opposed to an operation happening at the boundaries. Default value.
SPAN_KIND_INTERNAL = 1;
// Indicates that the span covers server-side handling of an RPC or other
// remote network request.
SPAN_KIND_SERVER = 2;
// Indicates that the span describes a request to some remote service.
SPAN_KIND_CLIENT = 3;
// Indicates that the span describes a producer sending a message to a broker.
// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
// between producer and consumer spans. A PRODUCER span ends when the message was accepted
// by the broker while the logical processing of the message might span a much longer time.
SPAN_KIND_PRODUCER = 4;
// Indicates that the span describes a consumer receiving a message from a broker.
// Like the PRODUCER kind, there is often no direct critical path latency relationship
// between producer and consumer spans.
SPAN_KIND_CONSUMER = 5;
}
// Distinguishes between spans generated in a particular context. For example,
// two spans with the same name may be distinguished using `CLIENT` (caller)
// and `SERVER` (callee) to identify queueing latency associated with the span.
SpanKind kind = 6;
// start_time_unix_nano is the start time of the span. On the client side, this is the time
// kept by the local machine where the span execution starts. On the server side, this
// is the time when the server's application handler starts running.
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
//
// This field is semantically required and it is expected that end_time >= start_time.
fixed64 start_time_unix_nano = 7;
// end_time_unix_nano is the end time of the span. On the client side, this is the time
// kept by the local machine where the span execution ends. On the server side, this
// is the time when the server application handler stops running.
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
//
// This field is semantically required and it is expected that end_time >= start_time.
fixed64 end_time_unix_nano = 8;
// attributes is a collection of key/value pairs. Note, global attributes
// like server name can be set using the resource API. Examples of attributes:
//
// "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
// "/http/server_latency": 300
// "example.com/myattribute": true
// "example.com/score": 10.239
//
// The OpenTelemetry API specification further restricts the allowed value types:
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
repeated opentelemetry.proto.common.v1.KeyValue attributes = 9;
// dropped_attributes_count is the number of attributes that were discarded. Attributes
// can be discarded because their keys are too long or because there are too many
// attributes. If this value is 0, then no attributes were dropped.
uint32 dropped_attributes_count = 10;
// Event is a time-stamped annotation of the span, consisting of user-supplied
// text description and key-value pairs.
message Event {
// time_unix_nano is the time the event occurred.
fixed64 time_unix_nano = 1;
// name of the event.
// This field is semantically required to be set to non-empty string.
string name = 2;
// attributes is a collection of attribute key/value pairs on the event.
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
repeated opentelemetry.proto.common.v1.KeyValue attributes = 3;
// dropped_attributes_count is the number of dropped attributes. If the value is 0,
// then no attributes were dropped.
uint32 dropped_attributes_count = 4;
}
// events is a collection of Event items.
repeated Event events = 11;
// dropped_events_count is the number of dropped events. If the value is 0, then no
// events were dropped.
uint32 dropped_events_count = 12;
// A pointer from the current span to another span in the same trace or in a
// different trace. For example, this can be used in batching operations,
// where a single batch handler processes multiple requests from different
// traces or when the handler receives a request from a different project.
message Link {
// A unique identifier of a trace that this linked span is part of. The ID is a
// 16-byte array.
bytes trace_id = 1;
// A unique identifier for the linked span. The ID is an 8-byte array.
bytes span_id = 2;
// The trace_state associated with the link.
string trace_state = 3;
// attributes is a collection of attribute key/value pairs on the link.
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
repeated opentelemetry.proto.common.v1.KeyValue attributes = 4;
// dropped_attributes_count is the number of dropped attributes. If the value is 0,
// then no attributes were dropped.
uint32 dropped_attributes_count = 5;
// Flags, a bit field. 8 least significant bits are the trace
// flags as defined in W3C Trace Context specification. Readers
// MUST not assume that 24 most significant bits will be zero.
// When creating new spans, the most-significant 24-bits MUST be
// zero. To read the 8-bit W3C trace flag (use flags &
// SPAN_FLAGS_TRACE_FLAGS_MASK). [Optional].
//
// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
fixed32 flags = 6;
}
// links is a collection of Links, which are references from this span to a span
// in the same or different trace.
repeated Link links = 13;
// dropped_links_count is the number of dropped links after the maximum size was
// enforced. If this value is 0, then no links were dropped.
uint32 dropped_links_count = 14;
// An optional final status for this span. Semantically when Status isn't set, it means
// span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
Status status = 15;
}
// The Status type defines a logical error model that is suitable for different
// programming environments, including REST APIs and RPC APIs.
message Status {
reserved 1;
// A developer-facing human readable error message.
string message = 2;
// For the semantics of status codes see
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
enum StatusCode {
// The default status.
STATUS_CODE_UNSET = 0;
// The Span has been validated by an Application developer or Operator to
// have completed successfully.
STATUS_CODE_OK = 1;
// The Span contains an error.
STATUS_CODE_ERROR = 2;
};
// The status code.
StatusCode code = 3;
}
// SpanFlags represents constants used to interpret the
// Span.flags field, which is protobuf 'fixed32' type and is to
// be used as bit-fields. Each non-zero value defined in this enum is
// a bit-mask. To extract the bit-field, for example, use an
// expression like:
//
// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
//
// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
//
// Note that Span flags were introduced in version 1.1 of the
// OpenTelemetry protocol. Older Span producers do not set this
// field, consequently consumers should not rely on the absence of a
// particular flag bit to indicate the presence of a particular feature.
enum SpanFlags {
// The zero value for the enum. Should not be used for comparisons.
// Instead use bitwise "and" with the appropriate mask as shown above.
SPAN_FLAGS_DO_NOT_USE = 0;
// Bits 0-7 are used for trace flags.
SPAN_FLAGS_TRACE_FLAGS_MASK = 0x000000FF;
// Bits 8-31 are reserved for future use.
}
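
For reference, a sketch over decoded spans (assumptions: protobufjs with fixed64 timestamps rendered as decimal strings via longs: String; helper names hypothetical): the W3C trace flags are masked out of Span.flags with SPAN_FLAGS_TRACE_FLAGS_MASK, and the start/end nanosecond timestamps give the span duration.

// Sketch: read the W3C trace flags and the span duration from a decoded span.
const SPAN_FLAGS_TRACE_FLAGS_MASK: number = 0x000000ff;

const spanTraceFlags = (spanFlags: number): number => {
    return spanFlags & SPAN_FLAGS_TRACE_FLAGS_MASK;
};

// BigInt keeps full nanosecond precision before converting to milliseconds.
const spanDurationMs = (
    startTimeUnixNano: string,
    endTimeUnixNano: string
): number => {
    const durationNs: bigint =
        BigInt(endTimeUnixNano) - BigInt(startTimeUnixNano);
    return Number(durationNs / BigInt(1000000));
};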


@ -14,13 +14,14 @@
"cron-parser": "^4.8.1",
"ejs": "^3.1.8",
"Model": "file:../Model",
"protobufjs": "^7.2.5",
"ts-node": "^10.9.1"
},
"devDependencies": {
"@types/jest": "^27.5.0",
"@types/node": "^17.0.31",
"jest": "^28.1.0",
"nodemon": "^2.0.22",
"nodemon": "^2.0.20",
"ts-jest": "^28.0.2"
}
},
@ -39,7 +40,7 @@
"moment-timezone": "^0.5.40",
"nanoid": "^3.3.2",
"nanoid-dictionary": "^4.3.0",
"posthog-js": "^1.37.0",
"posthog-js": "^1.77.0",
"process": "^0.11.10",
"reflect-metadata": "^0.1.13",
"slugify": "^1.6.5",
@ -47,7 +48,7 @@
"uuid": "^8.3.2"
},
"devDependencies": {
"@faker-js/faker": "^6.3.1",
"@faker-js/faker": "^8.0.2",
"@types/jest": "^27.5.2",
"@types/node": "^17.0.22",
"jest": "^27.5.1",
@ -4338,6 +4339,7 @@
"version": "1.0.0",
"license": "MIT",
"dependencies": {
"@clickhouse/client": "^0.2.1",
"@elastic/elasticsearch": "^8.1.0",
"@opentelemetry/api": "^1.1.0",
"@opentelemetry/auto-instrumentations-node": "^0.31.0",
@ -18617,6 +18619,60 @@
"@jridgewell/sourcemap-codec": "1.4.14"
}
},
"node_modules/@protobufjs/aspromise": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
"integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ=="
},
"node_modules/@protobufjs/base64": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
"integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg=="
},
"node_modules/@protobufjs/codegen": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
"integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg=="
},
"node_modules/@protobufjs/eventemitter": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
"integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q=="
},
"node_modules/@protobufjs/fetch": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
"integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==",
"dependencies": {
"@protobufjs/aspromise": "^1.1.1",
"@protobufjs/inquire": "^1.1.0"
}
},
"node_modules/@protobufjs/float": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
"integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ=="
},
"node_modules/@protobufjs/inquire": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
"integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q=="
},
"node_modules/@protobufjs/path": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
"integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA=="
},
"node_modules/@protobufjs/pool": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
"integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw=="
},
"node_modules/@protobufjs/utf8": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
"integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw=="
},
"node_modules/@sinclair/typebox": {
"version": "0.24.51",
"dev": true,
@ -20757,6 +20813,11 @@
"dev": true,
"license": "MIT"
},
"node_modules/long": {
"version": "5.2.3",
"resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz",
"integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q=="
},
"node_modules/lru-cache": {
"version": "6.0.0",
"dev": true,
@ -21136,6 +21197,29 @@
"node": ">= 6"
}
},
"node_modules/protobufjs": {
"version": "7.2.5",
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.5.tgz",
"integrity": "sha512-gGXRSXvxQ7UiPgfw8gevrfRWcTlSbOFg+p/N+JVJEK5VhueL2miT6qTymqAmjr1Q5WbOCyJbyrk6JfWKwlFn6A==",
"hasInstallScript": true,
"dependencies": {
"@protobufjs/aspromise": "^1.1.2",
"@protobufjs/base64": "^1.1.2",
"@protobufjs/codegen": "^2.0.4",
"@protobufjs/eventemitter": "^1.1.0",
"@protobufjs/fetch": "^1.1.0",
"@protobufjs/float": "^1.0.2",
"@protobufjs/inquire": "^1.1.0",
"@protobufjs/path": "^1.1.2",
"@protobufjs/pool": "^1.1.0",
"@protobufjs/utf8": "^1.1.0",
"@types/node": ">=13.7.0",
"long": "^5.0.0"
},
"engines": {
"node": ">=12.0.0"
}
},
"node_modules/pstree.remy": {
"version": "1.1.8",
"dev": true,


@ -19,6 +19,7 @@
"cron-parser": "^4.8.1",
"ejs": "^3.1.8",
"Model": "file:../Model",
"protobufjs": "^7.2.5",
"ts-node": "^10.9.1"
},
"devDependencies": {


@ -0,0 +1,29 @@
processors:
receivers:
  otlp:
    protocols:
      grpc:
      http:
exporters:
  otlphttp:
    endpoint: "http://ingestor:3400/otel"
    headers: {"x-api-key": "****************"}
service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: []
      exporters: [otlphttp]
    metrics:
      receivers: [otlp]
      processors: []
      exporters: [otlphttp]
    logs:
      receivers: [otlp]
      processors: []
      exporters: [otlphttp]


@ -46,7 +46,7 @@
"fix": "npm run fix-lint",
"status-check": "bash ./Tests/Scripts/status-check.sh $npm_config_services",
"start": "export $(grep -v '^#' config.env | xargs) && docker compose up --remove-orphans -d $npm_config_services && npm run status-check",
"ps": "docker compose ps",
"ps": "export $(grep -v '^#' config.env | xargs) && docker compose ps",
"save-logs": "export $(grep -v '^#' config.env | xargs) && docker compose logs --tail=100000 $npm_config_services > logs.txt",
"logs": "export $(grep -v '^#' config.env | xargs) && docker compose logs --tail=100 -f $npm_config_services",
"write-logs": "export $(grep -v '^#' config.env | xargs) && docker compose logs -f $npm_config_services > logs.txt",