From 7d3351a74c28b5e4056b90a762ef6715ca29f168 Mon Sep 17 00:00:00 2001 From: Michael Smithhisler Date: Thu, 18 Dec 2025 16:07:56 -0500 Subject: [PATCH] drivers: creates independent module for importing driver interface and structs --- go.mod | 2 + plugin_interface/base/base.go | 222 + plugin_interface/base/client.go | 68 + plugin_interface/base/plugin.go | 86 + plugin_interface/base/proto/base.pb.go | 908 ++++ plugin_interface/base/proto/base.proto | 132 + plugin_interface/base/server.go | 89 + plugin_interface/base/structs/errors.go | 12 + plugin_interface/base/testing.go | 96 + plugin_interface/base/topology.go | 122 + plugin_interface/csi/client.go | 961 ++++ plugin_interface/csi/client_test.go | 1678 ++++++ plugin_interface/csi/errors.go | 11 + plugin_interface/csi/fake/client.go | 375 ++ plugin_interface/csi/plugin.go | 1083 ++++ plugin_interface/csi/structs/csi.go | 181 + plugin_interface/csi/structs/volumes.go | 207 + plugin_interface/csi/testing/client.go | 199 + plugin_interface/device/client.go | 153 + plugin_interface/device/cmd/example/README.md | 121 + .../device/cmd/example/cmd/main.go | 21 + plugin_interface/device/cmd/example/device.go | 377 ++ plugin_interface/device/device.go | 229 + plugin_interface/device/mock.go | 111 + plugin_interface/device/plugin.go | 54 + plugin_interface/device/plugin_test.go | 726 +++ plugin_interface/device/proto/device.pb.go | 1083 ++++ plugin_interface/device/proto/device.proto | 179 + plugin_interface/device/server.go | 123 + plugin_interface/device/util.go | 390 ++ plugin_interface/device/versions.go | 9 + plugin_interface/drivers/client.go | 519 ++ plugin_interface/drivers/cstructs.go | 32 + plugin_interface/drivers/driver.go | 631 +++ plugin_interface/drivers/errors.go | 16 + plugin_interface/drivers/execstreaming.go | 188 + .../drivers/filesystem/taskdir.go | 146 + .../drivers/fsisolation/isolation.go | 25 + plugin_interface/drivers/mock.go | 96 + plugin_interface/drivers/plugin.go | 65 + 
plugin_interface/drivers/plugin_test.go | 974 ++++ plugin_interface/drivers/proto/driver.pb.go | 4826 +++++++++++++++++ plugin_interface/drivers/proto/driver.proto | 787 +++ plugin_interface/drivers/server.go | 431 ++ plugin_interface/drivers/stats.go | 94 + plugin_interface/drivers/structs.go | 738 +++ plugin_interface/drivers/task_handle.go | 50 + .../drivers/testutils/dns_testing.go | 60 + .../drivers/testutils/exec_testing.go | 358 ++ plugin_interface/drivers/testutils/testing.go | 155 + .../drivers/testutils/testing_test.go | 290 + plugin_interface/drivers/utils.go | 733 +++ plugin_interface/drivers/utils/utils_unix.go | 16 + .../drivers/utils/utils_windows.go | 9 + plugin_interface/drivers/utils_test.go | 142 + plugin_interface/drivers/versions.go | 9 + plugin_interface/go.mod | 53 + plugin_interface/go.sum | 187 + plugin_interface/helper/grpc.go | 139 + plugin_interface/helper/logging.go | 89 + plugin_interface/helper/pointer.go | 6 + plugin_interface/helper/slice.go | 42 + plugin_interface/lib/cpustats/stats.go | 77 + plugin_interface/lib/hw/hw.go | 25 + plugin_interface/lib/idset/idset.go | 219 + plugin_interface/lib/idset/idset_test.go | 81 + plugin_interface/serve.go | 53 + .../shared/hclspec/hcl_spec.pb.go | 1124 ++++ .../shared/hclspec/hcl_spec.proto | 425 ++ plugin_interface/shared/hclspec/spec.go | 191 + plugin_interface/shared/structs/attribute.go | 460 ++ .../shared/structs/attribute_test.go | 696 +++ plugin_interface/shared/structs/errors.go | 62 + plugin_interface/shared/structs/msgpack.go | 44 + .../shared/structs/plugin_reattach_config.go | 75 + .../shared/structs/proto/attribute.pb.go | 169 + .../shared/structs/proto/attribute.proto | 28 + .../structs/proto/recoverable_error.pb.go | 83 + .../structs/proto/recoverable_error.proto | 12 + .../shared/structs/proto/stats.pb.go | 224 + .../shared/structs/proto/stats.proto | 47 + plugin_interface/shared/structs/stats.go | 41 + plugin_interface/shared/structs/units.go | 263 + 
plugin_interface/shared/structs/util.go | 257 + 84 files changed, 26570 insertions(+) create mode 100644 plugin_interface/base/base.go create mode 100644 plugin_interface/base/client.go create mode 100644 plugin_interface/base/plugin.go create mode 100644 plugin_interface/base/proto/base.pb.go create mode 100644 plugin_interface/base/proto/base.proto create mode 100644 plugin_interface/base/server.go create mode 100644 plugin_interface/base/structs/errors.go create mode 100644 plugin_interface/base/testing.go create mode 100644 plugin_interface/base/topology.go create mode 100644 plugin_interface/csi/client.go create mode 100644 plugin_interface/csi/client_test.go create mode 100644 plugin_interface/csi/errors.go create mode 100644 plugin_interface/csi/fake/client.go create mode 100644 plugin_interface/csi/plugin.go create mode 100644 plugin_interface/csi/structs/csi.go create mode 100644 plugin_interface/csi/structs/volumes.go create mode 100644 plugin_interface/csi/testing/client.go create mode 100644 plugin_interface/device/client.go create mode 100644 plugin_interface/device/cmd/example/README.md create mode 100644 plugin_interface/device/cmd/example/cmd/main.go create mode 100644 plugin_interface/device/cmd/example/device.go create mode 100644 plugin_interface/device/device.go create mode 100644 plugin_interface/device/mock.go create mode 100644 plugin_interface/device/plugin.go create mode 100644 plugin_interface/device/plugin_test.go create mode 100644 plugin_interface/device/proto/device.pb.go create mode 100644 plugin_interface/device/proto/device.proto create mode 100644 plugin_interface/device/server.go create mode 100644 plugin_interface/device/util.go create mode 100644 plugin_interface/device/versions.go create mode 100644 plugin_interface/drivers/client.go create mode 100644 plugin_interface/drivers/cstructs.go create mode 100644 plugin_interface/drivers/driver.go create mode 100644 plugin_interface/drivers/errors.go create mode 100644 
plugin_interface/drivers/execstreaming.go create mode 100644 plugin_interface/drivers/filesystem/taskdir.go create mode 100644 plugin_interface/drivers/fsisolation/isolation.go create mode 100644 plugin_interface/drivers/mock.go create mode 100644 plugin_interface/drivers/plugin.go create mode 100644 plugin_interface/drivers/plugin_test.go create mode 100644 plugin_interface/drivers/proto/driver.pb.go create mode 100644 plugin_interface/drivers/proto/driver.proto create mode 100644 plugin_interface/drivers/server.go create mode 100644 plugin_interface/drivers/stats.go create mode 100644 plugin_interface/drivers/structs.go create mode 100644 plugin_interface/drivers/task_handle.go create mode 100644 plugin_interface/drivers/testutils/dns_testing.go create mode 100644 plugin_interface/drivers/testutils/exec_testing.go create mode 100644 plugin_interface/drivers/testutils/testing.go create mode 100644 plugin_interface/drivers/testutils/testing_test.go create mode 100644 plugin_interface/drivers/utils.go create mode 100644 plugin_interface/drivers/utils/utils_unix.go create mode 100644 plugin_interface/drivers/utils/utils_windows.go create mode 100644 plugin_interface/drivers/utils_test.go create mode 100644 plugin_interface/drivers/versions.go create mode 100644 plugin_interface/go.mod create mode 100644 plugin_interface/go.sum create mode 100644 plugin_interface/helper/grpc.go create mode 100644 plugin_interface/helper/logging.go create mode 100644 plugin_interface/helper/pointer.go create mode 100644 plugin_interface/helper/slice.go create mode 100644 plugin_interface/lib/cpustats/stats.go create mode 100644 plugin_interface/lib/hw/hw.go create mode 100644 plugin_interface/lib/idset/idset.go create mode 100644 plugin_interface/lib/idset/idset_test.go create mode 100644 plugin_interface/serve.go create mode 100644 plugin_interface/shared/hclspec/hcl_spec.pb.go create mode 100644 plugin_interface/shared/hclspec/hcl_spec.proto create mode 100644 
plugin_interface/shared/hclspec/spec.go create mode 100644 plugin_interface/shared/structs/attribute.go create mode 100644 plugin_interface/shared/structs/attribute_test.go create mode 100644 plugin_interface/shared/structs/errors.go create mode 100644 plugin_interface/shared/structs/msgpack.go create mode 100644 plugin_interface/shared/structs/plugin_reattach_config.go create mode 100644 plugin_interface/shared/structs/proto/attribute.pb.go create mode 100644 plugin_interface/shared/structs/proto/attribute.proto create mode 100644 plugin_interface/shared/structs/proto/recoverable_error.pb.go create mode 100644 plugin_interface/shared/structs/proto/recoverable_error.proto create mode 100644 plugin_interface/shared/structs/proto/stats.pb.go create mode 100644 plugin_interface/shared/structs/proto/stats.proto create mode 100644 plugin_interface/shared/structs/stats.go create mode 100644 plugin_interface/shared/structs/units.go create mode 100644 plugin_interface/shared/structs/util.go diff --git a/go.mod b/go.mod index d1ddd4b883a..4b7124e3a22 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,8 @@ go 1.25.7 replace ( github.com/Microsoft/go-winio => github.com/endocrimes/go-winio v0.4.13-0.20190628114223-fb47a8b41948 github.com/hashicorp/hcl => github.com/hashicorp/hcl v1.0.1-nomad-1 + + github.com/hashicorp/nomad/plugin_interface => ./plugin_interface ) // Nomad is built using the current source of the API module. diff --git a/plugin_interface/base/base.go b/plugin_interface/base/base.go new file mode 100644 index 00000000000..a974c33c7d2 --- /dev/null +++ b/plugin_interface/base/base.go @@ -0,0 +1,222 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package base + +import ( + "github.com/hashicorp/nomad/plugin-interface/base/proto" + "github.com/hashicorp/nomad/plugin-interface/helper" + "github.com/hashicorp/nomad/plugin-interface/lib/idset" + "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" +) + +// BasePlugin is the interface that all Nomad plugins must support. +type BasePlugin interface { + // PluginInfo describes the type and version of a plugin. + PluginInfo() (*PluginInfoResponse, error) + + // ConfigSchema returns the schema for parsing the plugins configuration. + ConfigSchema() (*hclspec.Spec, error) + + // SetConfig is used to set the configuration by passing a MessagePack + // encoding of it. + SetConfig(c *Config) error +} + +// PluginInfoResponse returns basic information about the plugin such that Nomad +// can decide whether to load the plugin or not. +type PluginInfoResponse struct { + // Type returns the plugins type + Type string + + // PluginApiVersions returns the versions of the Nomad plugin API that the + // plugin supports. + PluginApiVersions []string + + // PluginVersion is the version of the plugin. + PluginVersion string + + // Name is the plugins name. + Name string +} + +// Config contains the configuration for the plugin. +type Config struct { + // ApiVersion is the negotiated plugin API version to use. + ApiVersion string + + // PluginConfig is the MessagePack encoding of the plugins user + // configuration. + PluginConfig []byte + + // AgentConfig is the Nomad agents configuration as applicable to plugins + AgentConfig *AgentConfig +} + +// AgentConfig is the Nomad agent's configuration sent to all plugins +type AgentConfig struct { + Driver *ClientDriverConfig +} + +// Compute gets the basic cpu compute availability necessary for drivers. 
+func (ac *AgentConfig) Compute() Compute { + if ac == nil || ac.Driver == nil || ac.Driver.Topology == nil { + return Compute{} + } + return ac.Driver.Topology.Compute() +} + +// ClientDriverConfig is the driver specific configuration for all driver plugins +type ClientDriverConfig struct { + // ClientMaxPort is the upper range of the ports that the client uses for + // communicating with plugin subsystems over loopback + ClientMaxPort uint + + // ClientMinPort is the lower range of the ports that the client uses for + // communicating with plugin subsystems over loopback + ClientMinPort uint + + // Topology is the system hardware topology that is the result of scanning + // hardware combined with client configuration. + // Topology_deprecated *numalib.Topology + + Topology *Topology +} + +func (ac *AgentConfig) toProto() *proto.NomadConfig { + if ac == nil { + return nil + } + cfg := &proto.NomadConfig{} + if ac.Driver != nil { + cfg.Driver = &proto.NomadDriverConfig{ + ClientMaxPort: uint32(ac.Driver.ClientMaxPort), + ClientMinPort: uint32(ac.Driver.ClientMinPort), + Topology: nomadTopologyToProto(ac.Driver.Topology), + } + } + return cfg +} + +func nomadConfigFromProto(pb *proto.NomadConfig) *AgentConfig { + if pb == nil { + return nil + } + cfg := &AgentConfig{} + if pb.Driver != nil { + cfg.Driver = &ClientDriverConfig{ + ClientMaxPort: uint(pb.Driver.ClientMaxPort), + ClientMinPort: uint(pb.Driver.ClientMinPort), + Topology: nomadTopologyFromProto(pb.Driver.Topology), + } + } + return cfg +} + +func nomadTopologyFromProto(pb *proto.ClientTopology) *Topology { + if pb == nil { + return nil + } + t := &Topology{ + Distances: nomadTopologyDistancesFromProto(pb.Distances), + Cores: nomadTopologyCoresFromProto(pb.Cores), + OverrideTotalCompute: MHz(pb.OverrideTotalCompute), + OverrideWitholdCompute: MHz(pb.OverrideWitholdCompute), + } + t.SetNodes(idset.FromFunc(pb.NodeIds, func(i uint32) NodeID { return NodeID(i) })) + + return t +} + +func 
nomadTopologyDistancesFromProto(pb *proto.ClientTopologySLIT) SLIT { + if pb == nil { + return nil + } + size := int(pb.Dimension) + slit := make(SLIT, size) + for row := 0; row < size; row++ { + slit[row] = make([]Cost, size) + for col := 0; col < size; col++ { + index := row*size + col + slit[row][col] = Cost(pb.Values[index]) + } + } + return slit +} + +func nomadTopologyCoresFromProto(pb []*proto.ClientTopologyCore) []Core { + if len(pb) == 0 { + return nil + } + return helper.ConvertSlice(pb, func(pbcore *proto.ClientTopologyCore) Core { + return Core{ + SocketID: SocketID(pbcore.SocketId), + NodeID: NodeID(pbcore.NodeId), + ID: CoreID(pbcore.CoreId), + Grade: nomadCoreGradeFromProto(pbcore.CoreGrade), + Disable: pbcore.Disable, + BaseSpeed: MHz(pbcore.BaseSpeed), + MaxSpeed: MHz(pbcore.MaxSpeed), + GuessSpeed: MHz(pbcore.GuessSpeed), + } + }) +} + +func nomadTopologyToProto(top *Topology) *proto.ClientTopology { + if top == nil { + return nil + } + return &proto.ClientTopology{ + NodeIds: helper.ConvertSlice(top.GetNodes().Slice(), func(id NodeID) uint32 { return uint32(id) }), + Distances: nomadTopologyDistancesToProto(top.Distances), + Cores: nomadTopologyCoresToProto(top.Cores), + OverrideTotalCompute: uint64(top.OverrideTotalCompute), + OverrideWitholdCompute: uint64(top.OverrideWitholdCompute), + } +} + +func nomadTopologyDistancesToProto(slit SLIT) *proto.ClientTopologySLIT { + dimension := len(slit) + values := make([]uint32, 0, dimension) + for row := 0; row < dimension; row++ { + for col := 0; col < dimension; col++ { + values = append(values, uint32(slit[row][col])) + } + } + return &proto.ClientTopologySLIT{ + Dimension: uint32(dimension), + Values: values, + } +} + +func nomadTopologyCoresToProto(cores []Core) []*proto.ClientTopologyCore { + if len(cores) == 0 { + return nil + } + return helper.ConvertSlice(cores, func(core Core) *proto.ClientTopologyCore { + return &proto.ClientTopologyCore{ + SocketId: uint32(core.SocketID), + NodeId: 
uint32(core.NodeID), + CoreId: uint32(core.ID), + CoreGrade: nomadCoreGradeToProto(core.Grade), + Disable: core.Disable, + BaseSpeed: uint64(core.BaseSpeed), + MaxSpeed: uint64(core.MaxSpeed), + GuessSpeed: uint64(core.GuessSpeed), + } + }) +} + +func nomadCoreGradeFromProto(grade proto.CoreGrade) CoreGrade { + if grade == proto.CoreGrade_Performance { + return Performance + } + return Efficiency +} + +func nomadCoreGradeToProto(grade CoreGrade) proto.CoreGrade { + if grade == Performance { + return proto.CoreGrade_Performance + } + return proto.CoreGrade_Efficiency +} diff --git a/plugin_interface/base/client.go b/plugin_interface/base/client.go new file mode 100644 index 00000000000..78b33ca3444 --- /dev/null +++ b/plugin_interface/base/client.go @@ -0,0 +1,68 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package base + +import ( + "context" + "fmt" + + "github.com/hashicorp/nomad/plugin-interface/base/proto" + "github.com/hashicorp/nomad/plugin-interface/helper" + "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" +) + +// BasePluginClient implements the client side of a remote base plugin, using +// gRPC to communicate to the remote plugin. 
+type BasePluginClient struct { + Client proto.BasePluginClient + + // DoneCtx is closed when the plugin exits + DoneCtx context.Context +} + +func (b *BasePluginClient) PluginInfo() (*PluginInfoResponse, error) { + presp, err := b.Client.PluginInfo(b.DoneCtx, &proto.PluginInfoRequest{}) + if err != nil { + return nil, helper.HandleGrpcErr(err, b.DoneCtx) + } + + var ptype string + switch presp.GetType() { + case proto.PluginType_DRIVER: + ptype = PluginTypeDriver + case proto.PluginType_DEVICE: + ptype = PluginTypeDevice + default: + return nil, fmt.Errorf("plugin is of unknown type: %q", presp.GetType().String()) + } + + resp := &PluginInfoResponse{ + Type: ptype, + PluginApiVersions: presp.GetPluginApiVersions(), + PluginVersion: presp.GetPluginVersion(), + Name: presp.GetName(), + } + + return resp, nil +} + +func (b *BasePluginClient) ConfigSchema() (*hclspec.Spec, error) { + presp, err := b.Client.ConfigSchema(b.DoneCtx, &proto.ConfigSchemaRequest{}) + if err != nil { + return nil, helper.HandleGrpcErr(err, b.DoneCtx) + } + + return presp.GetSpec(), nil +} + +func (b *BasePluginClient) SetConfig(c *Config) error { + // Send the config + _, err := b.Client.SetConfig(b.DoneCtx, &proto.SetConfigRequest{ + MsgpackConfig: c.PluginConfig, + NomadConfig: c.AgentConfig.toProto(), + PluginApiVersion: c.ApiVersion, + }) + + return helper.HandleGrpcErr(err, b.DoneCtx) +} diff --git a/plugin_interface/base/plugin.go b/plugin_interface/base/plugin.go new file mode 100644 index 00000000000..f34e17025b9 --- /dev/null +++ b/plugin_interface/base/plugin.go @@ -0,0 +1,86 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package base + +import ( + "bytes" + "context" + "reflect" + + "github.com/hashicorp/go-msgpack/v2/codec" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/plugin-interface/base/proto" + "google.golang.org/grpc" +) + +const ( + // PluginTypeBase implements the base plugin interface + PluginTypeBase = "base" + + // PluginTypeDriver implements the driver plugin interface + PluginTypeDriver = "driver" + + // PluginTypeDevice implements the device plugin interface + PluginTypeDevice = "device" +) + +var ( + // Handshake is a common handshake that is shared by all plugins and Nomad. + Handshake = plugin.HandshakeConfig{ + // ProtocolVersion for the executor protocol. + // Version 1: pre 0.9 netrpc based executor + // Version 2: 0.9+ grpc based executor + ProtocolVersion: 2, + MagicCookieKey: "NOMAD_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "e4327c2e01eabfd75a8a67adb114fb34a757d57eee7728d857a8cec6e91a7255", + } +) + +// PluginBase wraps a BasePlugin and implements go-plugins GRPCPlugin +// interface to expose the interface over gRPC. 
+type PluginBase struct { + plugin.NetRPCUnsupportedPlugin + Impl BasePlugin +} + +func (p *PluginBase) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterBasePluginServer(s, &basePluginServer{ + impl: p.Impl, + broker: broker, + }) + return nil +} + +func (p *PluginBase) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (any, error) { + return &BasePluginClient{ + Client: proto.NewBasePluginClient(c), + DoneCtx: ctx, + }, nil +} + +// MsgpackHandle is a shared handle for encoding/decoding of structs +var MsgpackHandle = func() *codec.MsgpackHandle { + h := &codec.MsgpackHandle{} + h.RawToString = true + + // maintain binary format from time prior to upgrading latest ugorji + h.BasicHandle.TimeNotBuiltin = true + + h.MapType = reflect.TypeOf(map[string]any(nil)) + + // only review struct codec tags - ignore `json` flags + h.TypeInfos = codec.NewTypeInfos([]string{"codec"}) + + return h +}() + +// MsgPackDecode is used to decode a MsgPack encoded object +func MsgPackDecode(buf []byte, out any) error { + return codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle).Decode(out) +} + +// MsgPackEncode is used to encode an object to MsgPack +func MsgPackEncode(b *[]byte, in any) error { + return codec.NewEncoderBytes(b, MsgpackHandle).Encode(in) +} diff --git a/plugin_interface/base/proto/base.pb.go b/plugin_interface/base/proto/base.pb.go new file mode 100644 index 00000000000..eeb4b248214 --- /dev/null +++ b/plugin_interface/base/proto/base.pb.go @@ -0,0 +1,908 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: plugins/base/proto/base.proto + +package proto + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + hclspec "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// PluginType enumerates the type of plugins Nomad supports +type PluginType int32 + +const ( + PluginType_UNKNOWN PluginType = 0 + PluginType_DRIVER PluginType = 2 + PluginType_DEVICE PluginType = 3 +) + +var PluginType_name = map[int32]string{ + 0: "UNKNOWN", + 2: "DRIVER", + 3: "DEVICE", +} + +var PluginType_value = map[string]int32{ + "UNKNOWN": 0, + "DRIVER": 2, + "DEVICE": 3, +} + +func (x PluginType) String() string { + return proto.EnumName(PluginType_name, int32(x)) +} + +func (PluginType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_19edef855873449e, []int{0} +} + +type CoreGrade int32 + +const ( + CoreGrade_Performance CoreGrade = 0 + CoreGrade_Efficiency CoreGrade = 1 +) + +var CoreGrade_name = map[int32]string{ + 0: "Performance", + 1: "Efficiency", +} + +var CoreGrade_value = map[string]int32{ + "Performance": 0, + "Efficiency": 1, +} + +func (x CoreGrade) String() string { + return proto.EnumName(CoreGrade_name, int32(x)) +} + +func (CoreGrade) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_19edef855873449e, []int{1} +} + +// PluginInfoRequest is used to request the plugins basic information. 
+type PluginInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginInfoRequest) Reset() { *m = PluginInfoRequest{} } +func (m *PluginInfoRequest) String() string { return proto.CompactTextString(m) } +func (*PluginInfoRequest) ProtoMessage() {} +func (*PluginInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_19edef855873449e, []int{0} +} + +func (m *PluginInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginInfoRequest.Unmarshal(m, b) +} +func (m *PluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginInfoRequest.Marshal(b, m, deterministic) +} +func (m *PluginInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginInfoRequest.Merge(m, src) +} +func (m *PluginInfoRequest) XXX_Size() int { + return xxx_messageInfo_PluginInfoRequest.Size(m) +} +func (m *PluginInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PluginInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginInfoRequest proto.InternalMessageInfo + +// PluginInfoResponse returns basic information about the plugin such +// that Nomad can decide whether to load the plugin or not. +type PluginInfoResponse struct { + // type indicates what type of plugin this is. + Type PluginType `protobuf:"varint,1,opt,name=type,proto3,enum=hashicorp.nomad.plugins.base.proto.PluginType" json:"type,omitempty"` + // plugin_api_versions indicates the versions of the Nomad Plugin API + // this plugin supports. + PluginApiVersions []string `protobuf:"bytes,2,rep,name=plugin_api_versions,json=pluginApiVersions,proto3" json:"plugin_api_versions,omitempty"` + // plugin_version is the semver version of this individual plugin. + // This is divorced from Nomad’s development and versioning. 
+ PluginVersion string `protobuf:"bytes,3,opt,name=plugin_version,json=pluginVersion,proto3" json:"plugin_version,omitempty"` + // name is the name of the plugin + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginInfoResponse) Reset() { *m = PluginInfoResponse{} } +func (m *PluginInfoResponse) String() string { return proto.CompactTextString(m) } +func (*PluginInfoResponse) ProtoMessage() {} +func (*PluginInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_19edef855873449e, []int{1} +} + +func (m *PluginInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginInfoResponse.Unmarshal(m, b) +} +func (m *PluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginInfoResponse.Marshal(b, m, deterministic) +} +func (m *PluginInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginInfoResponse.Merge(m, src) +} +func (m *PluginInfoResponse) XXX_Size() int { + return xxx_messageInfo_PluginInfoResponse.Size(m) +} +func (m *PluginInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PluginInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginInfoResponse proto.InternalMessageInfo + +func (m *PluginInfoResponse) GetType() PluginType { + if m != nil { + return m.Type + } + return PluginType_UNKNOWN +} + +func (m *PluginInfoResponse) GetPluginApiVersions() []string { + if m != nil { + return m.PluginApiVersions + } + return nil +} + +func (m *PluginInfoResponse) GetPluginVersion() string { + if m != nil { + return m.PluginVersion + } + return "" +} + +func (m *PluginInfoResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// ConfigSchemaRequest is used to request the configurations schema. 
+type ConfigSchemaRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigSchemaRequest) Reset() { *m = ConfigSchemaRequest{} } +func (m *ConfigSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*ConfigSchemaRequest) ProtoMessage() {} +func (*ConfigSchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_19edef855873449e, []int{2} +} + +func (m *ConfigSchemaRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigSchemaRequest.Unmarshal(m, b) +} +func (m *ConfigSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigSchemaRequest.Marshal(b, m, deterministic) +} +func (m *ConfigSchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigSchemaRequest.Merge(m, src) +} +func (m *ConfigSchemaRequest) XXX_Size() int { + return xxx_messageInfo_ConfigSchemaRequest.Size(m) +} +func (m *ConfigSchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigSchemaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigSchemaRequest proto.InternalMessageInfo + +// ConfigSchemaResponse returns the plugins configuration schema. 
+type ConfigSchemaResponse struct { + // spec is the plugins configuration schema + Spec *hclspec.Spec `protobuf:"bytes,1,opt,name=spec,proto3" json:"spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigSchemaResponse) Reset() { *m = ConfigSchemaResponse{} } +func (m *ConfigSchemaResponse) String() string { return proto.CompactTextString(m) } +func (*ConfigSchemaResponse) ProtoMessage() {} +func (*ConfigSchemaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_19edef855873449e, []int{3} +} + +func (m *ConfigSchemaResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigSchemaResponse.Unmarshal(m, b) +} +func (m *ConfigSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigSchemaResponse.Marshal(b, m, deterministic) +} +func (m *ConfigSchemaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigSchemaResponse.Merge(m, src) +} +func (m *ConfigSchemaResponse) XXX_Size() int { + return xxx_messageInfo_ConfigSchemaResponse.Size(m) +} +func (m *ConfigSchemaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigSchemaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigSchemaResponse proto.InternalMessageInfo + +func (m *ConfigSchemaResponse) GetSpec() *hclspec.Spec { + if m != nil { + return m.Spec + } + return nil +} + +// SetConfigRequest is used to set the configuration +type SetConfigRequest struct { + // msgpack_config is the configuration encoded as MessagePack. + MsgpackConfig []byte `protobuf:"bytes,1,opt,name=msgpack_config,json=msgpackConfig,proto3" json:"msgpack_config,omitempty"` + // nomad_config is the nomad client configuration sent to all plugins. + NomadConfig *NomadConfig `protobuf:"bytes,2,opt,name=nomad_config,json=nomadConfig,proto3" json:"nomad_config,omitempty"` + // plugin_api_version is the api version to use. 
+ PluginApiVersion string `protobuf:"bytes,3,opt,name=plugin_api_version,json=pluginApiVersion,proto3" json:"plugin_api_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetConfigRequest) Reset() { *m = SetConfigRequest{} } +func (m *SetConfigRequest) String() string { return proto.CompactTextString(m) } +func (*SetConfigRequest) ProtoMessage() {} +func (*SetConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_19edef855873449e, []int{4} +} + +func (m *SetConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetConfigRequest.Unmarshal(m, b) +} +func (m *SetConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetConfigRequest.Marshal(b, m, deterministic) +} +func (m *SetConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetConfigRequest.Merge(m, src) +} +func (m *SetConfigRequest) XXX_Size() int { + return xxx_messageInfo_SetConfigRequest.Size(m) +} +func (m *SetConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetConfigRequest proto.InternalMessageInfo + +func (m *SetConfigRequest) GetMsgpackConfig() []byte { + if m != nil { + return m.MsgpackConfig + } + return nil +} + +func (m *SetConfigRequest) GetNomadConfig() *NomadConfig { + if m != nil { + return m.NomadConfig + } + return nil +} + +func (m *SetConfigRequest) GetPluginApiVersion() string { + if m != nil { + return m.PluginApiVersion + } + return "" +} + +// NomadConfig is the client configuration sent to all plugins +type NomadConfig struct { + // driver specific configuration sent to all plugins + Driver *NomadDriverConfig `protobuf:"bytes,1,opt,name=driver,proto3" json:"driver,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NomadConfig) Reset() { *m = 
NomadConfig{} } +func (m *NomadConfig) String() string { return proto.CompactTextString(m) } +func (*NomadConfig) ProtoMessage() {} +func (*NomadConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_19edef855873449e, []int{5} +} + +func (m *NomadConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NomadConfig.Unmarshal(m, b) +} +func (m *NomadConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NomadConfig.Marshal(b, m, deterministic) +} +func (m *NomadConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_NomadConfig.Merge(m, src) +} +func (m *NomadConfig) XXX_Size() int { + return xxx_messageInfo_NomadConfig.Size(m) +} +func (m *NomadConfig) XXX_DiscardUnknown() { + xxx_messageInfo_NomadConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_NomadConfig proto.InternalMessageInfo + +func (m *NomadConfig) GetDriver() *NomadDriverConfig { + if m != nil { + return m.Driver + } + return nil +} + +// NomadDriverConfig is the driver specific client configuration sent to all +// driver plugins +type NomadDriverConfig struct { + // ClientMaxPort is the upper range of the ports that the client uses for + // communicating with plugin subsystems over loopback + // buf:lint:ignore FIELD_LOWER_SNAKE_CASE + ClientMaxPort uint32 `protobuf:"varint,1,opt,name=ClientMaxPort,proto3" json:"ClientMaxPort,omitempty"` + // ClientMinPort is the lower range of the ports that the client uses for + // communicating with plugin subsystems over loopback + // buf:lint:ignore FIELD_LOWER_SNAKE_CASE + ClientMinPort uint32 `protobuf:"varint,2,opt,name=ClientMinPort,proto3" json:"ClientMinPort,omitempty"` + // Topology is the complex hardware topology detected by the client + // combined with client configuration. 
+ Topology *ClientTopology `protobuf:"bytes,3,opt,name=Topology,proto3" json:"Topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NomadDriverConfig) Reset() { *m = NomadDriverConfig{} } +func (m *NomadDriverConfig) String() string { return proto.CompactTextString(m) } +func (*NomadDriverConfig) ProtoMessage() {} +func (*NomadDriverConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_19edef855873449e, []int{6} +} + +func (m *NomadDriverConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NomadDriverConfig.Unmarshal(m, b) +} +func (m *NomadDriverConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NomadDriverConfig.Marshal(b, m, deterministic) +} +func (m *NomadDriverConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_NomadDriverConfig.Merge(m, src) +} +func (m *NomadDriverConfig) XXX_Size() int { + return xxx_messageInfo_NomadDriverConfig.Size(m) +} +func (m *NomadDriverConfig) XXX_DiscardUnknown() { + xxx_messageInfo_NomadDriverConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_NomadDriverConfig proto.InternalMessageInfo + +func (m *NomadDriverConfig) GetClientMaxPort() uint32 { + if m != nil { + return m.ClientMaxPort + } + return 0 +} + +func (m *NomadDriverConfig) GetClientMinPort() uint32 { + if m != nil { + return m.ClientMinPort + } + return 0 +} + +func (m *NomadDriverConfig) GetTopology() *ClientTopology { + if m != nil { + return m.Topology + } + return nil +} + +// numalib/Topology +type ClientTopology struct { + NodeIds []uint32 `protobuf:"varint,1,rep,packed,name=node_ids,json=nodeIds,proto3" json:"node_ids,omitempty"` + Distances *ClientTopologySLIT `protobuf:"bytes,2,opt,name=distances,proto3" json:"distances,omitempty"` + Cores []*ClientTopologyCore `protobuf:"bytes,3,rep,name=cores,proto3" json:"cores,omitempty"` + OverrideTotalCompute uint64 
`protobuf:"varint,4,opt,name=override_total_compute,json=overrideTotalCompute,proto3" json:"override_total_compute,omitempty"` + OverrideWitholdCompute uint64 `protobuf:"varint,5,opt,name=override_withold_compute,json=overrideWitholdCompute,proto3" json:"override_withold_compute,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientTopology) Reset() { *m = ClientTopology{} } +func (m *ClientTopology) String() string { return proto.CompactTextString(m) } +func (*ClientTopology) ProtoMessage() {} +func (*ClientTopology) Descriptor() ([]byte, []int) { + return fileDescriptor_19edef855873449e, []int{7} +} + +func (m *ClientTopology) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientTopology.Unmarshal(m, b) +} +func (m *ClientTopology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientTopology.Marshal(b, m, deterministic) +} +func (m *ClientTopology) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientTopology.Merge(m, src) +} +func (m *ClientTopology) XXX_Size() int { + return xxx_messageInfo_ClientTopology.Size(m) +} +func (m *ClientTopology) XXX_DiscardUnknown() { + xxx_messageInfo_ClientTopology.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientTopology proto.InternalMessageInfo + +func (m *ClientTopology) GetNodeIds() []uint32 { + if m != nil { + return m.NodeIds + } + return nil +} + +func (m *ClientTopology) GetDistances() *ClientTopologySLIT { + if m != nil { + return m.Distances + } + return nil +} + +func (m *ClientTopology) GetCores() []*ClientTopologyCore { + if m != nil { + return m.Cores + } + return nil +} + +func (m *ClientTopology) GetOverrideTotalCompute() uint64 { + if m != nil { + return m.OverrideTotalCompute + } + return 0 +} + +func (m *ClientTopology) GetOverrideWitholdCompute() uint64 { + if m != nil { + return m.OverrideWitholdCompute + } + return 0 +} + +// numalib/SLIT +type ClientTopologySLIT 
struct { + // dimension is the row and column size of the slit matrix + Dimension uint32 `protobuf:"varint,1,opt,name=dimension,proto3" json:"dimension,omitempty"` + // values is the flattened matrix of slit values + Values []uint32 `protobuf:"varint,2,rep,packed,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientTopologySLIT) Reset() { *m = ClientTopologySLIT{} } +func (m *ClientTopologySLIT) String() string { return proto.CompactTextString(m) } +func (*ClientTopologySLIT) ProtoMessage() {} +func (*ClientTopologySLIT) Descriptor() ([]byte, []int) { + return fileDescriptor_19edef855873449e, []int{8} +} + +func (m *ClientTopologySLIT) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientTopologySLIT.Unmarshal(m, b) +} +func (m *ClientTopologySLIT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientTopologySLIT.Marshal(b, m, deterministic) +} +func (m *ClientTopologySLIT) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientTopologySLIT.Merge(m, src) +} +func (m *ClientTopologySLIT) XXX_Size() int { + return xxx_messageInfo_ClientTopologySLIT.Size(m) +} +func (m *ClientTopologySLIT) XXX_DiscardUnknown() { + xxx_messageInfo_ClientTopologySLIT.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientTopologySLIT proto.InternalMessageInfo + +func (m *ClientTopologySLIT) GetDimension() uint32 { + if m != nil { + return m.Dimension + } + return 0 +} + +func (m *ClientTopologySLIT) GetValues() []uint32 { + if m != nil { + return m.Values + } + return nil +} + +// numalib/Core +type ClientTopologyCore struct { + SocketId uint32 `protobuf:"varint,1,opt,name=socket_id,json=socketId,proto3" json:"socket_id,omitempty"` + NodeId uint32 `protobuf:"varint,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + CoreId uint32 `protobuf:"varint,3,opt,name=core_id,json=coreId,proto3" 
json:"core_id,omitempty"` + CoreGrade CoreGrade `protobuf:"varint,4,opt,name=core_grade,json=coreGrade,proto3,enum=hashicorp.nomad.plugins.base.proto.CoreGrade" json:"core_grade,omitempty"` + Disable bool `protobuf:"varint,5,opt,name=disable,proto3" json:"disable,omitempty"` + BaseSpeed uint64 `protobuf:"varint,6,opt,name=base_speed,json=baseSpeed,proto3" json:"base_speed,omitempty"` + MaxSpeed uint64 `protobuf:"varint,7,opt,name=max_speed,json=maxSpeed,proto3" json:"max_speed,omitempty"` + GuessSpeed uint64 `protobuf:"varint,8,opt,name=guess_speed,json=guessSpeed,proto3" json:"guess_speed,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientTopologyCore) Reset() { *m = ClientTopologyCore{} } +func (m *ClientTopologyCore) String() string { return proto.CompactTextString(m) } +func (*ClientTopologyCore) ProtoMessage() {} +func (*ClientTopologyCore) Descriptor() ([]byte, []int) { + return fileDescriptor_19edef855873449e, []int{9} +} + +func (m *ClientTopologyCore) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientTopologyCore.Unmarshal(m, b) +} +func (m *ClientTopologyCore) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientTopologyCore.Marshal(b, m, deterministic) +} +func (m *ClientTopologyCore) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientTopologyCore.Merge(m, src) +} +func (m *ClientTopologyCore) XXX_Size() int { + return xxx_messageInfo_ClientTopologyCore.Size(m) +} +func (m *ClientTopologyCore) XXX_DiscardUnknown() { + xxx_messageInfo_ClientTopologyCore.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientTopologyCore proto.InternalMessageInfo + +func (m *ClientTopologyCore) GetSocketId() uint32 { + if m != nil { + return m.SocketId + } + return 0 +} + +func (m *ClientTopologyCore) GetNodeId() uint32 { + if m != nil { + return m.NodeId + } + return 0 +} + +func (m *ClientTopologyCore) GetCoreId() uint32 { + if 
m != nil { + return m.CoreId + } + return 0 +} + +func (m *ClientTopologyCore) GetCoreGrade() CoreGrade { + if m != nil { + return m.CoreGrade + } + return CoreGrade_Performance +} + +func (m *ClientTopologyCore) GetDisable() bool { + if m != nil { + return m.Disable + } + return false +} + +func (m *ClientTopologyCore) GetBaseSpeed() uint64 { + if m != nil { + return m.BaseSpeed + } + return 0 +} + +func (m *ClientTopologyCore) GetMaxSpeed() uint64 { + if m != nil { + return m.MaxSpeed + } + return 0 +} + +func (m *ClientTopologyCore) GetGuessSpeed() uint64 { + if m != nil { + return m.GuessSpeed + } + return 0 +} + +// SetConfigResponse is used to respond to setting the configuration +type SetConfigResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetConfigResponse) Reset() { *m = SetConfigResponse{} } +func (m *SetConfigResponse) String() string { return proto.CompactTextString(m) } +func (*SetConfigResponse) ProtoMessage() {} +func (*SetConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_19edef855873449e, []int{10} +} + +func (m *SetConfigResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetConfigResponse.Unmarshal(m, b) +} +func (m *SetConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetConfigResponse.Marshal(b, m, deterministic) +} +func (m *SetConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetConfigResponse.Merge(m, src) +} +func (m *SetConfigResponse) XXX_Size() int { + return xxx_messageInfo_SetConfigResponse.Size(m) +} +func (m *SetConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetConfigResponse proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("hashicorp.nomad.plugins.base.proto.PluginType", PluginType_name, PluginType_value) + 
proto.RegisterEnum("hashicorp.nomad.plugins.base.proto.CoreGrade", CoreGrade_name, CoreGrade_value) + proto.RegisterType((*PluginInfoRequest)(nil), "hashicorp.nomad.plugins.base.proto.PluginInfoRequest") + proto.RegisterType((*PluginInfoResponse)(nil), "hashicorp.nomad.plugins.base.proto.PluginInfoResponse") + proto.RegisterType((*ConfigSchemaRequest)(nil), "hashicorp.nomad.plugins.base.proto.ConfigSchemaRequest") + proto.RegisterType((*ConfigSchemaResponse)(nil), "hashicorp.nomad.plugins.base.proto.ConfigSchemaResponse") + proto.RegisterType((*SetConfigRequest)(nil), "hashicorp.nomad.plugins.base.proto.SetConfigRequest") + proto.RegisterType((*NomadConfig)(nil), "hashicorp.nomad.plugins.base.proto.NomadConfig") + proto.RegisterType((*NomadDriverConfig)(nil), "hashicorp.nomad.plugins.base.proto.NomadDriverConfig") + proto.RegisterType((*ClientTopology)(nil), "hashicorp.nomad.plugins.base.proto.ClientTopology") + proto.RegisterType((*ClientTopologySLIT)(nil), "hashicorp.nomad.plugins.base.proto.ClientTopologySLIT") + proto.RegisterType((*ClientTopologyCore)(nil), "hashicorp.nomad.plugins.base.proto.ClientTopologyCore") + proto.RegisterType((*SetConfigResponse)(nil), "hashicorp.nomad.plugins.base.proto.SetConfigResponse") +} + +func init() { + proto.RegisterFile("plugins/base/proto/base.proto", fileDescriptor_19edef855873449e) +} + +var fileDescriptor_19edef855873449e = []byte{ + // 861 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x4d, 0x6f, 0x23, 0x45, + 0x10, 0xcd, 0xd8, 0x8e, 0x3f, 0xca, 0xb1, 0x71, 0x2a, 0x0b, 0x0c, 0x86, 0x15, 0xd6, 0x88, 0x95, + 0xa2, 0x55, 0x98, 0x08, 0xb3, 0x59, 0xf6, 0x08, 0xf1, 0x46, 0xc8, 0x22, 0x6b, 0xa2, 0xb6, 0xc9, + 0x22, 0x84, 0x64, 0x75, 0x66, 0xda, 0x76, 0x6b, 0x3d, 0xd3, 0xc3, 0xf4, 0x38, 0x24, 0x48, 0x9c, + 0x38, 0xf3, 0x3f, 0xb8, 0xf1, 0x03, 0x38, 0x70, 0xe0, 0x8f, 0xa1, 0xfe, 0x18, 0xdb, 0x49, 0x84, + 0xb0, 0xf7, 0xe4, 0xee, 0x7a, 0xaf, 0x5e, 0x75, 0xbd, 0x1e, 
0x57, 0xc3, 0xe3, 0x64, 0xbe, 0x98, + 0xf2, 0x58, 0x1e, 0x5f, 0x51, 0xc9, 0x8e, 0x93, 0x54, 0x64, 0x42, 0x2f, 0x7d, 0xbd, 0x44, 0x6f, + 0x46, 0xe5, 0x8c, 0x07, 0x22, 0x4d, 0xfc, 0x58, 0x44, 0x34, 0xf4, 0x2d, 0xdd, 0x5f, 0x71, 0xda, + 0x4f, 0x72, 0x09, 0x39, 0xa3, 0x29, 0x0b, 0x8f, 0x67, 0xc1, 0x5c, 0x26, 0x2c, 0x50, 0xbf, 0x63, + 0xb5, 0x30, 0x34, 0xef, 0x00, 0xf6, 0x2f, 0x34, 0xb1, 0x1f, 0x4f, 0x04, 0x61, 0x3f, 0x2d, 0x98, + 0xcc, 0xbc, 0x7f, 0x1c, 0xc0, 0xf5, 0xa8, 0x4c, 0x44, 0x2c, 0x19, 0x9e, 0x42, 0x29, 0xbb, 0x4d, + 0x98, 0xeb, 0x74, 0x9c, 0xc3, 0x66, 0xd7, 0xf7, 0xff, 0xff, 0x14, 0xbe, 0x51, 0x19, 0xdd, 0x26, + 0x8c, 0xe8, 0x5c, 0xf4, 0xe1, 0xc0, 0xd0, 0xc6, 0x34, 0xe1, 0xe3, 0x6b, 0x96, 0x4a, 0x2e, 0x62, + 0xe9, 0x16, 0x3a, 0xc5, 0xc3, 0x1a, 0xd9, 0x37, 0xd0, 0x57, 0x09, 0xbf, 0xb4, 0x00, 0x3e, 0x81, + 0xa6, 0xe5, 0x5b, 0xae, 0x5b, 0xec, 0x38, 0x87, 0x35, 0xd2, 0x30, 0x51, 0xcb, 0x43, 0x84, 0x52, + 0x4c, 0x23, 0xe6, 0x96, 0x34, 0xa8, 0xd7, 0xde, 0xbb, 0x70, 0xd0, 0x13, 0xf1, 0x84, 0x4f, 0x87, + 0xc1, 0x8c, 0x45, 0x34, 0x6f, 0xee, 0x7b, 0x78, 0x74, 0x37, 0x6c, 0xbb, 0xfb, 0x12, 0x4a, 0xca, + 0x17, 0xdd, 0x5d, 0xbd, 0x7b, 0xf4, 0x9f, 0xdd, 0x19, 0x3f, 0x7d, 0xeb, 0xa7, 0x3f, 0x4c, 0x58, + 0x40, 0x74, 0xa6, 0xf7, 0x97, 0x03, 0xad, 0x21, 0xcb, 0x8c, 0xba, 0x2d, 0xa7, 0x1a, 0x88, 0xe4, + 0x34, 0xa1, 0xc1, 0x9b, 0x71, 0xa0, 0x01, 0x5d, 0x60, 0x8f, 0x34, 0x6c, 0xd4, 0xb0, 0x91, 0xc0, + 0x9e, 0x2e, 0x93, 0x93, 0x0a, 0xfa, 0x14, 0xc7, 0x9b, 0x78, 0x3c, 0x50, 0x80, 0x2d, 0x5a, 0x8f, + 0x57, 0x1b, 0x3c, 0x02, 0x7c, 0xe8, 0xb5, 0xf5, 0xaf, 0x75, 0xdf, 0x6a, 0xef, 0x47, 0xa8, 0xaf, + 0x29, 0xe1, 0x2b, 0x28, 0x87, 0x29, 0xbf, 0x66, 0xa9, 0x35, 0xe4, 0x64, 0xe3, 0xa3, 0xbc, 0xd4, + 0x69, 0xf6, 0x40, 0x56, 0xc4, 0xfb, 0xd3, 0x81, 0xfd, 0x07, 0x28, 0x7e, 0x02, 0x8d, 0xde, 0x9c, + 0xb3, 0x38, 0x7b, 0x45, 0x6f, 0x2e, 0x44, 0x9a, 0xe9, 0x5a, 0x0d, 0x72, 0x37, 0xb8, 0xc6, 0xe2, + 0xb1, 0x66, 0x15, 0xee, 0xb0, 0x4c, 0x10, 0x07, 0x50, 0x1d, 0x89, 0x44, 0xcc, 0xc5, 0xf4, 0x56, + 
0xf7, 0x58, 0xef, 0x76, 0x37, 0x39, 0xb2, 0x11, 0xc9, 0x33, 0xc9, 0x52, 0xc3, 0xfb, 0xbb, 0x00, + 0xcd, 0xbb, 0x20, 0x7e, 0x00, 0xd5, 0x58, 0x84, 0x6c, 0xcc, 0x43, 0xe9, 0x3a, 0x9d, 0xe2, 0x61, + 0x83, 0x54, 0xd4, 0xbe, 0x1f, 0x4a, 0x1c, 0x41, 0x2d, 0xe4, 0x32, 0xa3, 0x71, 0xc0, 0xa4, 0xbd, + 0xbc, 0xe7, 0xdb, 0x97, 0x1f, 0x9e, 0xf7, 0x47, 0x64, 0x25, 0x84, 0xe7, 0xb0, 0x1b, 0x88, 0x94, + 0x49, 0xb7, 0xd8, 0x29, 0xbe, 0x9d, 0x62, 0x4f, 0xa4, 0x8c, 0x18, 0x11, 0x7c, 0x06, 0xef, 0x89, + 0x6b, 0x96, 0xa6, 0x3c, 0x64, 0xe3, 0x4c, 0x64, 0x74, 0x3e, 0x0e, 0x44, 0x94, 0x2c, 0x32, 0xf3, + 0xb7, 0x29, 0x91, 0x47, 0x39, 0x3a, 0x52, 0x60, 0xcf, 0x60, 0xf8, 0x02, 0xdc, 0x65, 0xd6, 0xcf, + 0x3c, 0x9b, 0x89, 0x79, 0xb8, 0xcc, 0xdb, 0xd5, 0x79, 0x4b, 0xd5, 0xd7, 0x06, 0xb6, 0x99, 0xde, + 0x00, 0xf0, 0x61, 0x7b, 0xf8, 0x91, 0x72, 0x2a, 0x62, 0xb1, 0xfe, 0x18, 0xcd, 0x7d, 0xaf, 0x02, + 0xd8, 0x86, 0xf2, 0x35, 0x9d, 0x2f, 0x98, 0x19, 0x09, 0x8d, 0xd3, 0x42, 0xcb, 0x21, 0x36, 0xe2, + 0xfd, 0x51, 0xb8, 0x2f, 0xa8, 0xba, 0xc3, 0x0f, 0xa1, 0x26, 0x45, 0xf0, 0x86, 0x65, 0x63, 0x1e, + 0x5a, 0xc1, 0xaa, 0x09, 0xf4, 0x43, 0x7c, 0x1f, 0x2a, 0xf6, 0xca, 0xec, 0x57, 0x53, 0x36, 0x37, + 0xa6, 0x00, 0xe5, 0x8a, 0x02, 0x8a, 0x06, 0x50, 0xdb, 0x7e, 0x88, 0xe7, 0x00, 0x1a, 0x98, 0xa6, + 0x34, 0x34, 0xce, 0x34, 0xbb, 0x9f, 0x6e, 0x64, 0xbc, 0x48, 0xd9, 0xd7, 0x2a, 0x89, 0xd4, 0x82, + 0x7c, 0x89, 0x2e, 0x54, 0x42, 0x2e, 0xe9, 0xd5, 0xdc, 0x98, 0x55, 0x25, 0xf9, 0x16, 0x1f, 0x03, + 0xa8, 0x64, 0x35, 0x8c, 0x59, 0xe8, 0x96, 0xb5, 0x93, 0x35, 0x15, 0x19, 0xaa, 0x80, 0xea, 0x2a, + 0xa2, 0x37, 0x16, 0xad, 0x68, 0xb4, 0x1a, 0xd1, 0x1b, 0x03, 0x7e, 0x0c, 0xf5, 0xe9, 0x82, 0x49, + 0x69, 0xe1, 0xaa, 0x86, 0x41, 0x87, 0x34, 0x41, 0x8d, 0xf5, 0xb5, 0x49, 0x64, 0x26, 0xdc, 0xd3, + 0xcf, 0x00, 0x56, 0xf3, 0x18, 0xeb, 0x50, 0xf9, 0x6e, 0xf0, 0xcd, 0xe0, 0xdb, 0xd7, 0x83, 0xd6, + 0x0e, 0x02, 0x94, 0x5f, 0x92, 0xfe, 0xe5, 0x19, 0x69, 0x15, 0xf4, 0xfa, 0xec, 0xb2, 0xdf, 0x3b, + 0x6b, 0x15, 0x9f, 0x1e, 0x41, 0x6d, 
0xd9, 0x16, 0xbe, 0x03, 0xf5, 0x0b, 0x96, 0x4e, 0x44, 0x1a, + 0xa9, 0xaf, 0xb3, 0xb5, 0x83, 0x4d, 0x80, 0xb3, 0xc9, 0x84, 0x07, 0x9c, 0xc5, 0xc1, 0x6d, 0xcb, + 0xe9, 0xfe, 0x5e, 0x04, 0x38, 0xa5, 0x92, 0x99, 0x2a, 0xf8, 0x6b, 0x5e, 0x4f, 0xbd, 0x22, 0x78, + 0xb2, 0xf9, 0x7b, 0xb1, 0xf6, 0x16, 0xb5, 0x9f, 0x6f, 0x9b, 0x66, 0x9a, 0xf5, 0x76, 0xf0, 0x37, + 0x07, 0xf6, 0xd6, 0x27, 0x3d, 0x7e, 0xb1, 0xd9, 0x2d, 0x3e, 0x78, 0x32, 0xda, 0x2f, 0xb6, 0x4f, + 0x5c, 0x9e, 0xe2, 0x17, 0xa8, 0x2d, 0x6f, 0x02, 0x9f, 0x6d, 0x22, 0x74, 0xff, 0x09, 0x69, 0x9f, + 0x6c, 0x99, 0x95, 0xd7, 0x3e, 0xad, 0xfc, 0xb0, 0xab, 0xc1, 0xab, 0xb2, 0xfe, 0xf9, 0xfc, 0xdf, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xe1, 0xe9, 0x67, 0x7b, 0x58, 0x08, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// BasePluginClient is the client API for BasePlugin service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BasePluginClient interface { + // PluginInfo describes the type and version of a plugin. + PluginInfo(ctx context.Context, in *PluginInfoRequest, opts ...grpc.CallOption) (*PluginInfoResponse, error) + // ConfigSchema returns the schema for parsing the plugins configuration. + ConfigSchema(ctx context.Context, in *ConfigSchemaRequest, opts ...grpc.CallOption) (*ConfigSchemaResponse, error) + // SetConfig is used to set the configuration. 
+ SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) +} + +type basePluginClient struct { + cc grpc.ClientConnInterface +} + +func NewBasePluginClient(cc grpc.ClientConnInterface) BasePluginClient { + return &basePluginClient{cc} +} + +func (c *basePluginClient) PluginInfo(ctx context.Context, in *PluginInfoRequest, opts ...grpc.CallOption) (*PluginInfoResponse, error) { + out := new(PluginInfoResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.base.proto.BasePlugin/PluginInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *basePluginClient) ConfigSchema(ctx context.Context, in *ConfigSchemaRequest, opts ...grpc.CallOption) (*ConfigSchemaResponse, error) { + out := new(ConfigSchemaResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.base.proto.BasePlugin/ConfigSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *basePluginClient) SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) { + out := new(SetConfigResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.base.proto.BasePlugin/SetConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BasePluginServer is the server API for BasePlugin service. +type BasePluginServer interface { + // PluginInfo describes the type and version of a plugin. + PluginInfo(context.Context, *PluginInfoRequest) (*PluginInfoResponse, error) + // ConfigSchema returns the schema for parsing the plugins configuration. + ConfigSchema(context.Context, *ConfigSchemaRequest) (*ConfigSchemaResponse, error) + // SetConfig is used to set the configuration. + SetConfig(context.Context, *SetConfigRequest) (*SetConfigResponse, error) +} + +// UnimplementedBasePluginServer can be embedded to have forward compatible implementations. 
+type UnimplementedBasePluginServer struct { +} + +func (*UnimplementedBasePluginServer) PluginInfo(ctx context.Context, req *PluginInfoRequest) (*PluginInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PluginInfo not implemented") +} +func (*UnimplementedBasePluginServer) ConfigSchema(ctx context.Context, req *ConfigSchemaRequest) (*ConfigSchemaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConfigSchema not implemented") +} +func (*UnimplementedBasePluginServer) SetConfig(ctx context.Context, req *SetConfigRequest) (*SetConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetConfig not implemented") +} + +func RegisterBasePluginServer(s *grpc.Server, srv BasePluginServer) { + s.RegisterService(&_BasePlugin_serviceDesc, srv) +} + +func _BasePlugin_PluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PluginInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BasePluginServer).PluginInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.base.proto.BasePlugin/PluginInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BasePluginServer).PluginInfo(ctx, req.(*PluginInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BasePlugin_ConfigSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ConfigSchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BasePluginServer).ConfigSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.base.proto.BasePlugin/ConfigSchema", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(BasePluginServer).ConfigSchema(ctx, req.(*ConfigSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BasePlugin_SetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BasePluginServer).SetConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.base.proto.BasePlugin/SetConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BasePluginServer).SetConfig(ctx, req.(*SetConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BasePlugin_serviceDesc = grpc.ServiceDesc{ + ServiceName: "hashicorp.nomad.plugins.base.proto.BasePlugin", + HandlerType: (*BasePluginServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "PluginInfo", + Handler: _BasePlugin_PluginInfo_Handler, + }, + { + MethodName: "ConfigSchema", + Handler: _BasePlugin_ConfigSchema_Handler, + }, + { + MethodName: "SetConfig", + Handler: _BasePlugin_SetConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "plugins/base/proto/base.proto", +} diff --git a/plugin_interface/base/proto/base.proto b/plugin_interface/base/proto/base.proto new file mode 100644 index 00000000000..f176e6944dd --- /dev/null +++ b/plugin_interface/base/proto/base.proto @@ -0,0 +1,132 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; +package hashicorp.nomad.plugins.base.proto; +option go_package = "proto"; + +import "plugins/shared/hclspec/hcl_spec.proto"; + +// BasePlugin is the methods that all Nomad plugins must support. +service BasePlugin { + + // PluginInfo describes the type and version of a plugin. 
+ rpc PluginInfo(PluginInfoRequest) returns (PluginInfoResponse) {} + + // ConfigSchema returns the schema for parsing the plugins configuration. + rpc ConfigSchema(ConfigSchemaRequest) returns (ConfigSchemaResponse) {} + + // SetConfig is used to set the configuration. + rpc SetConfig(SetConfigRequest) returns (SetConfigResponse) {} +} + +// PluginType enumerates the type of plugins Nomad supports +enum PluginType { + UNKNOWN = 0; + DRIVER = 2; + DEVICE = 3; +} + +// PluginInfoRequest is used to request the plugins basic information. +message PluginInfoRequest {} + +// PluginInfoResponse returns basic information about the plugin such +// that Nomad can decide whether to load the plugin or not. +message PluginInfoResponse { + // type indicates what type of plugin this is. + PluginType type = 1; + + // plugin_api_versions indicates the versions of the Nomad Plugin API + // this plugin supports. + repeated string plugin_api_versions = 2; + + // plugin_version is the semver version of this individual plugin. + // This is divorce from Nomad’s development and versioning. + string plugin_version = 3; + + // name is the name of the plugin + string name = 4; +} + +// ConfigSchemaRequest is used to request the configurations schema. +message ConfigSchemaRequest {} + +// ConfigSchemaResponse returns the plugins configuration schema. +message ConfigSchemaResponse { + // spec is the plugins configuration schema + hashicorp.nomad.plugins.shared.hclspec.Spec spec = 1; +} + +// SetConfigRequest is used to set the configuration +message SetConfigRequest { + // msgpack_config is the configuration encoded as MessagePack. + bytes msgpack_config = 1; + + // nomad_config is the nomad client configuration sent to all plugins. + NomadConfig nomad_config = 2; + + // plugin_api_version is the api version to use. 
+ string plugin_api_version = 3; +} + +// NomadConfig is the client configuration sent to all plugins +message NomadConfig { + // driver specific configuration sent to all plugins + NomadDriverConfig driver = 1; +} + +// NomadDriverConfig is the driver specific client configuration sent to all +// driver plugins +message NomadDriverConfig { + // ClientMaxPort is the upper range of the ports that the client uses for + // communicating with plugin subsystems over loopback + // buf:lint:ignore FIELD_LOWER_SNAKE_CASE + uint32 ClientMaxPort = 1; + + // ClientMinPort is the lower range of the ports that the client uses for + // communicating with plugin subsystems over loopback + // buf:lint:ignore FIELD_LOWER_SNAKE_CASE + uint32 ClientMinPort = 2; + + // Topology is the complex hardware topology detected by the client + // combined with client configuration. + ClientTopology Topology = 3; +} + +// numalib/Topology +message ClientTopology { + repeated uint32 node_ids = 1; + ClientTopologySLIT distances = 2; + repeated ClientTopologyCore cores = 3; + uint64 override_total_compute = 4; + uint64 override_withold_compute = 5; +} + +// numalib/SLIT +message ClientTopologySLIT { + // dimension is the row and column size of the slit matrix + uint32 dimension = 1; + + // values is the flattened matrix of slit values + repeated uint32 values = 2 [packed=true]; +} + +enum CoreGrade { + Performance = 0; + Efficiency = 1; +} + +// numalib/Core +message ClientTopologyCore { + uint32 socket_id = 1; + uint32 node_id = 2; + uint32 core_id = 3; + CoreGrade core_grade = 4; + bool disable = 5; + uint64 base_speed = 6; + uint64 max_speed = 7; + uint64 guess_speed = 8; +} + +// SetConfigResponse is used to respond to setting the configuration +message SetConfigResponse {} diff --git a/plugin_interface/base/server.go b/plugin_interface/base/server.go new file mode 100644 index 00000000000..29e4fc70cb7 --- /dev/null +++ b/plugin_interface/base/server.go @@ -0,0 +1,89 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package base + +import ( + "context" + "fmt" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/plugin-interface/base/proto" +) + +// basePluginServer wraps a base plugin and exposes it via gRPC. +type basePluginServer struct { + broker *plugin.GRPCBroker + impl BasePlugin +} + +func (b *basePluginServer) PluginInfo(context.Context, *proto.PluginInfoRequest) (*proto.PluginInfoResponse, error) { + resp, err := b.impl.PluginInfo() + if err != nil { + return nil, err + } + + var ptype proto.PluginType + switch resp.Type { + case PluginTypeDriver: + ptype = proto.PluginType_DRIVER + case PluginTypeDevice: + ptype = proto.PluginType_DEVICE + default: + return nil, fmt.Errorf("plugin is of unknown type: %q", resp.Type) + } + + presp := &proto.PluginInfoResponse{ + Type: ptype, + PluginApiVersions: resp.PluginApiVersions, + PluginVersion: resp.PluginVersion, + Name: resp.Name, + } + + return presp, nil +} + +func (b *basePluginServer) ConfigSchema(context.Context, *proto.ConfigSchemaRequest) (*proto.ConfigSchemaResponse, error) { + spec, err := b.impl.ConfigSchema() + if err != nil { + return nil, err + } + + presp := &proto.ConfigSchemaResponse{ + Spec: spec, + } + + return presp, nil +} + +func (b *basePluginServer) SetConfig(ctx context.Context, req *proto.SetConfigRequest) (*proto.SetConfigResponse, error) { + info, err := b.impl.PluginInfo() + if err != nil { + return nil, err + } + + // Client configuration is filtered based on plugin type + cfg := nomadConfigFromProto(req.GetNomadConfig()) + filteredCfg := new(AgentConfig) + + if cfg != nil { + switch info.Type { + case PluginTypeDriver: + filteredCfg.Driver = cfg.Driver + } + } + + // Build the config request + c := &Config{ + ApiVersion: req.GetPluginApiVersion(), + PluginConfig: req.GetMsgpackConfig(), + AgentConfig: filteredCfg, + } + + // Set the config + if err := b.impl.SetConfig(c); err != nil { + return nil, fmt.Errorf("SetConfig 
failed: %v", err) + } + + return &proto.SetConfigResponse{}, nil +} diff --git a/plugin_interface/base/structs/errors.go b/plugin_interface/base/structs/errors.go new file mode 100644 index 00000000000..0a5a7a6d6a7 --- /dev/null +++ b/plugin_interface/base/structs/errors.go @@ -0,0 +1,12 @@ +package structs + +import "errors" + +const ( + errPluginShutdown = "plugin is shut down" +) + +var ( + // ErrPluginShutdown is returned when the plugin has shutdown. + ErrPluginShutdown = errors.New(errPluginShutdown) +) diff --git a/plugin_interface/base/testing.go b/plugin_interface/base/testing.go new file mode 100644 index 00000000000..6ad475ad4d4 --- /dev/null +++ b/plugin_interface/base/testing.go @@ -0,0 +1,96 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package base + +import ( + "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" +) + +var ( + // TestSpec is an hcl Spec for testing + TestSpec = &hclspec.Spec{ + Block: &hclspec.Spec_Object{ + Object: &hclspec.Object{ + Attributes: map[string]*hclspec.Spec{ + "foo": { + Block: &hclspec.Spec_Attr{ + Attr: &hclspec.Attr{ + Type: "string", + Required: false, + }, + }, + }, + "bar": { + Block: &hclspec.Spec_Attr{ + Attr: &hclspec.Attr{ + Type: "number", + Required: false, + }, + }, + }, + "baz": { + Block: &hclspec.Spec_Attr{ + Attr: &hclspec.Attr{ + Type: "bool", + }, + }, + }, + }, + }, + }, + } +) + +// TestConfig is used to decode a config from the TestSpec +type TestConfig struct { + Foo string `cty:"foo" codec:"foo"` + Bar int64 `cty:"bar" codec:"bar"` + Baz bool `cty:"baz" codec:"baz"` +} + +type PluginInfoFn func() (*PluginInfoResponse, error) +type ConfigSchemaFn func() (*hclspec.Spec, error) +type SetConfigFn func(*Config) error + +// MockPlugin is used for testing. +// Each function can be set as a closure to make assertions about how data +// is passed through the base plugin layer. 
+type MockPlugin struct { + PluginInfoF PluginInfoFn + ConfigSchemaF ConfigSchemaFn + SetConfigF SetConfigFn +} + +func (p *MockPlugin) PluginInfo() (*PluginInfoResponse, error) { return p.PluginInfoF() } +func (p *MockPlugin) ConfigSchema() (*hclspec.Spec, error) { return p.ConfigSchemaF() } +func (p *MockPlugin) SetConfig(cfg *Config) error { + return p.SetConfigF(cfg) +} + +// Below are static implementations of the base plugin functions + +// StaticInfo returns the passed PluginInfoResponse with no error +func StaticInfo(out *PluginInfoResponse) PluginInfoFn { + return func() (*PluginInfoResponse, error) { + return out, nil + } +} + +// StaticConfigSchema returns the passed Spec with no error +func StaticConfigSchema(out *hclspec.Spec) ConfigSchemaFn { + return func() (*hclspec.Spec, error) { + return out, nil + } +} + +// TestConfigSchema returns a ConfigSchemaFn that statically returns the +// TestSpec +func TestConfigSchema() ConfigSchemaFn { + return StaticConfigSchema(TestSpec) +} + +// NoopSetConfig is a noop implementation of set config +func NoopSetConfig() SetConfigFn { + return func(_ *Config) error { return nil } +} diff --git a/plugin_interface/base/topology.go b/plugin_interface/base/topology.go new file mode 100644 index 00000000000..6b232b16221 --- /dev/null +++ b/plugin_interface/base/topology.go @@ -0,0 +1,122 @@ +package base + +import ( + "strconv" + + "github.com/hashicorp/nomad/plugin-interface/lib/idset" +) + +type Topology struct { + // COMPAT: idset.Set wasn't being serialized correctly but we can't change + // the encoding of a field once its shipped. Nodes is the wire + // representation + nodeIDs *idset.Set[NodeID] + Nodes []uint8 + + Distances SLIT + Cores []Core + + // BusAssociativity maps the specific bus each PCI device is plugged into + // with its hardware associated numa node + // + // e.g. 
"0000:03:00.0" -> 1 + // + // Note that the key may not exactly match the Locality.PciBusID from the + // fingerprint of the device with regard to the domain value. + // + // + // 0000:03:00.0 + // ^ ^ ^ ^ + // | | | |-- function (identifies functionality of device) + // | | |-- device (identifies the device number on the bus) + // | | + // | |-- bus (identifies which bus segment the device is connected to) + // | + // |-- domain (basically always 0, may be 0000 or 00000000) + BusAssociativity map[string]NodeID + + // explicit overrides from client configuration + OverrideTotalCompute MHz + OverrideWitholdCompute MHz +} + +func (t *Topology) Compute() Compute { + return Compute{} +} + +func (st *Topology) SetNodes(nodes *idset.Set[NodeID]) { + st.nodeIDs = nodes + if !nodes.Empty() { + st.Nodes = nodes.Slice() + } else { + st.Nodes = []uint8{} + } +} + +// GetNodes returns the set of NUMA Node IDs. +func (st *Topology) GetNodes() *idset.Set[NodeID] { + if st.nodeIDs.Empty() { + st.nodeIDs = idset.From[NodeID](st.Nodes) + } + return st.nodeIDs +} + +type Compute struct { + TotalCompute MHz `json:"tc"` + NumCores int `json:"nc"` +} + +type SLIT [][]Cost + +type Cost uint8 + +// A Core represents one logical (vCPU) core on a processor. Basically the slice +// of cores detected should match up with the vCPU description in cloud providers. 
+type Core struct { + SocketID SocketID + NodeID NodeID + ID CoreID + Grade CoreGrade + Disable bool // indicates whether Nomad must not use this core + BaseSpeed MHz // cpuinfo_base_freq (primary choice) + MaxSpeed MHz // cpuinfo_max_freq (second choice) + GuessSpeed MHz // best effort (fallback) +} + +type CoreGrade bool + +const ( + Performance CoreGrade = true + Efficiency CoreGrade = false +) + +type ( + MHz uint64 + KHz uint64 +) + +func (khz KHz) MHz() MHz { + return MHz(khz / 1000) +} + +func (mhz MHz) KHz() KHz { + return KHz(mhz * 1000) +} + +func (khz KHz) String() string { + return strconv.FormatUint(uint64(khz.MHz()), 10) +} + +type ( + // A NodeID represents a NUMA node. There could be more than + // one NUMA node per socket. + // + // Must be an alias because go-msgpack cannot handle the real type. + NodeID = uint8 + + // A SocketID represents a physical CPU socket. + SocketID uint8 + + // A CoreID represents one logical (vCPU) core. + CoreID uint16 +) diff --git a/plugin_interface/csi/client.go b/plugin_interface/csi/client.go new file mode 100644 index 00000000000..c2569e61754 --- /dev/null +++ b/plugin_interface/csi/client.go @@ -0,0 +1,961 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package csi + +import ( + "context" + "fmt" + "maps" + "math" + "net" + "os" + "time" + + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/hashicorp/go-hclog" + multierror "github.com/hashicorp/go-multierror" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/hashicorp/nomad/plugin-interface/base" + "github.com/hashicorp/nomad/plugin-interface/helper" + "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" +) + +// PluginTypeCSI implements the CSI plugin interface +const PluginTypeCSI = "csi" + +type NodeGetInfoResponse struct { + NodeID string + MaxVolumes int64 + AccessibleTopology *Topology +} + +// Topology is a map of topological domains to topological segments. +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// +// According to CSI, there are a few requirements for the keys within this map: +// - Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// - The key name segment is REQUIRED. The prefix is OPTIONAL. +// - The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// - The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// - The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. 
+// - If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// - Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// - Each value (topological segment) MUST contain 1 or more strings. +// - Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +type Topology struct { + Segments map[string]string +} + +// CSIControllerClient defines the minimal CSI Controller Plugin interface used +// by nomad to simplify the interface required for testing. +type CSIControllerClient interface { + ControllerGetCapabilities(ctx context.Context, in *csipbv1.ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ControllerGetCapabilitiesResponse, error) + ControllerPublishVolume(ctx context.Context, in *csipbv1.ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(ctx context.Context, in *csipbv1.ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(ctx context.Context, in *csipbv1.ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ValidateVolumeCapabilitiesResponse, error) + CreateVolume(ctx context.Context, in *csipbv1.CreateVolumeRequest, opts ...grpc.CallOption) (*csipbv1.CreateVolumeResponse, error) + ListVolumes(ctx context.Context, in *csipbv1.ListVolumesRequest, opts ...grpc.CallOption) (*csipbv1.ListVolumesResponse, error) + DeleteVolume(ctx context.Context, in *csipbv1.DeleteVolumeRequest, opts ...grpc.CallOption) (*csipbv1.DeleteVolumeResponse, error) + ControllerExpandVolume(ctx context.Context, in *csipbv1.ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerExpandVolumeResponse, error) + CreateSnapshot(ctx context.Context, in 
*csipbv1.CreateSnapshotRequest, opts ...grpc.CallOption) (*csipbv1.CreateSnapshotResponse, error) + DeleteSnapshot(ctx context.Context, in *csipbv1.DeleteSnapshotRequest, opts ...grpc.CallOption) (*csipbv1.DeleteSnapshotResponse, error) + ListSnapshots(ctx context.Context, in *csipbv1.ListSnapshotsRequest, opts ...grpc.CallOption) (*csipbv1.ListSnapshotsResponse, error) +} + +// CSINodeClient defines the minimal CSI Node Plugin interface used +// by nomad to simplify the interface required for testing. +type CSINodeClient interface { + NodeGetCapabilities(ctx context.Context, in *csipbv1.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetCapabilitiesResponse, error) + NodeGetInfo(ctx context.Context, in *csipbv1.NodeGetInfoRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetInfoResponse, error) + NodeStageVolume(ctx context.Context, in *csipbv1.NodeStageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeStageVolumeResponse, error) + NodeUnstageVolume(ctx context.Context, in *csipbv1.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnstageVolumeResponse, error) + NodePublishVolume(ctx context.Context, in *csipbv1.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodePublishVolumeResponse, error) + NodeUnpublishVolume(ctx context.Context, in *csipbv1.NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnpublishVolumeResponse, error) + NodeExpandVolume(ctx context.Context, in *csipbv1.NodeExpandVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeExpandVolumeResponse, error) +} + +type client struct { + addr string + conn *grpc.ClientConn + identityClient csipbv1.IdentityClient + controllerClient CSIControllerClient + nodeClient CSINodeClient + logger hclog.Logger +} + +func (c *client) Close() error { + if c.conn != nil { + return c.conn.Close() + } + return nil +} + +func NewClient(addr string, logger hclog.Logger) CSIPlugin { + return &client{ + addr: addr, + logger: logger, + } +} + +func (c *client) 
ensureConnected(ctx context.Context) error { + if c == nil { + return fmt.Errorf("client not initialized") + } + if c.conn != nil { + return nil + } + if c.addr == "" { + return fmt.Errorf("address is empty") + } + var conn *grpc.ClientConn + var err error + t := time.NewTimer(0) + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout while connecting to gRPC socket: %v", err) + case <-t.C: + _, err = os.Stat(c.addr) + if err != nil { + err = fmt.Errorf("failed to stat socket: %v", err) + t.Reset(5 * time.Second) + continue + } + conn, err = newGrpcConn(c.addr, c.logger) + if err != nil { + err = fmt.Errorf("failed to create gRPC connection: %v", err) + t.Reset(time.Second * 5) + continue + } + c.conn = conn + c.identityClient = csipbv1.NewIdentityClient(conn) + c.controllerClient = csipbv1.NewControllerClient(conn) + c.nodeClient = csipbv1.NewNodeClient(conn) + return nil + } + } +} + +func newGrpcConn(addr string, logger hclog.Logger) (*grpc.ClientConn, error) { + // after DialContext returns w/ initial connection, closing this + // context is a no-op + connectCtx, cancel := context.WithTimeout(context.Background(), time.Second*1) + defer cancel() + conn, err := grpc.DialContext( + connectCtx, + addr, + grpc.WithBlock(), + grpc.WithInsecure(), + grpc.WithUnaryInterceptor(helper.UnaryClientInterceptor(logger)), + grpc.WithStreamInterceptor(helper.StreamClientInterceptor(logger)), + grpc.WithAuthority("localhost"), + grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", target, timeout) + }), + ) + + if err != nil { + return nil, fmt.Errorf("failed to open grpc connection to addr: %s, err: %v", addr, err) + } + + return conn, nil +} + +// PluginInfo describes the type and version of a plugin as required by the nomad +// base.BasePlugin interface. 
+func (c *client) PluginInfo() (*base.PluginInfoResponse, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + if err := c.ensureConnected(ctx); err != nil { + return nil, err + } + + // note: no grpc retries needed here, as this is called in + // fingerprinting and will get retried by the caller. + name, version, err := c.PluginGetInfo(ctx) + if err != nil { + return nil, err + } + + return &base.PluginInfoResponse{ + Type: PluginTypeCSI, // note: this isn't a Nomad go-plugin type + PluginApiVersions: []string{"1.0.0"}, // TODO(tgross): we want to fingerprint spec version, but this isn't included as a field from the plugins + PluginVersion: version, + Name: name, + }, nil +} + +// ConfigSchema returns the schema for parsing the plugins configuration as +// required by the base.BasePlugin interface. It will always return nil. +func (c *client) ConfigSchema() (*hclspec.Spec, error) { + return nil, nil +} + +// SetConfig is used to set the configuration by passing a MessagePack +// encoding of it. +func (c *client) SetConfig(_ *base.Config) error { + return fmt.Errorf("unsupported") +} + +func (c *client) PluginProbe(ctx context.Context) (bool, error) { + if err := c.ensureConnected(ctx); err != nil { + return false, err + } + + // note: no grpc retries should be done here + req, err := c.identityClient.Probe(ctx, &csipbv1.ProbeRequest{}) + if err != nil { + return false, err + } + + wrapper := req.GetReady() + + // wrapper.GetValue() protects against wrapper being `nil`, and returns false. + ready := wrapper.GetValue() + + if wrapper == nil { + // If the plugin returns a nil value for ready, then it should be + // interpreted as the plugin is ready for compatibility with plugins that + // do not do health checks. 
+ ready = true + } + + return ready, nil +} + +func (c *client) PluginGetInfo(ctx context.Context) (string, string, error) { + if err := c.ensureConnected(ctx); err != nil { + return "", "", err + } + + resp, err := c.identityClient.GetPluginInfo(ctx, &csipbv1.GetPluginInfoRequest{}) + if err != nil { + return "", "", err + } + + name := resp.GetName() + if name == "" { + return "", "", fmt.Errorf("PluginGetInfo: plugin returned empty name field") + } + version := resp.GetVendorVersion() + + return name, version, nil +} + +func (c *client) PluginGetCapabilities(ctx context.Context) (*PluginCapabilitySet, error) { + if err := c.ensureConnected(ctx); err != nil { + return nil, err + } + + // note: no grpc retries needed here, as this is called in + // fingerprinting and will get retried by the caller + resp, err := c.identityClient.GetPluginCapabilities(ctx, + &csipbv1.GetPluginCapabilitiesRequest{}) + if err != nil { + return nil, err + } + + return NewPluginCapabilitySet(resp), nil +} + +// +// Controller Endpoints +// + +func (c *client) ControllerGetCapabilities(ctx context.Context) (*ControllerCapabilitySet, error) { + if err := c.ensureConnected(ctx); err != nil { + return nil, err + } + + // note: no grpc retries needed here, as this is called in + // fingerprinting and will get retried by the caller + resp, err := c.controllerClient.ControllerGetCapabilities(ctx, + &csipbv1.ControllerGetCapabilitiesRequest{}) + if err != nil { + return nil, err + } + + return NewControllerCapabilitySet(resp), nil +} + +func (c *client) ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { + if err := c.ensureConnected(ctx); err != nil { + return nil, err + } + + err := req.Validate() + if err != nil { + return nil, err + } + + pbrequest := req.ToCSIRepresentation() + resp, err := c.controllerClient.ControllerPublishVolume(ctx, pbrequest, opts...) 
+ if err != nil { + code := status.Code(err) + switch code { + case codes.NotFound: + err = fmt.Errorf("volume %q or node %q could not be found: %v", + req.ExternalID, req.NodeID, err) + case codes.AlreadyExists: + err = fmt.Errorf( + "volume %q is already published at node %q but with capabilities or a read_only setting incompatible with this request: %v", + req.ExternalID, req.NodeID, err) + case codes.ResourceExhausted: + err = fmt.Errorf("node %q has reached the maximum allowable number of attached volumes: %v", + req.NodeID, err) + case codes.FailedPrecondition: + err = fmt.Errorf("volume %q is already published on another node and does not have MULTI_NODE volume capability: %v", + req.ExternalID, err) + case codes.Internal: + err = fmt.Errorf("controller plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + } + return nil, err + } + + return &ControllerPublishVolumeResponse{ + PublishContext: maps.Clone(resp.PublishContext), + }, nil +} + +func (c *client) ControllerUnpublishVolume(ctx context.Context, req *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { + if err := c.ensureConnected(ctx); err != nil { + return nil, err + } + err := req.Validate() + if err != nil { + return nil, err + } + + upbrequest := req.ToCSIRepresentation() + _, err = c.controllerClient.ControllerUnpublishVolume(ctx, upbrequest, opts...) + if err != nil { + code := status.Code(err) + switch code { + case codes.NotFound: + // we'll have validated the volume and node *should* exist at the + // server, so if we get a not-found here it's because we've previously + // checkpointed. we'll return an error so the caller can log it for + // diagnostic purposes. 
+ err = fmt.Errorf("%w: volume %q or node %q could not be found: %v", + ErrCSIClientRPCIgnorable, req.ExternalID, req.NodeID, err) + case codes.Internal: + err = fmt.Errorf("controller plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + } + return nil, err + } + + return &ControllerUnpublishVolumeResponse{}, nil +} + +func (c *client) ControllerValidateCapabilities(ctx context.Context, req *ControllerValidateVolumeRequest, opts ...grpc.CallOption) error { + if err := c.ensureConnected(ctx); err != nil { + return err + } + if req.ExternalID == "" { + return fmt.Errorf("missing volume ID") + } + + if req.Capabilities == nil { + return fmt.Errorf("missing Capabilities") + } + + creq := req.ToCSIRepresentation() + resp, err := c.controllerClient.ValidateVolumeCapabilities(ctx, creq, opts...) + if err != nil { + code := status.Code(err) + switch code { + case codes.NotFound: + err = fmt.Errorf("volume %q could not be found: %v", req.ExternalID, err) + case codes.Internal: + err = fmt.Errorf("controller plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + } + return err + } + + if resp.Message != "" { + // this should only ever be set if Confirmed isn't set, but + // it's not a validation failure. + c.logger.Debug(resp.Message) + } + + // The protobuf accessors below safely handle nil pointers. + // The CSI spec says we can only assert the plugin has + // confirmed the volume capabilities, not that it hasn't + // confirmed them, so if the field is nil we have to assume + // the volume is ok. 
+ confirmedCaps := resp.GetConfirmed().GetVolumeCapabilities() + if confirmedCaps != nil { + for _, requestedCap := range creq.VolumeCapabilities { + err := compareCapabilities(requestedCap, confirmedCaps) + if err != nil { + return fmt.Errorf("volume capability validation failed: %v", err) + } + } + } + + return nil +} + +func (c *client) ControllerCreateVolume(ctx context.Context, req *ControllerCreateVolumeRequest, opts ...grpc.CallOption) (*ControllerCreateVolumeResponse, error) { + if err := c.ensureConnected(ctx); err != nil { + return nil, err + } + + err := req.Validate() + if err != nil { + return nil, err + } + creq := req.ToCSIRepresentation() + resp, err := c.controllerClient.CreateVolume(ctx, creq, opts...) + + // these standard gRPC error codes are overloaded with CSI-specific + // meanings, so translate them into user-understandable terms + // https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume-errors + if err != nil { + code := status.Code(err) + switch code { + case codes.InvalidArgument: + return nil, fmt.Errorf( + "volume %q snapshot source %q is not compatible with these parameters: %v", + req.Name, req.ContentSource, err) + case codes.NotFound: + return nil, fmt.Errorf( + "volume %q content source %q does not exist: %v", + req.Name, req.ContentSource, err) + case codes.AlreadyExists: + return nil, fmt.Errorf( + "volume %q already exists but is incompatible with these parameters: %v", + req.Name, err) + case codes.ResourceExhausted: + return nil, fmt.Errorf( + "unable to provision %q in accessible_topology: %v", + req.Name, err) + case codes.OutOfRange: + return nil, fmt.Errorf( + "unsupported capacity_range for volume %q: %v", req.Name, err) + case codes.Internal: + return nil, fmt.Errorf( + "controller plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + } + return nil, err + } + + return NewCreateVolumeResponse(resp), nil +} + +func (c *client) 
ControllerListVolumes(ctx context.Context, req *ControllerListVolumesRequest, opts ...grpc.CallOption) (*ControllerListVolumesResponse, error) { + if err := c.ensureConnected(ctx); err != nil { + return nil, err + } + + err := req.Validate() + if err != nil { + return nil, err + } + creq := req.ToCSIRepresentation() + resp, err := c.controllerClient.ListVolumes(ctx, creq, opts...) + if err != nil { + code := status.Code(err) + switch code { + case codes.Aborted: + return nil, fmt.Errorf( + "invalid starting token %q: %v", req.StartingToken, err) + case codes.Internal: + return nil, fmt.Errorf( + "controller plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + } + return nil, err + } + return NewListVolumesResponse(resp), nil +} + +func (c *client) ControllerDeleteVolume(ctx context.Context, req *ControllerDeleteVolumeRequest, opts ...grpc.CallOption) error { + if err := c.ensureConnected(ctx); err != nil { + return err + } + + err := req.Validate() + if err != nil { + return err + } + creq := req.ToCSIRepresentation() + _, err = c.controllerClient.DeleteVolume(ctx, creq, opts...) + if err != nil { + code := status.Code(err) + switch code { + case codes.FailedPrecondition: + return fmt.Errorf("volume %q is in use: %v", req.ExternalVolumeID, err) + case codes.Internal: + return fmt.Errorf( + "controller plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + } + } + return err +} + +func (c *client) ControllerExpandVolume(ctx context.Context, req *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error) { + if err := req.Validate(); err != nil { + return nil, err + } + if err := c.ensureConnected(ctx); err != nil { + return nil, err + } + + exReq := req.ToCSIRepresentation() + resp, err := c.controllerClient.ControllerExpandVolume(ctx, exReq, opts...) 
+ if err != nil { + code := status.Code(err) + switch code { + case codes.InvalidArgument: + return nil, fmt.Errorf( + "requested capabilities not compatible with volume %q: %v", + req.ExternalVolumeID, err) + case codes.NotFound: + err = fmt.Errorf("volume %q could not be found: %v", req.ExternalVolumeID, err) + case codes.FailedPrecondition: + err = fmt.Errorf("volume %q cannot be expanded online: %v", req.ExternalVolumeID, err) + case codes.OutOfRange: + return nil, fmt.Errorf( + "unsupported capacity_range for volume %q: %v", req.ExternalVolumeID, err) + case codes.Internal: + err = fmt.Errorf("controller plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + default: + err = fmt.Errorf("controller plugin returned an error: %v", err) + } + return nil, err + } + + return &ControllerExpandVolumeResponse{ + CapacityBytes: resp.GetCapacityBytes(), + NodeExpansionRequired: resp.GetNodeExpansionRequired(), + }, nil +} + +// compareCapabilities returns an error if the 'got' capabilities aren't found +// within the 'expected' capability. +// +// Note that plugins in the wild are known to return incomplete +// VolumeCapability responses, so we can't require that all capabilities we +// expect have been validated, only that the ones that have been validated +// match. This appears to violate the CSI specification but until that's been +// resolved in upstream we have to loosen our validation requirements. The +// tradeoff is that we're more likely to have runtime errors during +// NodeStageVolume. 
+func compareCapabilities(expected *csipbv1.VolumeCapability, got []*csipbv1.VolumeCapability) error { + var err multierror.Error +NEXT_CAP: + for _, cap := range got { + + expectedMode := expected.GetAccessMode().GetMode() + capMode := cap.GetAccessMode().GetMode() + + // The plugin may not validate AccessMode, in which case we'll + // get UNKNOWN as our response + if capMode != csipbv1.VolumeCapability_AccessMode_UNKNOWN { + if expectedMode != capMode { + multierror.Append(&err, + fmt.Errorf("requested access mode %v, got %v", expectedMode, capMode)) + continue NEXT_CAP + } + } + + capBlock := cap.GetBlock() + capMount := cap.GetMount() + expectedBlock := expected.GetBlock() + expectedMount := expected.GetMount() + + if capBlock != nil && expectedBlock == nil { + multierror.Append(&err, fmt.Errorf( + "'block-device' access type was not requested but was validated by the controller")) + continue NEXT_CAP + } + + if capMount == nil { + continue NEXT_CAP + } + + if expectedMount == nil { + multierror.Append(&err, fmt.Errorf( + "'file-system' access type was not requested but was validated by the controller")) + continue NEXT_CAP + } + + if expectedMount.FsType != capMount.FsType { + multierror.Append(&err, fmt.Errorf( + "requested filesystem type %v, got %v", + expectedMount.FsType, capMount.FsType)) + continue NEXT_CAP + } + + for _, expectedFlag := range expectedMount.MountFlags { + var ok bool + for _, flag := range capMount.MountFlags { + if expectedFlag == flag { + ok = true + break + } + } + if !ok { + // mount flags can contain sensitive data, so we can't log details + multierror.Append(&err, fmt.Errorf( + "requested mount flags did not match available capabilities")) + continue NEXT_CAP + } + } + + return nil + } + return err.ErrorOrNil() +} + +func (c *client) ControllerCreateSnapshot(ctx context.Context, req *ControllerCreateSnapshotRequest, opts ...grpc.CallOption) (*ControllerCreateSnapshotResponse, error) { + if err := c.ensureConnected(ctx); err != nil 
{ + return nil, err + } + + err := req.Validate() + if err != nil { + return nil, err + } + creq := req.ToCSIRepresentation() + resp, err := c.controllerClient.CreateSnapshot(ctx, creq, opts...) + + // these standard gRPC error codes are overloaded with CSI-specific + // meanings, so translate them into user-understandable terms + // https://github.com/container-storage-interface/spec/blob/master/spec.md#createsnapshot-errors + if err != nil { + code := status.Code(err) + switch code { + case codes.AlreadyExists: + return nil, fmt.Errorf( + "snapshot %q already exists but is incompatible with volume ID %q: %v", + req.Name, req.VolumeID, err) + case codes.Aborted: + return nil, fmt.Errorf( + "snapshot %q is already pending: %v", + req.Name, err) + case codes.ResourceExhausted: + return nil, fmt.Errorf( + "storage provider does not have enough space for this snapshot: %v", err) + case codes.Internal: + return nil, fmt.Errorf( + "controller plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + } + return nil, err + } + + snap := resp.GetSnapshot() + return &ControllerCreateSnapshotResponse{ + Snapshot: &Snapshot{ + ID: snap.GetSnapshotId(), + SourceVolumeID: snap.GetSourceVolumeId(), + SizeBytes: snap.GetSizeBytes(), + CreateTime: snap.GetCreationTime().GetSeconds(), + IsReady: snap.GetReadyToUse(), + }, + }, nil +} + +func (c *client) ControllerDeleteSnapshot(ctx context.Context, req *ControllerDeleteSnapshotRequest, opts ...grpc.CallOption) error { + if err := c.ensureConnected(ctx); err != nil { + return err + } + + err := req.Validate() + if err != nil { + return err + } + creq := req.ToCSIRepresentation() + _, err = c.controllerClient.DeleteSnapshot(ctx, creq, opts...) 
+ + // these standard gRPC error codes are overloaded with CSI-specific + // meanings, so translate them into user-understandable terms + // https://github.com/container-storage-interface/spec/blob/master/spec.md#deletesnapshot-errors + if err != nil { + code := status.Code(err) + switch code { + case codes.FailedPrecondition: + return fmt.Errorf( + "snapshot %q could not be deleted because it is in use: %v", + req.SnapshotID, err) + case codes.Aborted: + return fmt.Errorf("snapshot %q has a pending operation: %v", req.SnapshotID, err) + case codes.Internal: + return fmt.Errorf( + "controller plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + } + return err + } + + return nil +} + +func (c *client) ControllerListSnapshots(ctx context.Context, req *ControllerListSnapshotsRequest, opts ...grpc.CallOption) (*ControllerListSnapshotsResponse, error) { + if err := c.ensureConnected(ctx); err != nil { + return nil, err + } + + err := req.Validate() + if err != nil { + return nil, err + } + creq := req.ToCSIRepresentation() + resp, err := c.controllerClient.ListSnapshots(ctx, creq, opts...) 
+ + // these standard gRPC error codes are overloaded with CSI-specific + // meanings, so translate them into user-understandable terms + // https://github.com/container-storage-interface/spec/blob/master/spec.md#listsnapshot-errors + if err != nil { + code := status.Code(err) + switch code { + case codes.Aborted: + return nil, fmt.Errorf( + "invalid starting token %q: %v", req.StartingToken, err) + case codes.Internal: + return nil, fmt.Errorf( + "controller plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + } + return nil, err + } + + return NewListSnapshotsResponse(resp), nil +} + +// +// Node Endpoints +// + +func (c *client) NodeGetCapabilities(ctx context.Context) (*NodeCapabilitySet, error) { + if err := c.ensureConnected(ctx); err != nil { + return nil, err + } + + // note: no grpc retries needed here, as this is called in + // fingerprinting and will get retried by the caller + resp, err := c.nodeClient.NodeGetCapabilities(ctx, &csipbv1.NodeGetCapabilitiesRequest{}) + if err != nil { + return nil, err + } + + return NewNodeCapabilitySet(resp), nil +} + +func (c *client) NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error) { + if err := c.ensureConnected(ctx); err != nil { + return nil, err + } + + result := &NodeGetInfoResponse{} + + // note: no grpc retries needed here, as this is called in + // fingerprinting and will get retried by the caller + resp, err := c.nodeClient.NodeGetInfo(ctx, &csipbv1.NodeGetInfoRequest{}) + if err != nil { + return nil, err + } + + if resp.GetNodeId() == "" { + return nil, fmt.Errorf("plugin failed to return nodeid") + } + + result.NodeID = resp.GetNodeId() + result.MaxVolumes = resp.GetMaxVolumesPerNode() + if result.MaxVolumes == 0 { + // set safe default so that scheduler ignores this constraint when not set + result.MaxVolumes = math.MaxInt64 + } + + topo := resp.GetAccessibleTopology() + if topo != nil { + result.AccessibleTopology = &Topology{Segments: 
topo.Segments} + } + + return result, nil +} + +func (c *client) NodeStageVolume(ctx context.Context, req *NodeStageVolumeRequest, opts ...grpc.CallOption) error { + if err := c.ensureConnected(ctx); err != nil { + return err + } + err := req.Validate() + if err != nil { + return err + } + + // NodeStageVolume's response contains no extra data. If err == nil, we were + // successful. + _, err = c.nodeClient.NodeStageVolume(ctx, req.ToCSIRepresentation(), opts...) + if err != nil { + code := status.Code(err) + switch code { + case codes.NotFound: + err = fmt.Errorf("volume %q could not be found: %v", req.ExternalID, err) + case codes.AlreadyExists: + err = fmt.Errorf( + "volume %q is already staged to %q but with incompatible capabilities for this request: %v", + req.ExternalID, req.StagingTargetPath, err) + case codes.FailedPrecondition: + err = fmt.Errorf("volume %q does not have MULTI_NODE volume capability: %v", + req.ExternalID, err) + case codes.Internal: + err = fmt.Errorf("node plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + } + } + + return err +} + +func (c *client) NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string, opts ...grpc.CallOption) error { + if err := c.ensureConnected(ctx); err != nil { + return err + } + // These errors should not be returned during production use but exist as aids + // during Nomad development + if volumeID == "" { + return fmt.Errorf("missing volumeID") + } + if stagingTargetPath == "" { + return fmt.Errorf("missing stagingTargetPath") + } + + req := &csipbv1.NodeUnstageVolumeRequest{ + VolumeId: volumeID, + StagingTargetPath: stagingTargetPath, + } + + // NodeUnstageVolume's response contains no extra data. If err == nil, we were + // successful. + _, err := c.nodeClient.NodeUnstageVolume(ctx, req, opts...) 
+ if err != nil { + code := status.Code(err) + switch code { + case codes.NotFound: + err = fmt.Errorf("%w: volume %q could not be found: %v", + ErrCSIClientRPCIgnorable, volumeID, err) + case codes.Internal: + err = fmt.Errorf("node plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + } + } + + return err +} + +func (c *client) NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest, opts ...grpc.CallOption) error { + if err := c.ensureConnected(ctx); err != nil { + return err + } + if err := req.Validate(); err != nil { + return fmt.Errorf("validation error: %v", err) + } + + // NodePublishVolume's response contains no extra data. If err == nil, we were + // successful. + _, err := c.nodeClient.NodePublishVolume(ctx, req.ToCSIRepresentation(), opts...) + if err != nil { + code := status.Code(err) + switch code { + case codes.NotFound: + err = fmt.Errorf("volume %q could not be found: %v", req.ExternalID, err) + case codes.AlreadyExists: + err = fmt.Errorf( + "volume %q is already published at target path %q but with capabilities or a read_only setting incompatible with this request: %v", + req.ExternalID, req.TargetPath, err) + case codes.FailedPrecondition: + err = fmt.Errorf("volume %q does not have MULTI_NODE volume capability: %v", + req.ExternalID, err) + case codes.Internal: + err = fmt.Errorf("node plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + } + } + return err +} + +func (c *client) NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string, opts ...grpc.CallOption) error { + if err := c.ensureConnected(ctx); err != nil { + return err + } + // These errors should not be returned during production use but exist as aids + // during Nomad development + if volumeID == "" { + return fmt.Errorf("missing volumeID") + } + if targetPath == "" { + return fmt.Errorf("missing targetPath") + } + + req := &csipbv1.NodeUnpublishVolumeRequest{ 
+ VolumeId: volumeID, + TargetPath: targetPath, + } + + // NodeUnpublishVolume's response contains no extra data. If err == nil, we were + // successful. + _, err := c.nodeClient.NodeUnpublishVolume(ctx, req, opts...) + if err != nil { + code := status.Code(err) + switch code { + case codes.NotFound: + err = fmt.Errorf("%w: volume %q could not be found: %v", + ErrCSIClientRPCIgnorable, volumeID, err) + case codes.Internal: + err = fmt.Errorf("node plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + } + } + + return err +} + +func (c *client) NodeExpandVolume(ctx context.Context, req *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error) { + if err := req.Validate(); err != nil { + return nil, err + } + if err := c.ensureConnected(ctx); err != nil { + return nil, err + } + + exReq := req.ToCSIRepresentation() + resp, err := c.nodeClient.NodeExpandVolume(ctx, exReq, opts...) + if err != nil { + code := status.Code(err) + switch code { + case codes.InvalidArgument: + return nil, fmt.Errorf( + "requested capabilities not compatible with volume %q: %v", + req.ExternalVolumeID, err) + case codes.NotFound: + return nil, fmt.Errorf("%w: volume %q could not be found: %v", + ErrCSIClientRPCIgnorable, req.ExternalVolumeID, err) + case codes.FailedPrecondition: + return nil, fmt.Errorf("volume %q cannot be expanded while in use: %v", req.ExternalVolumeID, err) + case codes.OutOfRange: + return nil, fmt.Errorf( + "unsupported capacity_range for volume %q: %v", req.ExternalVolumeID, err) + case codes.Internal: + return nil, fmt.Errorf( + "node plugin returned an internal error, check the plugin allocation logs for more information: %v", err) + default: + return nil, fmt.Errorf("node plugin returned an error: %v", err) + } + } + + return &NodeExpandVolumeResponse{resp.GetCapacityBytes()}, nil +} diff --git a/plugin_interface/csi/client_test.go b/plugin_interface/csi/client_test.go new file mode 
100644 index 00000000000..a86c5a728ba --- /dev/null +++ b/plugin_interface/csi/client_test.go @@ -0,0 +1,1678 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package csi + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "testing" + "time" + + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/protobuf/ptypes/wrappers" + "github.com/shoenig/test/must" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/hashicorp/nomad/plugin-interface/csi/structs" + fake "github.com/hashicorp/nomad/plugin-interface/csi/testing" +) + +func newTestClient(t *testing.T) (*fake.IdentityClient, *fake.ControllerClient, *fake.NodeClient, CSIPlugin) { + ic := fake.NewIdentityClient() + cc := fake.NewControllerClient() + nc := fake.NewNodeClient() + + // we've set this as non-blocking so it won't connect to the + // socket unless a RPC is invoked + conn, err := grpc.DialContext(context.Background(), + filepath.Join(t.TempDir(), "csi.sock"), grpc.WithInsecure()) + if err != nil { + t.Errorf("failed: %v", err) + } + + client := &client{ + conn: conn, + identityClient: ic, + controllerClient: cc, + nodeClient: nc, + } + t.Cleanup(func() { + _ = client.Close() + }) + + return ic, cc, nc, client +} + +func TestClient_RPC_PluginProbe(t *testing.T) { + // ci.Parallel(t) + + cases := []struct { + Name string + ResponseErr error + ProbeResponse *csipbv1.ProbeResponse + ExpectedResponse bool + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "returns false for ready when the provider returns false", + ProbeResponse: &csipbv1.ProbeResponse{ + Ready: &wrappers.BoolValue{Value: false}, + }, + ExpectedResponse: false, + }, + { + Name: "returns true for ready when the provider returns true", + 
ProbeResponse: &csipbv1.ProbeResponse{ + Ready: &wrappers.BoolValue{Value: true}, + }, + ExpectedResponse: true, + }, + { + /* When a SP does not return a ready value, a CO MAY treat this as ready. + We do so because example plugins rely on this behaviour. We may + re-evaluate this decision in the future. */ + Name: "returns true for ready when the provider returns a nil wrapper", + ProbeResponse: &csipbv1.ProbeResponse{ + Ready: nil, + }, + ExpectedResponse: true, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + ic, _, _, client := newTestClient(t) + defer client.Close() + + ic.NextErr = tc.ResponseErr + ic.NextPluginProbe = tc.ProbeResponse + + resp, err := client.PluginProbe(context.TODO()) + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + } + + must.Eq(t, tc.ExpectedResponse, resp) + }) + } + +} + +func TestClient_RPC_PluginInfo(t *testing.T) { + // ci.Parallel(t) + + cases := []struct { + Name string + ResponseErr error + InfoResponse *csipbv1.GetPluginInfoResponse + ExpectedResponseName string + ExpectedResponseVersion string + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "returns an error if we receive an empty `name`", + InfoResponse: &csipbv1.GetPluginInfoResponse{ + Name: "", + VendorVersion: "", + }, + ExpectedErr: fmt.Errorf("PluginGetInfo: plugin returned empty name field"), + }, + { + Name: "returns the name when successfully retrieved and not empty", + InfoResponse: &csipbv1.GetPluginInfoResponse{ + Name: "com.hashicorp.storage", + VendorVersion: "1.0.1", + }, + ExpectedResponseName: "com.hashicorp.storage", + ExpectedResponseVersion: "1.0.1", + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + ic, _, _, client := newTestClient(t) + defer client.Close() + + ic.NextErr = tc.ResponseErr + ic.NextPluginInfo = tc.InfoResponse + + name, 
version, err := client.PluginGetInfo(context.TODO())
			if tc.ExpectedErr != nil {
				must.EqError(t, err, tc.ExpectedErr.Error())
			}

			must.Eq(t, tc.ExpectedResponseName, name)
			must.Eq(t, tc.ExpectedResponseVersion, version)
		})
	}

}

// TestClient_RPC_PluginGetCapabilities exercises the mapping from the raw
// csipbv1 plugin-capability response into our PluginCapabilitySet.
func TestClient_RPC_PluginGetCapabilities(t *testing.T) {
	// ci.Parallel(t)

	testCases := []struct {
		Name             string
		ResponseErr      error
		Response         *csipbv1.GetPluginCapabilitiesResponse
		ExpectedResponse *PluginCapabilitySet
		ExpectedErr      error
	}{
		{
			Name:        "handles underlying grpc errors",
			ResponseErr: fmt.Errorf("some grpc error"),
			ExpectedErr: fmt.Errorf("some grpc error"),
		},
		{
			Name: "HasControllerService is true when it's part of the response",
			Response: &csipbv1.GetPluginCapabilitiesResponse{
				Capabilities: []*csipbv1.PluginCapability{
					{
						Type: &csipbv1.PluginCapability_Service_{
							Service: &csipbv1.PluginCapability_Service{
								Type: csipbv1.PluginCapability_Service_CONTROLLER_SERVICE,
							},
						},
					},
				},
			},
			ExpectedResponse: &PluginCapabilitySet{hasControllerService: true},
		},
		{
			Name: "HasTopologies is true when it's part of the response",
			Response: &csipbv1.GetPluginCapabilitiesResponse{
				Capabilities: []*csipbv1.PluginCapability{
					{
						Type: &csipbv1.PluginCapability_Service_{
							Service: &csipbv1.PluginCapability_Service{
								Type: csipbv1.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS,
							},
						},
					},
				},
			},
			ExpectedResponse: &PluginCapabilitySet{hasTopologies: true},
		},
	}

	ctx := context.TODO()
	for _, tt := range testCases {
		t.Run(tt.Name, func(t *testing.T) {
			ic, _, _, client := newTestClient(t)
			defer client.Close()

			// arm the fake identity client with this case's canned response
			ic.NextErr = tt.ResponseErr
			ic.NextPluginCapabilities = tt.Response

			resp, err := client.PluginGetCapabilities(ctx)
			if tt.ExpectedErr != nil {
				must.EqError(t, err, tt.ExpectedErr.Error())
			}

			must.Eq(t, tt.ExpectedResponse, resp)
		})
	}
}

// TestClient_RPC_ControllerGetCapabilities exercises the mapping from the raw
// csipbv1 controller-capability response into our ControllerCapabilitySet.
func TestClient_RPC_ControllerGetCapabilities(t *testing.T) {
	// ci.Parallel(t)

	testCases := []struct {
		Name             string
		ResponseErr      error
		Response         *csipbv1.ControllerGetCapabilitiesResponse
		ExpectedResponse *ControllerCapabilitySet
		ExpectedErr      error
	}{
		{
			Name:        "handles underlying grpc errors",
			ResponseErr: fmt.Errorf("some grpc error"),
			ExpectedErr: fmt.Errorf("some grpc error"),
		},
		{
			Name: "ignores unknown capabilities",
			Response: &csipbv1.ControllerGetCapabilitiesResponse{
				Capabilities: []*csipbv1.ControllerServiceCapability{
					{
						Type: &csipbv1.ControllerServiceCapability_Rpc{
							Rpc: &csipbv1.ControllerServiceCapability_RPC{
								Type: csipbv1.ControllerServiceCapability_RPC_UNKNOWN,
							},
						},
					},
				},
			},
			ExpectedResponse: &ControllerCapabilitySet{},
		},
		{
			Name: "detects list volumes capabilities",
			Response: &csipbv1.ControllerGetCapabilitiesResponse{
				Capabilities: []*csipbv1.ControllerServiceCapability{
					{
						Type: &csipbv1.ControllerServiceCapability_Rpc{
							Rpc: &csipbv1.ControllerServiceCapability_RPC{
								Type: csipbv1.ControllerServiceCapability_RPC_LIST_VOLUMES,
							},
						},
					},
					{
						Type: &csipbv1.ControllerServiceCapability_Rpc{
							Rpc: &csipbv1.ControllerServiceCapability_RPC{
								Type: csipbv1.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES,
							},
						},
					},
				},
			},
			ExpectedResponse: &ControllerCapabilitySet{
				HasListVolumes:               true,
				HasListVolumesPublishedNodes: true,
			},
		},
		{
			Name: "detects publish capabilities",
			Response: &csipbv1.ControllerGetCapabilitiesResponse{
				Capabilities: []*csipbv1.ControllerServiceCapability{
					{
						Type: &csipbv1.ControllerServiceCapability_Rpc{
							Rpc: &csipbv1.ControllerServiceCapability_RPC{
								Type: csipbv1.ControllerServiceCapability_RPC_PUBLISH_READONLY,
							},
						},
					},
					{
						Type: &csipbv1.ControllerServiceCapability_Rpc{
							Rpc: &csipbv1.ControllerServiceCapability_RPC{
								Type: csipbv1.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,
							},
						},
					},
				},
			},
			ExpectedResponse: &ControllerCapabilitySet{
				HasPublishUnpublishVolume: true,
				HasPublishReadonly:        true,
			},
		},
	}

	ctx := context.TODO()
	for _, tt := range testCases {
		t.Run(tt.Name, func(t *testing.T) {
			_, cc, _, client := newTestClient(t)
			defer client.Close()

			// arm the fake controller client with this case's canned response
			cc.NextErr = tt.ResponseErr
			cc.NextCapabilitiesResponse = tt.Response

			resp, err := client.ControllerGetCapabilities(ctx)
			if tt.ExpectedErr != nil {
				must.EqError(t, err, tt.ExpectedErr.Error())
			}

			must.Eq(t, tt.ExpectedResponse, resp)
		})
	}
}

// TestClient_RPC_NodeGetCapabilities exercises the mapping from the raw
// csipbv1 node-capability response into our NodeCapabilitySet.
func TestClient_RPC_NodeGetCapabilities(t *testing.T) {
	// ci.Parallel(t)

	testCases := []struct {
		Name             string
		ResponseErr      error
		Response         *csipbv1.NodeGetCapabilitiesResponse
		ExpectedResponse *NodeCapabilitySet
		ExpectedErr      error
	}{
		{
			Name:        "handles underlying grpc errors",
			ResponseErr: fmt.Errorf("some grpc error"),
			ExpectedErr: fmt.Errorf("some grpc error"),
		},
		{
			Name: "detects multiple capabilities",
			Response: &csipbv1.NodeGetCapabilitiesResponse{
				Capabilities: []*csipbv1.NodeServiceCapability{
					{
						Type: &csipbv1.NodeServiceCapability_Rpc{
							Rpc: &csipbv1.NodeServiceCapability_RPC{
								Type: csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
							},
						},
					},
					{
						Type: &csipbv1.NodeServiceCapability_Rpc{
							Rpc: &csipbv1.NodeServiceCapability_RPC{
								Type: csipbv1.NodeServiceCapability_RPC_EXPAND_VOLUME,
							},
						},
					},
				},
			},
			ExpectedResponse: &NodeCapabilitySet{
				HasStageUnstageVolume: true,
				HasExpandVolume:       true,
			},
		},
	}

	ctx := context.TODO()
	for _, tt := range testCases {
		t.Run(tt.Name, func(t *testing.T) {
			_, _, nc, client := newTestClient(t)
			defer client.Close()

			// arm the fake node client with this case's canned response
			nc.NextErr = tt.ResponseErr
			nc.NextCapabilitiesResponse = tt.Response

			resp, err := client.NodeGetCapabilities(ctx)
			if tt.ExpectedErr != nil {
				must.EqError(t, err, tt.ExpectedErr.Error())
			}

			must.Eq(t, tt.ExpectedResponse, resp)
		})
	}
}

func TestClient_RPC_ControllerPublishVolume(t *testing.T) {
	//ci.Parallel(t)

	cases := []struct {
		Name    string
		Request *ControllerPublishVolumeRequest
+ ResponseErr error + Response *csipbv1.ControllerPublishVolumeResponse + ExpectedResponse *ControllerPublishVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + Request: &ControllerPublishVolumeRequest{ExternalID: "vol", NodeID: "node"}, + ResponseErr: status.Errorf(codes.Internal, "some grpc error"), + ExpectedErr: fmt.Errorf("controller plugin returned an internal error, check the plugin allocation logs for more information: rpc error: code = Internal desc = some grpc error"), + }, + { + Name: "handles missing NodeID", + Request: &ControllerPublishVolumeRequest{ExternalID: "vol"}, + Response: &csipbv1.ControllerPublishVolumeResponse{}, + ExpectedErr: fmt.Errorf("missing NodeID"), + }, + + { + Name: "handles PublishContext == nil", + Request: &ControllerPublishVolumeRequest{ + ExternalID: "vol", NodeID: "node"}, + Response: &csipbv1.ControllerPublishVolumeResponse{}, + ExpectedResponse: &ControllerPublishVolumeResponse{}, + }, + { + Name: "handles PublishContext != nil", + Request: &ControllerPublishVolumeRequest{ExternalID: "vol", NodeID: "node"}, + Response: &csipbv1.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "com.hashicorp/nomad-node-id": "foobar", + "com.plugin/device": "/dev/sdc1", + }, + }, + ExpectedResponse: &ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "com.hashicorp/nomad-node-id": "foobar", + "com.plugin/device": "/dev/sdc1", + }, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, cc, _, client := newTestClient(t) + defer client.Close() + + cc.NextErr = tc.ResponseErr + cc.NextPublishVolumeResponse = tc.Response + + resp, err := client.ControllerPublishVolume(context.TODO(), tc.Request) + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + } + + must.Eq(t, tc.ExpectedResponse, resp) + }) + } +} + +func TestClient_RPC_ControllerUnpublishVolume(t *testing.T) { + //ci.Parallel(t) + + cases := []struct { + 
Name string + Request *ControllerUnpublishVolumeRequest + ResponseErr error + Response *csipbv1.ControllerUnpublishVolumeResponse + ExpectedResponse *ControllerUnpublishVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + Request: &ControllerUnpublishVolumeRequest{ExternalID: "vol", NodeID: "node"}, + ResponseErr: status.Errorf(codes.Internal, "some grpc error"), + ExpectedErr: fmt.Errorf("controller plugin returned an internal error, check the plugin allocation logs for more information: rpc error: code = Internal desc = some grpc error"), + }, + { + Name: "handles missing NodeID", + Request: &ControllerUnpublishVolumeRequest{ExternalID: "vol"}, + ExpectedErr: fmt.Errorf("missing NodeID"), + ExpectedResponse: nil, + }, + { + Name: "handles successful response", + Request: &ControllerUnpublishVolumeRequest{ExternalID: "vol", NodeID: "node"}, + ExpectedResponse: &ControllerUnpublishVolumeResponse{}, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, cc, _, client := newTestClient(t) + defer client.Close() + + cc.NextErr = tc.ResponseErr + cc.NextUnpublishVolumeResponse = tc.Response + + resp, err := client.ControllerUnpublishVolume(context.TODO(), tc.Request) + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + } + + must.Eq(t, tc.ExpectedResponse, resp) + }) + } +} + +func TestClient_RPC_ControllerValidateVolume(t *testing.T) { + //ci.Parallel(t) + + cases := []struct { + Name string + AccessType VolumeAccessType + AccessMode VolumeAccessMode + ResponseErr error + Response *csipbv1.ValidateVolumeCapabilitiesResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + AccessType: VolumeAccessTypeMount, + AccessMode: VolumeAccessModeMultiNodeMultiWriter, + ResponseErr: status.Errorf(codes.Internal, "some grpc error"), + ExpectedErr: fmt.Errorf("controller plugin returned an internal error, check the plugin allocation logs for more information: rpc error: 
code = Internal desc = some grpc error"), + }, + { + Name: "handles success empty capabilities", + AccessType: VolumeAccessTypeMount, + AccessMode: VolumeAccessModeMultiNodeMultiWriter, + Response: &csipbv1.ValidateVolumeCapabilitiesResponse{}, + ResponseErr: nil, + ExpectedErr: nil, + }, + { + Name: "handles success exact match MountVolume", + AccessType: VolumeAccessTypeMount, + AccessMode: VolumeAccessModeMultiNodeMultiWriter, + Response: &csipbv1.ValidateVolumeCapabilitiesResponse{ + Confirmed: &csipbv1.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeContext: map[string]string{}, + VolumeCapabilities: []*csipbv1.VolumeCapability{ + { + AccessType: &csipbv1.VolumeCapability_Mount{ + Mount: &csipbv1.VolumeCapability_MountVolume{ + FsType: "ext4", + MountFlags: []string{"errors=remount-ro", "noatime"}, + }, + }, + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + }, + }, + }, + }, + }, + ResponseErr: nil, + ExpectedErr: nil, + }, + + { + Name: "handles success exact match BlockVolume", + AccessType: VolumeAccessTypeBlock, + AccessMode: VolumeAccessModeMultiNodeMultiWriter, + Response: &csipbv1.ValidateVolumeCapabilitiesResponse{ + Confirmed: &csipbv1.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeCapabilities: []*csipbv1.VolumeCapability{ + { + AccessType: &csipbv1.VolumeCapability_Block{ + Block: &csipbv1.VolumeCapability_BlockVolume{}, + }, + + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + }, + }, + }, + }, + }, + ResponseErr: nil, + ExpectedErr: nil, + }, + + { + Name: "handles failure AccessMode mismatch", + AccessMode: VolumeAccessModeMultiNodeMultiWriter, + Response: &csipbv1.ValidateVolumeCapabilitiesResponse{ + Confirmed: &csipbv1.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeContext: map[string]string{}, + VolumeCapabilities: []*csipbv1.VolumeCapability{ + { + AccessType: 
&csipbv1.VolumeCapability_Block{ + Block: &csipbv1.VolumeCapability_BlockVolume{}, + }, + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }, + }, + ResponseErr: nil, + // this is a multierror + ExpectedErr: fmt.Errorf("volume capability validation failed: 1 error occurred:\n\t* requested access mode MULTI_NODE_MULTI_WRITER, got SINGLE_NODE_WRITER\n\n"), + }, + + { + Name: "handles failure MountFlags mismatch", + AccessType: VolumeAccessTypeMount, + AccessMode: VolumeAccessModeMultiNodeMultiWriter, + Response: &csipbv1.ValidateVolumeCapabilitiesResponse{ + Confirmed: &csipbv1.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeContext: map[string]string{}, + VolumeCapabilities: []*csipbv1.VolumeCapability{ + { + AccessType: &csipbv1.VolumeCapability_Mount{ + Mount: &csipbv1.VolumeCapability_MountVolume{ + FsType: "ext4", + MountFlags: []string{}, + }, + }, + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + }, + }, + }, + }, + }, + ResponseErr: nil, + // this is a multierror + ExpectedErr: fmt.Errorf("volume capability validation failed: 1 error occurred:\n\t* requested mount flags did not match available capabilities\n\n"), + }, + + { + Name: "handles failure MountFlags with Block", + AccessType: VolumeAccessTypeBlock, + AccessMode: VolumeAccessModeMultiNodeMultiWriter, + Response: &csipbv1.ValidateVolumeCapabilitiesResponse{ + Confirmed: &csipbv1.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeContext: map[string]string{}, + VolumeCapabilities: []*csipbv1.VolumeCapability{ + { + AccessType: &csipbv1.VolumeCapability_Mount{ + Mount: &csipbv1.VolumeCapability_MountVolume{ + FsType: "ext4", + MountFlags: []string{}, + }, + }, + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + }, + }, + }, + }, + }, + ResponseErr: nil, + // this is a 
multierror + ExpectedErr: fmt.Errorf("volume capability validation failed: 1 error occurred:\n\t* 'file-system' access type was not requested but was validated by the controller\n\n"), + }, + + { + Name: "handles success incomplete no AccessType", + AccessType: VolumeAccessTypeMount, + AccessMode: VolumeAccessModeMultiNodeMultiWriter, + Response: &csipbv1.ValidateVolumeCapabilitiesResponse{ + Confirmed: &csipbv1.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeCapabilities: []*csipbv1.VolumeCapability{ + { + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + }, + }, + }, + }, + }, + ResponseErr: nil, + ExpectedErr: nil, + }, + + { + Name: "handles success incomplete no AccessMode", + AccessType: VolumeAccessTypeBlock, + AccessMode: VolumeAccessModeMultiNodeMultiWriter, + Response: &csipbv1.ValidateVolumeCapabilitiesResponse{ + Confirmed: &csipbv1.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeCapabilities: []*csipbv1.VolumeCapability{ + { + AccessType: &csipbv1.VolumeCapability_Block{ + Block: &csipbv1.VolumeCapability_BlockVolume{}, + }, + }, + }, + }, + }, + ResponseErr: nil, + ExpectedErr: nil, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, cc, _, client := newTestClient(t) + defer client.Close() + + requestedCaps := []*VolumeCapability{{ + AccessType: tc.AccessType, + AccessMode: tc.AccessMode, + MountVolume: &structs.CSIMountOptions{ // should be ignored + FSType: "ext4", + MountFlags: []string{"noatime", "errors=remount-ro"}, + }, + }} + req := &ControllerValidateVolumeRequest{ + ExternalID: "volumeID", + Secrets: structs.CSISecrets{}, + Capabilities: requestedCaps, + Parameters: map[string]string{}, + Context: map[string]string{}, + } + + cc.NextValidateVolumeCapabilitiesResponse = tc.Response + cc.NextErr = tc.ResponseErr + + err := client.ControllerValidateCapabilities(context.TODO(), req) + if tc.ExpectedErr != nil { + must.EqError(t, err, 
tc.ExpectedErr.Error())
			} else {
				must.NoError(t, err, must.Sprint("name", tc.Name))
			}
		})
	}
}

// TestClient_RPC_ControllerCreateVolume verifies request validation, gRPC
// error mapping, and protobuf-to-struct transformation for CreateVolume.
func TestClient_RPC_ControllerCreateVolume(t *testing.T) {
	//ci.Parallel(t)

	testCases := []struct {
		Name          string
		CapacityRange *CapacityRange
		ContentSource *VolumeContentSource
		ResponseErr   error
		Response      *csipbv1.CreateVolumeResponse
		ExpectedErr   error
	}{
		{
			Name:        "handles underlying grpc errors",
			ResponseErr: status.Errorf(codes.Internal, "some grpc error"),
			ExpectedErr: fmt.Errorf("controller plugin returned an internal error, check the plugin allocation logs for more information: rpc error: code = Internal desc = some grpc error"),
		},

		{
			Name: "handles error invalid capacity range",
			CapacityRange: &CapacityRange{
				RequiredBytes: 1000,
				LimitBytes:    500,
			},
			ExpectedErr: errors.New("LimitBytes cannot be less than RequiredBytes"),
		},

		{
			Name: "handles error invalid content source",
			ContentSource: &VolumeContentSource{
				SnapshotID: "snap-12345",
				CloneID:    "vol-12345",
			},
			ExpectedErr: errors.New(
				"one of SnapshotID or CloneID must be set if ContentSource is set"),
		},

		{
			Name:     "handles success missing source and range",
			Response: &csipbv1.CreateVolumeResponse{},
		},

		{
			Name: "handles success with capacity range, source, and topology",
			CapacityRange: &CapacityRange{
				RequiredBytes: 500,
				LimitBytes:    1000,
			},
			ContentSource: &VolumeContentSource{
				SnapshotID: "snap-12345",
			},
			Response: &csipbv1.CreateVolumeResponse{
				Volume: &csipbv1.Volume{
					CapacityBytes: 1000,
					ContentSource: &csipbv1.VolumeContentSource{
						Type: &csipbv1.VolumeContentSource_Snapshot{
							Snapshot: &csipbv1.VolumeContentSource_SnapshotSource{
								SnapshotId: "snap-12345",
							},
						},
					},
					AccessibleTopology: []*csipbv1.Topology{
						{Segments: map[string]string{"rack": "R1"}},
					},
				},
			},
		},
	}
	for _, tt := range testCases {
		t.Run(tt.Name, func(t *testing.T) {
			_, cc, _, client := newTestClient(t)
			defer client.Close()

			// every case shares the same base request; only capacity range
			// and content source vary per case
			req := &ControllerCreateVolumeRequest{
				Name:          "vol-123456",
				CapacityRange: tt.CapacityRange,
				VolumeCapabilities: []*VolumeCapability{
					{
						AccessType: VolumeAccessTypeMount,
						AccessMode: VolumeAccessModeMultiNodeMultiWriter,
					},
				},
				Parameters:    map[string]string{},
				Secrets:       structs.CSISecrets{},
				ContentSource: tt.ContentSource,
				AccessibilityRequirements: &TopologyRequirement{
					Requisite: []*Topology{
						{
							Segments: map[string]string{"rack": "R1"},
						},
						{
							Segments: map[string]string{"rack": "R2"},
						},
					},
				},
			}

			cc.NextCreateVolumeResponse = tt.Response
			cc.NextErr = tt.ResponseErr

			resp, err := client.ControllerCreateVolume(context.TODO(), req)
			if tt.ExpectedErr != nil {
				must.EqError(t, err, tt.ExpectedErr.Error())
				return
			}
			must.NoError(t, err, must.Sprint("name", tt.Name))
			if tt.Response == nil {
				must.Nil(t, resp)
				return
			}
			if tt.CapacityRange != nil {
				must.Greater(t, 0, resp.Volume.CapacityBytes)
			}
			if tt.ContentSource != nil {
				must.Eq(t, tt.ContentSource.CloneID, resp.Volume.ContentSource.CloneID)
				must.Eq(t, tt.ContentSource.SnapshotID, resp.Volume.ContentSource.SnapshotID)
			}
			if tt.Response != nil && tt.Response.Volume != nil {
				must.SliceLen(t, 1, resp.Volume.AccessibleTopology)
				must.Eq(t,
					req.AccessibilityRequirements.Requisite[0].Segments,
					resp.Volume.AccessibleTopology[0].Segments,
				)
			}

		})
	}
}

func TestClient_RPC_ControllerDeleteVolume(t *testing.T) {
	//ci.Parallel(t)

	cases := []struct {
		Name        string
		Request     *ControllerDeleteVolumeRequest
		ResponseErr error
		ExpectedErr error
	}{
		{
			Name:        "handles underlying grpc errors",
			Request:     &ControllerDeleteVolumeRequest{ExternalVolumeID: "vol-12345"},
			ResponseErr: status.Errorf(codes.Internal, "some grpc error"),
			ExpectedErr: fmt.Errorf("controller plugin returned an internal error, check the plugin allocation logs for more information: rpc error: code = Internal desc = some grpc error"),
}, + + { + Name: "handles error missing volume ID", + Request: &ControllerDeleteVolumeRequest{}, + ExpectedErr: errors.New("missing ExternalVolumeID"), + }, + + { + Name: "handles success", + Request: &ControllerDeleteVolumeRequest{ExternalVolumeID: "vol-12345"}, + }, + } + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, cc, _, client := newTestClient(t) + defer client.Close() + + cc.NextErr = tc.ResponseErr + err := client.ControllerDeleteVolume(context.TODO(), tc.Request) + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + return + } + must.NoError(t, err, must.Sprint("name", tc.Name)) + }) + } +} + +func TestClient_RPC_ControllerListVolume(t *testing.T) { + //ci.Parallel(t) + + cases := []struct { + Name string + Request *ControllerListVolumesRequest + ResponseErr error + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + Request: &ControllerListVolumesRequest{}, + ResponseErr: status.Errorf(codes.Internal, "some grpc error"), + ExpectedErr: fmt.Errorf("controller plugin returned an internal error, check the plugin allocation logs for more information: rpc error: code = Internal desc = some grpc error"), + }, + + { + Name: "handles error invalid max entries", + Request: &ControllerListVolumesRequest{MaxEntries: -1}, + ExpectedErr: errors.New("MaxEntries cannot be negative"), + }, + + { + Name: "handles success", + Request: &ControllerListVolumesRequest{}, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, cc, _, client := newTestClient(t) + defer client.Close() + + cc.NextErr = tc.ResponseErr + if tc.ResponseErr != nil { + // note: there's nothing interesting to assert here other than + // that we don't throw a NPE during transformation from + // protobuf to our struct + cc.NextListVolumesResponse = &csipbv1.ListVolumesResponse{ + Entries: []*csipbv1.ListVolumesResponse_Entry{ + { + Volume: &csipbv1.Volume{ + CapacityBytes: 1000000, + VolumeId: "vol-0", + 
VolumeContext: map[string]string{"foo": "bar"}, + + ContentSource: &csipbv1.VolumeContentSource{}, + AccessibleTopology: []*csipbv1.Topology{ + { + Segments: map[string]string{"rack": "A"}, + }, + }, + }, + }, + + { + Volume: &csipbv1.Volume{ + VolumeId: "vol-1", + AccessibleTopology: []*csipbv1.Topology{ + { + Segments: map[string]string{"rack": "A"}, + }, + }, + }, + }, + + { + Volume: &csipbv1.Volume{ + VolumeId: "vol-3", + ContentSource: &csipbv1.VolumeContentSource{ + Type: &csipbv1.VolumeContentSource_Snapshot{ + Snapshot: &csipbv1.VolumeContentSource_SnapshotSource{ + SnapshotId: "snap-12345", + }, + }, + }, + }, + }, + }, + NextToken: "abcdef", + } + } + + resp, err := client.ControllerListVolumes(context.TODO(), tc.Request) + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + return + } + must.NoError(t, err, must.Sprint("name", tc.Name)) + must.NotNil(t, resp) + + }) + } +} + +func TestClient_RPC_ControllerCreateSnapshot(t *testing.T) { + //ci.Parallel(t) + + now := time.Now() + + cases := []struct { + Name string + Request *ControllerCreateSnapshotRequest + Response *csipbv1.CreateSnapshotResponse + ResponseErr error + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + Request: &ControllerCreateSnapshotRequest{ + VolumeID: "vol-12345", + Name: "snap-12345", + }, + ResponseErr: status.Errorf(codes.Internal, "some grpc error"), + ExpectedErr: fmt.Errorf("controller plugin returned an internal error, check the plugin allocation logs for more information: rpc error: code = Internal desc = some grpc error"), + }, + + { + Name: "handles error missing volume ID", + Request: &ControllerCreateSnapshotRequest{}, + ExpectedErr: errors.New("missing VolumeID"), + }, + + { + Name: "handles success", + Request: &ControllerCreateSnapshotRequest{ + VolumeID: "vol-12345", + Name: "snap-12345", + }, + Response: &csipbv1.CreateSnapshotResponse{ + Snapshot: &csipbv1.Snapshot{ + SizeBytes: 100000, + SnapshotId: "snap-12345", + 
SourceVolumeId: "vol-12345", + CreationTime: timestamppb.New(now), + ReadyToUse: true, + }, + }, + }, + } + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, cc, _, client := newTestClient(t) + defer client.Close() + + cc.NextErr = tc.ResponseErr + cc.NextCreateSnapshotResponse = tc.Response + // note: there's nothing interesting to assert about the response + // here other than that we don't throw a NPE during transformation + // from protobuf to our struct + resp, err := client.ControllerCreateSnapshot(context.TODO(), tc.Request) + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + } else { + must.NoError(t, err, must.Sprint("name", tc.Name)) + must.Positive(t, resp.Snapshot.CreateTime) + must.Eq(t, now.Second(), time.Unix(resp.Snapshot.CreateTime, 0).Second()) + } + }) + } +} + +func TestClient_RPC_ControllerDeleteSnapshot(t *testing.T) { + //ci.Parallel(t) + + cases := []struct { + Name string + Request *ControllerDeleteSnapshotRequest + ResponseErr error + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + Request: &ControllerDeleteSnapshotRequest{SnapshotID: "vol-12345"}, + ResponseErr: status.Errorf(codes.Internal, "some grpc error"), + ExpectedErr: fmt.Errorf("controller plugin returned an internal error, check the plugin allocation logs for more information: rpc error: code = Internal desc = some grpc error"), + }, + + { + Name: "handles error missing volume ID", + Request: &ControllerDeleteSnapshotRequest{}, + ExpectedErr: errors.New("missing SnapshotID"), + }, + + { + Name: "handles success", + Request: &ControllerDeleteSnapshotRequest{SnapshotID: "vol-12345"}, + }, + } + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, cc, _, client := newTestClient(t) + defer client.Close() + + cc.NextErr = tc.ResponseErr + err := client.ControllerDeleteSnapshot(context.TODO(), tc.Request) + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + return + } + 
must.NoError(t, err, must.Sprint("name", tc.Name)) + }) + } +} + +func TestClient_RPC_ControllerListSnapshots(t *testing.T) { + //ci.Parallel(t) + + cases := []struct { + Name string + Request *ControllerListSnapshotsRequest + ResponseErr error + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + Request: &ControllerListSnapshotsRequest{}, + ResponseErr: status.Errorf(codes.Internal, "some grpc error"), + ExpectedErr: fmt.Errorf("controller plugin returned an internal error, check the plugin allocation logs for more information: rpc error: code = Internal desc = some grpc error"), + }, + + { + Name: "handles error invalid max entries", + Request: &ControllerListSnapshotsRequest{MaxEntries: -1}, + ExpectedErr: errors.New("MaxEntries cannot be negative"), + }, + + { + Name: "handles success", + Request: &ControllerListSnapshotsRequest{}, + }, + } + + now := time.Now() + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, cc, _, client := newTestClient(t) + defer client.Close() + + cc.NextErr = tc.ResponseErr + if tc.ResponseErr == nil { + cc.NextListSnapshotsResponse = &csipbv1.ListSnapshotsResponse{ + Entries: []*csipbv1.ListSnapshotsResponse_Entry{ + { + Snapshot: &csipbv1.Snapshot{ + SizeBytes: 1000000, + SnapshotId: "snap-12345", + SourceVolumeId: "vol-12345", + ReadyToUse: true, + CreationTime: timestamppb.New(now), + }, + }, + }, + NextToken: "abcdef", + } + } + + resp, err := client.ControllerListSnapshots(context.TODO(), tc.Request) + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + return + } + must.NoError(t, err, must.Sprint("name", tc.Name)) + must.NotNil(t, resp) + must.Len(t, 1, resp.Entries) + must.Positive(t, resp.Entries[0].Snapshot.CreateTime) + must.Eq(t, now.Second(), + time.Unix(resp.Entries[0].Snapshot.CreateTime, 0).Second()) + }) + } +} + +func TestClient_RPC_ControllerExpandVolume(t *testing.T) { + + cases := []struct { + Name string + Request *ControllerExpandVolumeRequest + 
ExpectCall *csipbv1.ControllerExpandVolumeRequest + ResponseErr error + ExpectedErr error + }{ + { + Name: "success", + Request: &ControllerExpandVolumeRequest{ + ExternalVolumeID: "vol-1", + RequiredBytes: 1, + LimitBytes: 2, + Capability: &VolumeCapability{ + AccessMode: VolumeAccessModeMultiNodeSingleWriter, + }, + Secrets: map[string]string{"super": "secret"}, + }, + ExpectCall: &csipbv1.ControllerExpandVolumeRequest{ + VolumeId: "vol-1", + CapacityRange: &csipbv1.CapacityRange{ + RequiredBytes: 1, + LimitBytes: 2, + }, + VolumeCapability: &csipbv1.VolumeCapability{ + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: csipbv1.VolumeCapability_AccessMode_Mode(csipbv1.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER), + }, + AccessType: &csipbv1.VolumeCapability_Block{Block: &csipbv1.VolumeCapability_BlockVolume{}}, + }, + Secrets: map[string]string{"super": "secret"}, + }, + }, + + { + Name: "validate only min set", + Request: &ControllerExpandVolumeRequest{ + ExternalVolumeID: "vol-1", + RequiredBytes: 4, + }, + ExpectCall: &csipbv1.ControllerExpandVolumeRequest{ + VolumeId: "vol-1", + CapacityRange: &csipbv1.CapacityRange{ + RequiredBytes: 4, + }, + }, + }, + { + Name: "validate missing volume ID", + Request: &ControllerExpandVolumeRequest{}, + ExpectedErr: errors.New("missing ExternalVolumeID"), + }, + { + Name: "validate missing max/min size", + Request: &ControllerExpandVolumeRequest{ + ExternalVolumeID: "vol-1", + }, + ExpectedErr: errors.New("one of LimitBytes or RequiredBytes must be set"), + }, + { + Name: "validate min greater than max", + Request: &ControllerExpandVolumeRequest{ + ExternalVolumeID: "vol-1", + RequiredBytes: 4, + LimitBytes: 2, + }, + ExpectedErr: errors.New("LimitBytes cannot be less than RequiredBytes"), + }, + + { + Name: "grpc error InvalidArgument", + Request: &ControllerExpandVolumeRequest{ + ExternalVolumeID: "vol-1", LimitBytes: 1000}, + ResponseErr: status.Errorf(codes.InvalidArgument, "sad args"), + ExpectedErr: 
errors.New("requested capabilities not compatible with volume \"vol-1\": rpc error: code = InvalidArgument desc = sad args"), + }, + + { + Name: "grpc error NotFound", + Request: &ControllerExpandVolumeRequest{ + ExternalVolumeID: "vol-1", LimitBytes: 1000}, + ResponseErr: status.Errorf(codes.NotFound, "does not exist"), + ExpectedErr: errors.New("volume \"vol-1\" could not be found: rpc error: code = NotFound desc = does not exist"), + }, + { + Name: "grpc error FailedPrecondition", + Request: &ControllerExpandVolumeRequest{ + ExternalVolumeID: "vol-1", LimitBytes: 1000}, + ResponseErr: status.Errorf(codes.FailedPrecondition, "unsupported"), + ExpectedErr: errors.New("volume \"vol-1\" cannot be expanded online: rpc error: code = FailedPrecondition desc = unsupported"), + }, + { + Name: "grpc error OutOfRange", + Request: &ControllerExpandVolumeRequest{ + ExternalVolumeID: "vol-1", LimitBytes: 1000}, + ResponseErr: status.Errorf(codes.OutOfRange, "too small"), + ExpectedErr: errors.New("unsupported capacity_range for volume \"vol-1\": rpc error: code = OutOfRange desc = too small"), + }, + { + Name: "grpc error Internal", + Request: &ControllerExpandVolumeRequest{ + ExternalVolumeID: "vol-1", LimitBytes: 1000}, + ResponseErr: status.Errorf(codes.Internal, "some grpc error"), + ExpectedErr: errors.New("controller plugin returned an internal error, check the plugin allocation logs for more information: rpc error: code = Internal desc = some grpc error"), + }, + { + Name: "grpc error default case", + Request: &ControllerExpandVolumeRequest{ + ExternalVolumeID: "vol-1", LimitBytes: 1000}, + ResponseErr: status.Errorf(codes.DataLoss, "misc unspecified error"), + ExpectedErr: errors.New("controller plugin returned an error: rpc error: code = DataLoss desc = misc unspecified error"), + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, cc, _, client := newTestClient(t) + + cc.NextErr = tc.ResponseErr + // the fake client should take ~no time, 
but set a timeout just in case + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) + defer cancel() + resp, err := client.ControllerExpandVolume(ctx, tc.Request) + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + return + } + must.NoError(t, err) + must.NotNil(t, resp) + must.Eq(t, tc.ExpectCall, cc.LastExpandVolumeRequest) + + }) + } + + t.Run("connection error", func(t *testing.T) { + c := &client{} // induce c.ensureConnected() error + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) + defer cancel() + resp, err := c.ControllerExpandVolume(ctx, &ControllerExpandVolumeRequest{ + ExternalVolumeID: "valid-id", + RequiredBytes: 1, + }) + must.Nil(t, resp) + must.EqError(t, err, "address is empty") + }) +} + +func TestClient_RPC_NodeStageVolume(t *testing.T) { + //ci.Parallel(t) + + cases := []struct { + Name string + ResponseErr error + Response *csipbv1.NodeStageVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: status.Errorf(codes.AlreadyExists, "some grpc error"), + ExpectedErr: fmt.Errorf("volume \"foo\" is already staged to \"/path\" but with incompatible capabilities for this request: rpc error: code = AlreadyExists desc = some grpc error"), + }, + { + Name: "handles success", + ResponseErr: nil, + ExpectedErr: nil, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, _, nc, client := newTestClient(t) + defer client.Close() + + nc.NextErr = tc.ResponseErr + nc.NextStageVolumeResponse = tc.Response + + err := client.NodeStageVolume(context.TODO(), &NodeStageVolumeRequest{ + ExternalID: "foo", + StagingTargetPath: "/path", + VolumeCapability: &VolumeCapability{}, + }) + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + } else { + must.NoError(t, err) + } + }) + } +} + +func TestClient_RPC_NodeUnstageVolume(t *testing.T) { + //ci.Parallel(t) + + cases := []struct { + Name 
string + ResponseErr error + Response *csipbv1.NodeUnstageVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: status.Errorf(codes.Internal, "some grpc error"), + ExpectedErr: fmt.Errorf("node plugin returned an internal error, check the plugin allocation logs for more information: rpc error: code = Internal desc = some grpc error"), + }, + { + Name: "handles success", + ResponseErr: nil, + ExpectedErr: nil, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, _, nc, client := newTestClient(t) + defer client.Close() + + nc.NextErr = tc.ResponseErr + nc.NextUnstageVolumeResponse = tc.Response + + err := client.NodeUnstageVolume(context.TODO(), "foo", "/foo") + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + } else { + must.NoError(t, err) + } + }) + } +} + +func TestClient_RPC_NodePublishVolume(t *testing.T) { + //ci.Parallel(t) + + cases := []struct { + Name string + Request *NodePublishVolumeRequest + ResponseErr error + Response *csipbv1.NodePublishVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + Request: &NodePublishVolumeRequest{ + ExternalID: "foo", + TargetPath: "/dev/null", + VolumeCapability: &VolumeCapability{}, + }, + ResponseErr: status.Errorf(codes.Internal, "some grpc error"), + ExpectedErr: fmt.Errorf("node plugin returned an internal error, check the plugin allocation logs for more information: rpc error: code = Internal desc = some grpc error"), + }, + { + Name: "handles success", + Request: &NodePublishVolumeRequest{ + ExternalID: "foo", + TargetPath: "/dev/null", + VolumeCapability: &VolumeCapability{}, + }, + ResponseErr: nil, + ExpectedErr: nil, + }, + { + Name: "Performs validation of the publish volume request", + Request: &NodePublishVolumeRequest{ + ExternalID: "", + }, + ResponseErr: nil, + ExpectedErr: errors.New("validation error: missing volume ID"), + }, + } + + for _, tc := range cases { + 
t.Run(tc.Name, func(t *testing.T) { + _, _, nc, client := newTestClient(t) + defer client.Close() + + nc.NextErr = tc.ResponseErr + nc.NextPublishVolumeResponse = tc.Response + + err := client.NodePublishVolume(context.TODO(), tc.Request) + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + } else { + must.NoError(t, err) + } + }) + } +} +func TestClient_RPC_NodeUnpublishVolume(t *testing.T) { + //ci.Parallel(t) + + cases := []struct { + Name string + ExternalID string + TargetPath string + ResponseErr error + Response *csipbv1.NodeUnpublishVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ExternalID: "foo", + TargetPath: "/dev/null", + ResponseErr: status.Errorf(codes.Internal, "some grpc error"), + ExpectedErr: fmt.Errorf("node plugin returned an internal error, check the plugin allocation logs for more information: rpc error: code = Internal desc = some grpc error"), + }, + { + Name: "handles success", + ExternalID: "foo", + TargetPath: "/dev/null", + ResponseErr: nil, + ExpectedErr: nil, + }, + { + Name: "Performs validation of the request args - ExternalID", + ResponseErr: nil, + ExpectedErr: errors.New("missing volumeID"), + }, + { + Name: "Performs validation of the request args - TargetPath", + ExternalID: "foo", + ResponseErr: nil, + ExpectedErr: errors.New("missing targetPath"), + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, _, nc, client := newTestClient(t) + defer client.Close() + + nc.NextErr = tc.ResponseErr + nc.NextUnpublishVolumeResponse = tc.Response + + err := client.NodeUnpublishVolume(context.TODO(), tc.ExternalID, tc.TargetPath) + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + } else { + must.NoError(t, err) + } + }) + } +} + +func TestClient_RPC_NodeExpandVolume(t *testing.T) { + // minimum valid request + minRequest := &NodeExpandVolumeRequest{ + ExternalVolumeID: "test-vol", + TargetPath: "/test-path", + } + + 
cases := []struct { + Name string + Request *NodeExpandVolumeRequest + ExpectCall *csipbv1.NodeExpandVolumeRequest + ResponseErr error + ExpectedErr error + }{ + { + Name: "success min", + Request: minRequest, + ExpectCall: &csipbv1.NodeExpandVolumeRequest{ + VolumeId: "test-vol", + VolumePath: "/test-path", + }, + }, + { + Name: "success full", + Request: &NodeExpandVolumeRequest{ + ExternalVolumeID: "test-vol", + TargetPath: "/test-path", + StagingPath: "/test-staging-path", + CapacityRange: &CapacityRange{ + RequiredBytes: 5, + LimitBytes: 10, + }, + Capability: &VolumeCapability{ + AccessType: VolumeAccessTypeMount, + AccessMode: VolumeAccessModeMultiNodeSingleWriter, + MountVolume: &structs.CSIMountOptions{ + FSType: "test-fstype", + MountFlags: []string{"test-flags"}, + }, + }, + }, + ExpectCall: &csipbv1.NodeExpandVolumeRequest{ + VolumeId: "test-vol", + VolumePath: "/test-path", + StagingTargetPath: "/test-staging-path", + CapacityRange: &csipbv1.CapacityRange{ + RequiredBytes: 5, + LimitBytes: 10, + }, + VolumeCapability: &csipbv1.VolumeCapability{ + AccessType: &csipbv1.VolumeCapability_Mount{ + Mount: &csipbv1.VolumeCapability_MountVolume{ + FsType: "test-fstype", + MountFlags: []string{"test-flags"}, + VolumeMountGroup: "", + }}, + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: csipbv1.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER}, + }, + }, + }, + + { + Name: "validate missing volume id", + Request: &NodeExpandVolumeRequest{ + TargetPath: "/test-path", + }, + ExpectedErr: errors.New("ExternalVolumeID is required"), + }, + { + Name: "validate missing target path", + Request: &NodeExpandVolumeRequest{ + ExternalVolumeID: "test-volume", + }, + ExpectedErr: errors.New("TargetPath is required"), + }, + { + Name: "validate min greater than max", + Request: &NodeExpandVolumeRequest{ + ExternalVolumeID: "test-vol", + TargetPath: "/test-path", + CapacityRange: &CapacityRange{ + RequiredBytes: 4, + LimitBytes: 2, + }, + }, + ExpectedErr: 
errors.New("LimitBytes cannot be less than RequiredBytes"), + }, + + { + Name: "grpc error default case", + Request: minRequest, + ResponseErr: status.Errorf(codes.DataLoss, "misc unspecified error"), + ExpectedErr: errors.New("node plugin returned an error: rpc error: code = DataLoss desc = misc unspecified error"), + }, + { + Name: "grpc error invalid argument", + Request: minRequest, + ResponseErr: status.Errorf(codes.InvalidArgument, "sad args"), + ExpectedErr: errors.New("requested capabilities not compatible with volume \"test-vol\": rpc error: code = InvalidArgument desc = sad args"), + }, + { + Name: "grpc error NotFound", + Request: minRequest, + ResponseErr: status.Errorf(codes.NotFound, "does not exist"), + ExpectedErr: errors.New("CSI client error (ignorable): volume \"test-vol\" could not be found: rpc error: code = NotFound desc = does not exist"), + }, + { + Name: "grpc error FailedPrecondition", + Request: minRequest, + ResponseErr: status.Errorf(codes.FailedPrecondition, "unsupported"), + ExpectedErr: errors.New("volume \"test-vol\" cannot be expanded while in use: rpc error: code = FailedPrecondition desc = unsupported"), + }, + { + Name: "grpc error OutOfRange", + Request: minRequest, + ResponseErr: status.Errorf(codes.OutOfRange, "too small"), + ExpectedErr: errors.New("unsupported capacity_range for volume \"test-vol\": rpc error: code = OutOfRange desc = too small"), + }, + { + Name: "grpc error Internal", + Request: minRequest, + ResponseErr: status.Errorf(codes.Internal, "some grpc error"), + ExpectedErr: errors.New("node plugin returned an internal error, check the plugin allocation logs for more information: rpc error: code = Internal desc = some grpc error"), + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, _, nc, client := newTestClient(t) + + nc.NextErr = tc.ResponseErr + // the fake client should take ~no time, but set a timeout just in case + ctx, cancel := context.WithTimeout(context.Background(), 
time.Millisecond*50) + defer cancel() + resp, err := client.NodeExpandVolume(ctx, tc.Request) + if tc.ExpectedErr != nil { + must.EqError(t, err, tc.ExpectedErr.Error()) + return + } + must.NoError(t, err) + must.NotNil(t, resp) + must.Eq(t, tc.ExpectCall, nc.LastExpandVolumeRequest) + + }) + } + + t.Run("connection error", func(t *testing.T) { + c := &client{} // induce c.ensureConnected() error + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) + defer cancel() + resp, err := c.NodeExpandVolume(ctx, &NodeExpandVolumeRequest{ + ExternalVolumeID: "valid-id", + TargetPath: "/some-path", + }) + must.Nil(t, resp) + must.EqError(t, err, "address is empty") + }) +} diff --git a/plugin_interface/csi/errors.go b/plugin_interface/csi/errors.go new file mode 100644 index 00000000000..06cd734c10f --- /dev/null +++ b/plugin_interface/csi/errors.go @@ -0,0 +1,11 @@ +package csi + +import "errors" + +var ( + ErrCSIClientRPCIgnorable = errors.New("CSI client error (ignorable)") + ErrCSIClientRPCRetryable = errors.New("CSI client error (retryable)") + ErrCSIVolumeMaxClaims = errors.New("volume max claims reached") + ErrCSIVolumeUnschedulable = errors.New("volume is currently unschedulable") + ErrCSIPluginInUse = errors.New("plugin in use") +) diff --git a/plugin_interface/csi/fake/client.go b/plugin_interface/csi/fake/client.go new file mode 100644 index 00000000000..7e871449265 --- /dev/null +++ b/plugin_interface/csi/fake/client.go @@ -0,0 +1,375 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +// fake is a package that includes fake implementations of public interfaces +// from the CSI package for testing. 
+package fake + +import ( + "context" + "errors" + "fmt" + "sync" + + "google.golang.org/grpc" + + "github.com/hashicorp/nomad/plugin-interface/base" + "github.com/hashicorp/nomad/plugin-interface/csi" + "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" +) + +var _ csi.CSIPlugin = &Client{} + +// Client is a mock implementation of the csi.CSIPlugin interface for use in testing +// external components +type Client struct { + Mu sync.RWMutex + + NextPluginInfoResponse *base.PluginInfoResponse + NextPluginInfoErr error + PluginInfoCallCount int64 + + NextPluginProbeResponse bool + NextPluginProbeErr error + PluginProbeCallCount int64 + + NextPluginGetInfoNameResponse string + NextPluginGetInfoVersionResponse string + NextPluginGetInfoErr error + PluginGetInfoCallCount int64 + + NextPluginGetCapabilitiesResponse *csi.PluginCapabilitySet + NextPluginGetCapabilitiesErr error + PluginGetCapabilitiesCallCount int64 + + NextControllerGetCapabilitiesResponse *csi.ControllerCapabilitySet + NextControllerGetCapabilitiesErr error + ControllerGetCapabilitiesCallCount int64 + + NextControllerPublishVolumeResponse *csi.ControllerPublishVolumeResponse + NextControllerPublishVolumeErr error + ControllerPublishVolumeCallCount int64 + + NextControllerUnpublishVolumeResponse *csi.ControllerUnpublishVolumeResponse + NextControllerUnpublishVolumeErr error + ControllerUnpublishVolumeCallCount int64 + + NextControllerCreateVolumeResponse *csi.ControllerCreateVolumeResponse + NextControllerCreateVolumeErr error + ControllerCreateVolumeCallCount int64 + + NextControllerDeleteVolumeErr error + ControllerDeleteVolumeCallCount int64 + + NextControllerListVolumesResponse *csi.ControllerListVolumesResponse + NextControllerListVolumesErr error + ControllerListVolumesCallCount int64 + + NextControllerValidateVolumeErr error + ControllerValidateVolumeCallCount int64 + + NextControllerCreateSnapshotResponse *csi.ControllerCreateSnapshotResponse + NextControllerCreateSnapshotErr error + 
ControllerCreateSnapshotCallCount int64 + + NextControllerDeleteSnapshotErr error + ControllerDeleteSnapshotCallCount int64 + + NextControllerListSnapshotsResponse *csi.ControllerListSnapshotsResponse + NextControllerListSnapshotsErr error + ControllerListSnapshotsCallCount int64 + + NextControllerExpandVolumeResponse *csi.ControllerExpandVolumeResponse + NextControllerExpandVolumeErr error + ControllerExpandVolumeCallCount int64 + + NextNodeGetCapabilitiesResponse *csi.NodeCapabilitySet + NextNodeGetCapabilitiesErr error + NodeGetCapabilitiesCallCount int64 + + NextNodeGetInfoResponse *csi.NodeGetInfoResponse + NextNodeGetInfoErr error + NodeGetInfoCallCount int64 + + NextNodeStageVolumeErr error + NodeStageVolumeCallCount int64 + + NextNodeUnstageVolumeErr error + NodeUnstageVolumeCallCount int64 + + PrevVolumeCapability *csi.VolumeCapability + NextNodePublishVolumeErr error + NodePublishVolumeCallCount int64 + + NextNodeUnpublishVolumeErr error + NodeUnpublishVolumeCallCount int64 + + NextNodeExpandVolumeResponse *csi.NodeExpandVolumeResponse + NextNodeExpandVolumeErr error + NodeExpandVolumeCallCount int64 +} + +// PluginInfo describes the type and version of a plugin. +func (c *Client) PluginInfo() (*base.PluginInfoResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PluginInfoCallCount++ + + return c.NextPluginInfoResponse, c.NextPluginInfoErr +} + +// ConfigSchema returns the schema for parsing the plugins configuration. +func (c *Client) ConfigSchema() (*hclspec.Spec, error) { + return nil, errors.New("Unsupported") +} + +// SetConfig is used to set the configuration by passing a MessagePack +// encoding of it. 
+func (c *Client) SetConfig(a *base.Config) error { + return errors.New("Unsupported") +} + +// PluginProbe is used to verify that the plugin is in a healthy state +func (c *Client) PluginProbe(ctx context.Context) (bool, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PluginProbeCallCount++ + + return c.NextPluginProbeResponse, c.NextPluginProbeErr +} + +// PluginGetInfo is used to return semantic data about the plugin. +// Response: +// - string: name, the name of the plugin in domain notation format. +func (c *Client) PluginGetInfo(ctx context.Context) (string, string, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PluginGetInfoCallCount++ + + return c.NextPluginGetInfoNameResponse, c.NextPluginGetInfoVersionResponse, c.NextPluginGetInfoErr +} + +// PluginGetCapabilities is used to return the available capabilities from the +// identity service. This currently only looks for the CONTROLLER_SERVICE and +// Accessible Topology Support +func (c *Client) PluginGetCapabilities(ctx context.Context) (*csi.PluginCapabilitySet, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PluginGetCapabilitiesCallCount++ + + return c.NextPluginGetCapabilitiesResponse, c.NextPluginGetCapabilitiesErr +} + +func (c *Client) ControllerGetCapabilities(ctx context.Context) (*csi.ControllerCapabilitySet, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.ControllerGetCapabilitiesCallCount++ + + return c.NextControllerGetCapabilitiesResponse, c.NextControllerGetCapabilitiesErr +} + +// ControllerPublishVolume is used to attach a remote volume to a node +func (c *Client) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*csi.ControllerPublishVolumeResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.ControllerPublishVolumeCallCount++ + + return c.NextControllerPublishVolumeResponse, c.NextControllerPublishVolumeErr +} + +// ControllerUnpublishVolume is used to detach a remote volume from a node +func (c *Client) 
ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*csi.ControllerUnpublishVolumeResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.ControllerUnpublishVolumeCallCount++ + + return c.NextControllerUnpublishVolumeResponse, c.NextControllerUnpublishVolumeErr +} + +func (c *Client) ControllerValidateCapabilities(ctx context.Context, req *csi.ControllerValidateVolumeRequest, opts ...grpc.CallOption) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.ControllerValidateVolumeCallCount++ + + return c.NextControllerValidateVolumeErr +} + +func (c *Client) ControllerCreateVolume(ctx context.Context, in *csi.ControllerCreateVolumeRequest, opts ...grpc.CallOption) (*csi.ControllerCreateVolumeResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + c.ControllerCreateVolumeCallCount++ + return c.NextControllerCreateVolumeResponse, c.NextControllerCreateVolumeErr +} + +func (c *Client) ControllerDeleteVolume(ctx context.Context, req *csi.ControllerDeleteVolumeRequest, opts ...grpc.CallOption) error { + c.Mu.Lock() + defer c.Mu.Unlock() + c.ControllerDeleteVolumeCallCount++ + return c.NextControllerDeleteVolumeErr +} + +func (c *Client) ControllerListVolumes(ctx context.Context, req *csi.ControllerListVolumesRequest, opts ...grpc.CallOption) (*csi.ControllerListVolumesResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + c.ControllerListVolumesCallCount++ + return c.NextControllerListVolumesResponse, c.NextControllerListVolumesErr +} + +func (c *Client) ControllerCreateSnapshot(ctx context.Context, req *csi.ControllerCreateSnapshotRequest, opts ...grpc.CallOption) (*csi.ControllerCreateSnapshotResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + c.ControllerCreateSnapshotCallCount++ + return c.NextControllerCreateSnapshotResponse, c.NextControllerCreateSnapshotErr +} + +func (c *Client) ControllerDeleteSnapshot(ctx context.Context, req *csi.ControllerDeleteSnapshotRequest, opts ...grpc.CallOption) 
error { + c.Mu.Lock() + defer c.Mu.Unlock() + c.ControllerDeleteSnapshotCallCount++ + return c.NextControllerDeleteSnapshotErr +} + +func (c *Client) ControllerListSnapshots(ctx context.Context, req *csi.ControllerListSnapshotsRequest, opts ...grpc.CallOption) (*csi.ControllerListSnapshotsResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + c.ControllerListSnapshotsCallCount++ + return c.NextControllerListSnapshotsResponse, c.NextControllerListSnapshotsErr +} + +func (c *Client) ControllerExpandVolume(ctx context.Context, in *csi.ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*csi.ControllerExpandVolumeResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + c.ControllerExpandVolumeCallCount++ + return c.NextControllerExpandVolumeResponse, c.NextControllerExpandVolumeErr +} + +func (c *Client) NodeGetCapabilities(ctx context.Context) (*csi.NodeCapabilitySet, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeGetCapabilitiesCallCount++ + + return c.NextNodeGetCapabilitiesResponse, c.NextNodeGetCapabilitiesErr +} + +// NodeGetInfo is used to return semantic data about the current node in +// respect to the SP. +func (c *Client) NodeGetInfo(ctx context.Context) (*csi.NodeGetInfoResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeGetInfoCallCount++ + + return c.NextNodeGetInfoResponse, c.NextNodeGetInfoErr +} + +// NodeStageVolume is used when a plugin has the STAGE_UNSTAGE volume capability +// to prepare a volume for usage on a host. If err == nil, the response should +// be assumed to be successful. +func (c *Client) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest, opts ...grpc.CallOption) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeStageVolumeCallCount++ + + return c.NextNodeStageVolumeErr +} + +// NodeUnstageVolume is used when a plugin has the STAGE_UNSTAGE volume capability +// to undo the work performed by NodeStageVolume. 
If a volume has been staged, +// this RPC must be called before freeing the volume. +// +// If err == nil, the response should be assumed to be successful. +func (c *Client) NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string, opts ...grpc.CallOption) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeUnstageVolumeCallCount++ + + return c.NextNodeUnstageVolumeErr +} + +func (c *Client) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest, opts ...grpc.CallOption) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PrevVolumeCapability = req.VolumeCapability + c.NodePublishVolumeCallCount++ + + return c.NextNodePublishVolumeErr +} + +func (c *Client) NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string, opts ...grpc.CallOption) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeUnpublishVolumeCallCount++ + + return c.NextNodeUnpublishVolumeErr +} + +func (c *Client) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest, opts ...grpc.CallOption) (*csi.NodeExpandVolumeResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeExpandVolumeCallCount++ + return c.NextNodeExpandVolumeResponse, c.NextNodeExpandVolumeErr +} + +// Close the client and ensure any connections are cleaned up. 
+func (c *Client) Close() error { + + c.NextPluginInfoResponse = nil + c.NextPluginInfoErr = fmt.Errorf("closed client") + + c.NextPluginProbeResponse = false + c.NextPluginProbeErr = fmt.Errorf("closed client") + + c.NextPluginGetInfoNameResponse = "" + c.NextPluginGetInfoVersionResponse = "" + c.NextPluginGetInfoErr = fmt.Errorf("closed client") + + c.NextPluginGetCapabilitiesResponse = nil + c.NextPluginGetCapabilitiesErr = fmt.Errorf("closed client") + + c.NextControllerGetCapabilitiesResponse = nil + c.NextControllerGetCapabilitiesErr = fmt.Errorf("closed client") + + c.NextControllerPublishVolumeResponse = nil + c.NextControllerPublishVolumeErr = fmt.Errorf("closed client") + + c.NextControllerUnpublishVolumeResponse = nil + c.NextControllerUnpublishVolumeErr = fmt.Errorf("closed client") + + c.NextControllerExpandVolumeResponse = nil + c.NextControllerExpandVolumeErr = fmt.Errorf("closed client") + + c.NextControllerValidateVolumeErr = fmt.Errorf("closed client") + + c.NextNodeGetCapabilitiesResponse = nil + c.NextNodeGetCapabilitiesErr = fmt.Errorf("closed client") + + c.NextNodeGetInfoResponse = nil + c.NextNodeGetInfoErr = fmt.Errorf("closed client") + + c.NextNodeStageVolumeErr = fmt.Errorf("closed client") + + c.NextNodeUnstageVolumeErr = fmt.Errorf("closed client") + + c.NextNodePublishVolumeErr = fmt.Errorf("closed client") + + c.NextNodeUnpublishVolumeErr = fmt.Errorf("closed client") + + c.NextNodeExpandVolumeResponse = nil + c.NextNodeExpandVolumeErr = fmt.Errorf("closed client") + + return nil +} diff --git a/plugin_interface/csi/plugin.go b/plugin_interface/csi/plugin.go new file mode 100644 index 00000000000..4c059580847 --- /dev/null +++ b/plugin_interface/csi/plugin.go @@ -0,0 +1,1083 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package csi + +import ( + "context" + "errors" + "fmt" + + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/grpc" + + "github.com/hashicorp/nomad/plugin-interface/base" + "github.com/hashicorp/nomad/plugin-interface/csi/structs" +) + +// CSIPlugin implements a lightweight abstraction layer around a CSI Plugin. +// It validates that responses from storage providers (SPs) correctly conform +// to the specification before returning response data or erroring. +type CSIPlugin interface { + base.BasePlugin + + // PluginProbe is used to verify that the plugin is in a healthy state + PluginProbe(ctx context.Context) (bool, error) + + // PluginGetInfo is used to return semantic data about the plugin. + // Response: + // - string: name, the name of the plugin in domain notation format. + // - string: version, the vendor version of the plugin + PluginGetInfo(ctx context.Context) (string, string, error) + + // PluginGetCapabilities is used to return the available capabilities from the + // identity service. This currently only looks for the CONTROLLER_SERVICE and + // Accessible Topology Support + PluginGetCapabilities(ctx context.Context) (*PluginCapabilitySet, error) + + // ControllerGetCapabilities is used to get controller-specific capabilities + // for a plugin. + ControllerGetCapabilities(ctx context.Context) (*ControllerCapabilitySet, error) + + // ControllerPublishVolume is used to attach a remote volume to a cluster node. + ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) + + // ControllerUnpublishVolume is used to detach a remote volume from a cluster node. 
+ ControllerUnpublishVolume(ctx context.Context, req *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) + + // ControllerValidateCapabilities is used to validate that a volume exists and + // supports the requested capability. + ControllerValidateCapabilities(ctx context.Context, req *ControllerValidateVolumeRequest, opts ...grpc.CallOption) error + + // ControllerCreateVolume is used to create a remote volume in the + // external storage provider + ControllerCreateVolume(ctx context.Context, req *ControllerCreateVolumeRequest, opts ...grpc.CallOption) (*ControllerCreateVolumeResponse, error) + + // ControllerDeleteVolume is used to delete a remote volume in the + // external storage provider + ControllerDeleteVolume(ctx context.Context, req *ControllerDeleteVolumeRequest, opts ...grpc.CallOption) error + + // ControllerListVolumes is used to list all volumes available in the + // external storage provider + ControllerListVolumes(ctx context.Context, req *ControllerListVolumesRequest, opts ...grpc.CallOption) (*ControllerListVolumesResponse, error) + + // ControllerExpandVolume is used to expand a volume's size + ControllerExpandVolume(ctx context.Context, req *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error) + + // ControllerCreateSnapshot is used to create a volume snapshot in the + // external storage provider + ControllerCreateSnapshot(ctx context.Context, req *ControllerCreateSnapshotRequest, opts ...grpc.CallOption) (*ControllerCreateSnapshotResponse, error) + + // ControllerDeleteSnapshot is used to delete a volume snapshot from the + // external storage provider + ControllerDeleteSnapshot(ctx context.Context, req *ControllerDeleteSnapshotRequest, opts ...grpc.CallOption) error + + // ControllerListSnapshots is used to list all volume snapshots available + // in the external storage provider + ControllerListSnapshots(ctx context.Context, req 
*ControllerListSnapshotsRequest, opts ...grpc.CallOption) (*ControllerListSnapshotsResponse, error) + + // NodeGetCapabilities is used to return the available capabilities from the + // Node Service. + NodeGetCapabilities(ctx context.Context) (*NodeCapabilitySet, error) + + // NodeGetInfo is used to return semantic data about the current node in + // respect to the SP. + NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error) + + // NodeStageVolume is used when a plugin has the STAGE_UNSTAGE volume capability + // to prepare a volume for usage on a host. If err == nil, the response should + // be assumed to be successful. + NodeStageVolume(ctx context.Context, req *NodeStageVolumeRequest, opts ...grpc.CallOption) error + + // NodeUnstageVolume is used when a plugin has the STAGE_UNSTAGE volume capability + // to undo the work performed by NodeStageVolume. If a volume has been staged, + // this RPC must be called before freeing the volume. + // + // If err == nil, the response should be assumed to be successful. + NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string, opts ...grpc.CallOption) error + + // NodePublishVolume is used to prepare a volume for use by an allocation. + // if err == nil the response should be assumed to be successful. + NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest, opts ...grpc.CallOption) error + + // NodeUnpublishVolume is used to cleanup usage of a volume for an alloc. This + // MUST be called before calling NodeUnstageVolume or ControllerUnpublishVolume + // for the given volume. + NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string, opts ...grpc.CallOption) error + + // NodeExpandVolume is used to expand a volume. 
This MUST be called after + // any ControllerExpandVolume is called, but only if that RPC indicates + // that node expansion is required + NodeExpandVolume(ctx context.Context, req *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error) + + // Shutdown the client and ensure any connections are cleaned up. + Close() error +} + +type NodePublishVolumeRequest struct { + // The external ID of the volume to publish. + ExternalID string + + // If the volume was attached via a call to `ControllerPublishVolume` then + // we need to provide the returned PublishContext here. + PublishContext map[string]string + + // The path to which the volume was staged by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // E.g {the plugins internal mount path}/staging/volumeid/... + // + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + StagingTargetPath string + + // The path to which the volume will be published. + // It MUST be an absolute path in the root filesystem of the process serving this + // request. + // E.g {the plugins internal mount path}/per-alloc/allocid/volumeid/... + // + // The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the parent directory of this path exists + // and that the process serving the request has `read` and `write` + // permissions to that parent directory. + TargetPath string + + // Volume capability describing how the CO intends to use this volume. + VolumeCapability *VolumeCapability + + Readonly bool + + // Secrets required by plugins to complete the node publish volume + // request. This field is OPTIONAL. + Secrets structs.CSISecrets + + // Volume context as returned by SP in the CSI + // CreateVolumeResponse.Volume.volume_context which we don't implement but + // can be entered by hand in the volume spec. This field is OPTIONAL. 
+ VolumeContext map[string]string +} + +func (r *NodePublishVolumeRequest) ToCSIRepresentation() *csipbv1.NodePublishVolumeRequest { + if r == nil { + return nil + } + + return &csipbv1.NodePublishVolumeRequest{ + VolumeId: r.ExternalID, + PublishContext: r.PublishContext, + StagingTargetPath: r.StagingTargetPath, + TargetPath: r.TargetPath, + VolumeCapability: r.VolumeCapability.ToCSIRepresentation(), + Readonly: r.Readonly, + Secrets: r.Secrets, + VolumeContext: r.VolumeContext, + } +} + +func (r *NodePublishVolumeRequest) Validate() error { + if r.ExternalID == "" { + return errors.New("missing volume ID") + } + + if r.TargetPath == "" { + return errors.New("missing TargetPath") + } + + if r.VolumeCapability == nil { + return errors.New("missing VolumeCapabilities") + } + + return nil +} + +type NodeStageVolumeRequest struct { + // The external ID of the volume to stage. + ExternalID string + + // If the volume was attached via a call to `ControllerPublishVolume` then + // we need to provide the returned PublishContext here. + PublishContext map[string]string + + // The path to which the volume MAY be staged. It MUST be an + // absolute path in the root filesystem of the process serving this + // request, and MUST be a directory. The CO SHALL ensure that there + // is only one `staging_target_path` per volume. The CO SHALL ensure + // that the path is directory and that the process serving the + // request has `read` and `write` permission to that directory. The + // CO SHALL be responsible for creating the directory if it does not + // exist. + // This is a REQUIRED field. + StagingTargetPath string + + // Volume capability describing how the CO intends to use this volume. + VolumeCapability *VolumeCapability + + // Secrets required by plugins to complete the node stage volume + // request. This field is OPTIONAL. 
+	Secrets structs.CSISecrets
+
+	// Volume context as returned by SP in the CSI
+	// CreateVolumeResponse.Volume.volume_context which we don't implement but
+	// can be entered by hand in the volume spec. This field is OPTIONAL.
+	VolumeContext map[string]string
+}
+
+func (r *NodeStageVolumeRequest) ToCSIRepresentation() *csipbv1.NodeStageVolumeRequest {
+	if r == nil {
+		return nil
+	}
+
+	return &csipbv1.NodeStageVolumeRequest{
+		VolumeId:          r.ExternalID,
+		PublishContext:    r.PublishContext,
+		StagingTargetPath: r.StagingTargetPath,
+		VolumeCapability:  r.VolumeCapability.ToCSIRepresentation(),
+		Secrets:           r.Secrets,
+		VolumeContext:     r.VolumeContext,
+	}
+}
+
+func (r *NodeStageVolumeRequest) Validate() error {
+	if r.ExternalID == "" {
+		return errors.New("missing volume ID")
+	}
+
+	if r.StagingTargetPath == "" {
+		return errors.New("missing StagingTargetPath")
+	}
+
+	if r.VolumeCapability == nil {
+		return errors.New("missing VolumeCapabilities")
+	}
+
+	return nil
+}
+
+type PluginCapabilitySet struct {
+	hasControllerService bool
+	hasTopologies        bool
+}
+
+func (p *PluginCapabilitySet) HasControllerService() bool {
+	return p.hasControllerService
+}
+
+// HasToplogies reports whether this plugin's volumes are subject to
+// accessibility (topology) constraints, i.e. they may NOT be equally
+// accessible by all nodes in the cluster.
+// If true, we MUST use the topology information when scheduling workloads.
+// NOTE: the method name keeps its historical "Toplogies" misspelling;
+// renaming the exported method would break existing callers.
+func (p *PluginCapabilitySet) HasToplogies() bool { + return p.hasTopologies +} + +func (p *PluginCapabilitySet) IsEqual(o *PluginCapabilitySet) bool { + return p.hasControllerService == o.hasControllerService && p.hasTopologies == o.hasTopologies +} + +func NewTestPluginCapabilitySet(topologies, controller bool) *PluginCapabilitySet { + return &PluginCapabilitySet{ + hasTopologies: topologies, + hasControllerService: controller, + } +} + +func NewPluginCapabilitySet(capabilities *csipbv1.GetPluginCapabilitiesResponse) *PluginCapabilitySet { + cs := &PluginCapabilitySet{} + + pluginCapabilities := capabilities.GetCapabilities() + + for _, pcap := range pluginCapabilities { + if svcCap := pcap.GetService(); svcCap != nil { + switch svcCap.Type { + case csipbv1.PluginCapability_Service_UNKNOWN: + continue + case csipbv1.PluginCapability_Service_CONTROLLER_SERVICE: + cs.hasControllerService = true + case csipbv1.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS: + cs.hasTopologies = true + default: + continue + } + } + } + + return cs +} + +type ControllerCapabilitySet struct { + HasCreateDeleteVolume bool + HasPublishUnpublishVolume bool + HasListVolumes bool + HasGetCapacity bool + HasCreateDeleteSnapshot bool + HasListSnapshots bool + HasCloneVolume bool + HasPublishReadonly bool + HasExpandVolume bool + HasListVolumesPublishedNodes bool + HasVolumeCondition bool + HasGetVolume bool +} + +func NewControllerCapabilitySet(resp *csipbv1.ControllerGetCapabilitiesResponse) *ControllerCapabilitySet { + cs := &ControllerCapabilitySet{} + + pluginCapabilities := resp.GetCapabilities() + for _, pcap := range pluginCapabilities { + if c := pcap.GetRpc(); c != nil { + switch c.Type { + case csipbv1.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME: + cs.HasCreateDeleteVolume = true + case csipbv1.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME: + cs.HasPublishUnpublishVolume = true + case csipbv1.ControllerServiceCapability_RPC_LIST_VOLUMES: + 
cs.HasListVolumes = true + case csipbv1.ControllerServiceCapability_RPC_GET_CAPACITY: + cs.HasGetCapacity = true + case csipbv1.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT: + cs.HasCreateDeleteSnapshot = true + case csipbv1.ControllerServiceCapability_RPC_LIST_SNAPSHOTS: + cs.HasListSnapshots = true + case csipbv1.ControllerServiceCapability_RPC_CLONE_VOLUME: + cs.HasCloneVolume = true + case csipbv1.ControllerServiceCapability_RPC_PUBLISH_READONLY: + cs.HasPublishReadonly = true + case csipbv1.ControllerServiceCapability_RPC_EXPAND_VOLUME: + cs.HasExpandVolume = true + case csipbv1.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES: + cs.HasListVolumesPublishedNodes = true + case csipbv1.ControllerServiceCapability_RPC_VOLUME_CONDITION: + cs.HasVolumeCondition = true + case csipbv1.ControllerServiceCapability_RPC_GET_VOLUME: + cs.HasGetVolume = true + default: + continue + } + } + } + + return cs +} + +type ControllerValidateVolumeRequest struct { + ExternalID string + Secrets structs.CSISecrets + Capabilities []*VolumeCapability + Parameters map[string]string + Context map[string]string +} + +func (r *ControllerValidateVolumeRequest) ToCSIRepresentation() *csipbv1.ValidateVolumeCapabilitiesRequest { + if r == nil { + return nil + } + + caps := make([]*csipbv1.VolumeCapability, 0, len(r.Capabilities)) + for _, cap := range r.Capabilities { + caps = append(caps, cap.ToCSIRepresentation()) + } + + return &csipbv1.ValidateVolumeCapabilitiesRequest{ + VolumeId: r.ExternalID, + VolumeContext: r.Context, + VolumeCapabilities: caps, + Parameters: r.Parameters, + Secrets: r.Secrets, + } +} + +type ControllerPublishVolumeRequest struct { + ExternalID string + NodeID string + ReadOnly bool + VolumeCapability *VolumeCapability + Secrets structs.CSISecrets + VolumeContext map[string]string +} + +func (r *ControllerPublishVolumeRequest) ToCSIRepresentation() *csipbv1.ControllerPublishVolumeRequest { + if r == nil { + return nil + } + + return 
&csipbv1.ControllerPublishVolumeRequest{ + VolumeId: r.ExternalID, + NodeId: r.NodeID, + Readonly: r.ReadOnly, + VolumeCapability: r.VolumeCapability.ToCSIRepresentation(), + Secrets: r.Secrets, + VolumeContext: r.VolumeContext, + } +} + +func (r *ControllerPublishVolumeRequest) Validate() error { + if r.ExternalID == "" { + return errors.New("missing volume ID") + } + if r.NodeID == "" { + return errors.New("missing NodeID") + } + return nil +} + +type ControllerPublishVolumeResponse struct { + PublishContext map[string]string +} + +type ControllerUnpublishVolumeRequest struct { + ExternalID string + NodeID string + Secrets structs.CSISecrets +} + +func (r *ControllerUnpublishVolumeRequest) ToCSIRepresentation() *csipbv1.ControllerUnpublishVolumeRequest { + if r == nil { + return nil + } + + return &csipbv1.ControllerUnpublishVolumeRequest{ + VolumeId: r.ExternalID, + NodeId: r.NodeID, + Secrets: r.Secrets, + } +} + +func (r *ControllerUnpublishVolumeRequest) Validate() error { + if r.ExternalID == "" { + return errors.New("missing ExternalID") + } + if r.NodeID == "" { + // the spec allows this but it would unpublish the + // volume from all nodes + return errors.New("missing NodeID") + } + return nil +} + +type ControllerUnpublishVolumeResponse struct{} + +type ControllerCreateVolumeRequest struct { + // note that Name is intentionally differentiated from both CSIVolume.ID + // and ExternalVolumeID. 
This name is only a recommendation for the
+	// storage provider, and many will discard this suggestion
+	Name                      string
+	CapacityRange             *CapacityRange
+	VolumeCapabilities        []*VolumeCapability
+	Parameters                map[string]string
+	Secrets                   structs.CSISecrets
+	ContentSource             *VolumeContentSource
+	AccessibilityRequirements *TopologyRequirement
+}
+
+// ToCSIRepresentation converts the request into the protobuf message expected
+// by the CSI controller service. Returns nil for a nil receiver so callers
+// can pass the result through without their own nil checks.
+func (r *ControllerCreateVolumeRequest) ToCSIRepresentation() *csipbv1.CreateVolumeRequest {
+	if r == nil {
+		return nil
+	}
+	caps := make([]*csipbv1.VolumeCapability, 0, len(r.VolumeCapabilities))
+	for _, cap := range r.VolumeCapabilities {
+		caps = append(caps, cap.ToCSIRepresentation())
+	}
+	req := &csipbv1.CreateVolumeRequest{
+		Name:                      r.Name,
+		CapacityRange:             r.CapacityRange.ToCSIRepresentation(),
+		VolumeCapabilities:        caps,
+		Parameters:                r.Parameters,
+		Secrets:                   r.Secrets,
+		VolumeContentSource:       r.ContentSource.ToCSIRepresentation(),
+		AccessibilityRequirements: r.AccessibilityRequirements.ToCSIRepresentation(),
+	}
+
+	return req
+}
+
+// Validate checks that the request satisfies the invariants the CSI spec
+// requires before it is sent to the storage provider: a Name and at least one
+// VolumeCapability are mandatory; CapacityRange and ContentSource are
+// optional, but if set must be internally consistent.
+func (r *ControllerCreateVolumeRequest) Validate() error {
+	if r.Name == "" {
+		return errors.New("missing Name")
+	}
+	if r.VolumeCapabilities == nil {
+		return errors.New("missing VolumeCapabilities")
+	}
+	if r.CapacityRange != nil {
+		if r.CapacityRange.LimitBytes == 0 && r.CapacityRange.RequiredBytes == 0 {
+			return errors.New(
+				"one of LimitBytes or RequiredBytes must be set if CapacityRange is set")
+		}
+		if r.CapacityRange.LimitBytes > 0 &&
+			r.CapacityRange.LimitBytes < r.CapacityRange.RequiredBytes {
+			return errors.New("LimitBytes cannot be less than RequiredBytes")
+		}
+	}
+	if r.ContentSource != nil {
+		// The CSI spec allows at most one content source; the condition fires
+		// when both are set, so the message must say "only one", not "one of".
+		if r.ContentSource.CloneID != "" && r.ContentSource.SnapshotID != "" {
+			return errors.New(
+				"only one of SnapshotID or CloneID can be set if ContentSource is set")
+		}
+	}
+	return nil
+}
+
+// VolumeContentSource is snapshot or volume that the plugin will use to
+// create the new volume.
At most one of these fields can be set, but nil (and +// not an empty struct) is expected by CSI plugins if neither field is set. +type VolumeContentSource struct { + SnapshotID string + CloneID string +} + +func (vcr *VolumeContentSource) ToCSIRepresentation() *csipbv1.VolumeContentSource { + if vcr == nil { + return nil + } + if vcr.CloneID != "" { + return &csipbv1.VolumeContentSource{ + Type: &csipbv1.VolumeContentSource_Volume{ + Volume: &csipbv1.VolumeContentSource_VolumeSource{ + VolumeId: vcr.CloneID, + }, + }, + } + } else if vcr.SnapshotID != "" { + return &csipbv1.VolumeContentSource{ + Type: &csipbv1.VolumeContentSource_Snapshot{ + Snapshot: &csipbv1.VolumeContentSource_SnapshotSource{ + SnapshotId: vcr.SnapshotID, + }, + }, + } + } + // Nomad's RPCs will hand us an empty struct, not nil + return nil +} + +func newVolumeContentSource(src *csipbv1.VolumeContentSource) *VolumeContentSource { + return &VolumeContentSource{ + SnapshotID: src.GetSnapshot().GetSnapshotId(), + CloneID: src.GetVolume().GetVolumeId(), + } +} + +type TopologyRequirement struct { + Requisite []*Topology + Preferred []*Topology +} + +func (tr *TopologyRequirement) ToCSIRepresentation() *csipbv1.TopologyRequirement { + if tr == nil { + return nil + } + result := &csipbv1.TopologyRequirement{ + Requisite: []*csipbv1.Topology{}, + Preferred: []*csipbv1.Topology{}, + } + for _, topo := range tr.Requisite { + result.Requisite = append(result.Requisite, + &csipbv1.Topology{Segments: topo.Segments}) + } + for _, topo := range tr.Preferred { + result.Preferred = append(result.Preferred, + &csipbv1.Topology{Segments: topo.Segments}) + } + return result +} + +func newTopologies(src []*csipbv1.Topology) []*Topology { + t := []*Topology{} + for _, topo := range src { + t = append(t, &Topology{Segments: topo.Segments}) + } + return t +} + +type ControllerCreateVolumeResponse struct { + Volume *Volume +} + +func NewCreateVolumeResponse(resp *csipbv1.CreateVolumeResponse) 
*ControllerCreateVolumeResponse { + vol := resp.GetVolume() + return &ControllerCreateVolumeResponse{Volume: &Volume{ + CapacityBytes: vol.GetCapacityBytes(), + ExternalVolumeID: vol.GetVolumeId(), + VolumeContext: vol.GetVolumeContext(), + ContentSource: newVolumeContentSource(vol.GetContentSource()), + AccessibleTopology: newTopologies(vol.GetAccessibleTopology()), + }} +} + +type Volume struct { + CapacityBytes int64 + + // this is differentiated from VolumeID so as not to create confusion + // between the Nomad CSIVolume.ID and the storage provider's ID. + ExternalVolumeID string + VolumeContext map[string]string + ContentSource *VolumeContentSource + AccessibleTopology []*Topology +} + +type ControllerDeleteVolumeRequest struct { + ExternalVolumeID string + Secrets structs.CSISecrets +} + +func (r *ControllerDeleteVolumeRequest) ToCSIRepresentation() *csipbv1.DeleteVolumeRequest { + if r == nil { + return nil + } + return &csipbv1.DeleteVolumeRequest{ + VolumeId: r.ExternalVolumeID, + Secrets: r.Secrets, + } +} + +func (r *ControllerDeleteVolumeRequest) Validate() error { + if r.ExternalVolumeID == "" { + return errors.New("missing ExternalVolumeID") + } + return nil +} + +type ControllerExpandVolumeRequest struct { + ExternalVolumeID string + RequiredBytes int64 + LimitBytes int64 + Capability *VolumeCapability + Secrets structs.CSISecrets +} + +func (r *ControllerExpandVolumeRequest) Validate() error { + if r.ExternalVolumeID == "" { + return errors.New("missing ExternalVolumeID") + } + if r.LimitBytes == 0 && r.RequiredBytes == 0 { + return errors.New("one of LimitBytes or RequiredBytes must be set") + } + // per the spec: "A value of 0 is equal to an unspecified field value." + // so in this case, only error if both are set. 
+ if r.LimitBytes > 0 && (r.LimitBytes < r.RequiredBytes) { + return errors.New("LimitBytes cannot be less than RequiredBytes") + } + return nil +} + +func (r *ControllerExpandVolumeRequest) ToCSIRepresentation() *csipbv1.ControllerExpandVolumeRequest { + if r == nil { + return nil + } + return &csipbv1.ControllerExpandVolumeRequest{ + VolumeId: r.ExternalVolumeID, + CapacityRange: &csipbv1.CapacityRange{ + RequiredBytes: r.RequiredBytes, + LimitBytes: r.LimitBytes, + }, + Secrets: r.Secrets, + VolumeCapability: r.Capability.ToCSIRepresentation(), + } +} + +type ControllerExpandVolumeResponse struct { + CapacityBytes int64 + NodeExpansionRequired bool +} + +type ControllerListVolumesRequest struct { + MaxEntries int32 + StartingToken string +} + +func (r *ControllerListVolumesRequest) ToCSIRepresentation() *csipbv1.ListVolumesRequest { + if r == nil { + return nil + } + return &csipbv1.ListVolumesRequest{ + MaxEntries: r.MaxEntries, + StartingToken: r.StartingToken, + } +} + +func (r *ControllerListVolumesRequest) Validate() error { + if r.MaxEntries < 0 { + return errors.New("MaxEntries cannot be negative") + } + return nil +} + +type ControllerListVolumesResponse struct { + Entries []*ListVolumesResponse_Entry + NextToken string +} + +func NewListVolumesResponse(resp *csipbv1.ListVolumesResponse) *ControllerListVolumesResponse { + if resp == nil { + return &ControllerListVolumesResponse{} + } + entries := []*ListVolumesResponse_Entry{} + if resp.Entries != nil { + for _, entry := range resp.Entries { + vol := entry.GetVolume() + status := entry.GetStatus() + entries = append(entries, &ListVolumesResponse_Entry{ + Volume: &Volume{ + CapacityBytes: vol.CapacityBytes, + ExternalVolumeID: vol.VolumeId, + VolumeContext: vol.VolumeContext, + ContentSource: newVolumeContentSource(vol.ContentSource), + AccessibleTopology: newTopologies(vol.AccessibleTopology), + }, + Status: &ListVolumesResponse_VolumeStatus{ + PublishedNodeIds: status.GetPublishedNodeIds(), + 
VolumeCondition: &VolumeCondition{ + Abnormal: status.GetVolumeCondition().GetAbnormal(), + Message: status.GetVolumeCondition().GetMessage(), + }, + }, + }) + } + } + return &ControllerListVolumesResponse{ + Entries: entries, + NextToken: resp.NextToken, + } +} + +type ListVolumesResponse_Entry struct { + Volume *Volume + Status *ListVolumesResponse_VolumeStatus +} + +type ListVolumesResponse_VolumeStatus struct { + PublishedNodeIds []string + VolumeCondition *VolumeCondition +} + +type VolumeCondition struct { + Abnormal bool + Message string +} + +type ControllerCreateSnapshotRequest struct { + VolumeID string + Name string + Secrets structs.CSISecrets + Parameters map[string]string +} + +func (r *ControllerCreateSnapshotRequest) ToCSIRepresentation() *csipbv1.CreateSnapshotRequest { + return &csipbv1.CreateSnapshotRequest{ + SourceVolumeId: r.VolumeID, + Name: r.Name, + Secrets: r.Secrets, + Parameters: r.Parameters, + } +} + +func (r *ControllerCreateSnapshotRequest) Validate() error { + if r.VolumeID == "" { + return errors.New("missing VolumeID") + } + if r.Name == "" { + return errors.New("missing Name") + } + return nil +} + +type ControllerCreateSnapshotResponse struct { + Snapshot *Snapshot +} + +type Snapshot struct { + ID string + SourceVolumeID string + SizeBytes int64 + CreateTime int64 + IsReady bool +} + +type ControllerDeleteSnapshotRequest struct { + SnapshotID string + Secrets structs.CSISecrets +} + +func (r *ControllerDeleteSnapshotRequest) ToCSIRepresentation() *csipbv1.DeleteSnapshotRequest { + return &csipbv1.DeleteSnapshotRequest{ + SnapshotId: r.SnapshotID, + Secrets: r.Secrets, + } +} + +func (r *ControllerDeleteSnapshotRequest) Validate() error { + if r.SnapshotID == "" { + return errors.New("missing SnapshotID") + } + return nil +} + +type ControllerListSnapshotsRequest struct { + MaxEntries int32 + StartingToken string + Secrets structs.CSISecrets +} + +func (r *ControllerListSnapshotsRequest) ToCSIRepresentation() 
*csipbv1.ListSnapshotsRequest { + return &csipbv1.ListSnapshotsRequest{ + MaxEntries: r.MaxEntries, + StartingToken: r.StartingToken, + Secrets: r.Secrets, + } +} + +func (r *ControllerListSnapshotsRequest) Validate() error { + if r.MaxEntries < 0 { + return errors.New("MaxEntries cannot be negative") + } + return nil +} + +func NewListSnapshotsResponse(resp *csipbv1.ListSnapshotsResponse) *ControllerListSnapshotsResponse { + if resp == nil { + return &ControllerListSnapshotsResponse{} + } + entries := []*ListSnapshotsResponse_Entry{} + if resp.Entries != nil { + for _, entry := range resp.Entries { + snap := entry.GetSnapshot() + entries = append(entries, &ListSnapshotsResponse_Entry{ + Snapshot: &Snapshot{ + SizeBytes: snap.GetSizeBytes(), + ID: snap.GetSnapshotId(), + SourceVolumeID: snap.GetSourceVolumeId(), + CreateTime: snap.GetCreationTime().GetSeconds(), + IsReady: snap.GetReadyToUse(), + }, + }) + } + } + return &ControllerListSnapshotsResponse{ + Entries: entries, + NextToken: resp.NextToken, + } +} + +type ControllerListSnapshotsResponse struct { + Entries []*ListSnapshotsResponse_Entry + NextToken string +} + +type ListSnapshotsResponse_Entry struct { + Snapshot *Snapshot +} + +type NodeCapabilitySet struct { + HasStageUnstageVolume bool + HasGetVolumeStats bool + HasExpandVolume bool + HasVolumeCondition bool +} + +func NewNodeCapabilitySet(resp *csipbv1.NodeGetCapabilitiesResponse) *NodeCapabilitySet { + cs := &NodeCapabilitySet{} + pluginCapabilities := resp.GetCapabilities() + for _, pcap := range pluginCapabilities { + if c := pcap.GetRpc(); c != nil { + switch c.Type { + case csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME: + cs.HasStageUnstageVolume = true + case csipbv1.NodeServiceCapability_RPC_GET_VOLUME_STATS: + cs.HasGetVolumeStats = true + case csipbv1.NodeServiceCapability_RPC_EXPAND_VOLUME: + cs.HasExpandVolume = true + case csipbv1.NodeServiceCapability_RPC_VOLUME_CONDITION: + cs.HasVolumeCondition = true + default: + continue + } 
+ } + } + + return cs +} + +// VolumeAccessMode represents the desired access mode of the CSI Volume +type VolumeAccessMode csipbv1.VolumeCapability_AccessMode_Mode + +var _ fmt.Stringer = VolumeAccessModeUnknown + +var ( + VolumeAccessModeUnknown = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_UNKNOWN) + VolumeAccessModeSingleNodeWriter = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_WRITER) + VolumeAccessModeSingleNodeReaderOnly = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY) + VolumeAccessModeMultiNodeReaderOnly = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY) + VolumeAccessModeMultiNodeSingleWriter = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER) + VolumeAccessModeMultiNodeMultiWriter = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER) +) + +func (a VolumeAccessMode) String() string { + return a.ToCSIRepresentation().String() +} + +func (a VolumeAccessMode) ToCSIRepresentation() csipbv1.VolumeCapability_AccessMode_Mode { + return csipbv1.VolumeCapability_AccessMode_Mode(a) +} + +// VolumeAccessType represents the filesystem apis that the user intends to use +// with the volume. E.g whether it will be used as a block device or if they wish +// to have a mounted filesystem. 
+type VolumeAccessType int32 + +var _ fmt.Stringer = VolumeAccessTypeBlock + +var ( + VolumeAccessTypeBlock VolumeAccessType = 1 + VolumeAccessTypeMount VolumeAccessType = 2 +) + +func (v VolumeAccessType) String() string { + if v == VolumeAccessTypeBlock { + return "VolumeAccessType.Block" + } else if v == VolumeAccessTypeMount { + return "VolumeAccessType.Mount" + } else { + return "VolumeAccessType.Unspecified" + } +} + +// VolumeCapability describes the overall usage requirements for a given CSI Volume +type VolumeCapability struct { + AccessType VolumeAccessType + AccessMode VolumeAccessMode + + // Indicate that the volume will be accessed via the filesystem API. + MountVolume *structs.CSIMountOptions +} + +func VolumeCapabilityFromStructs(sAccessType structs.VolumeAttachmentMode, sAccessMode structs.VolumeAccessMode, sMountOptions *structs.CSIMountOptions) (*VolumeCapability, error) { + var accessType VolumeAccessType + switch sAccessType { + case structs.CSIVolumeAttachmentModeBlockDevice: + accessType = VolumeAccessTypeBlock + case structs.CSIVolumeAttachmentModeFilesystem: + accessType = VolumeAccessTypeMount + default: + // These fields are validated during job submission, but here we perform a + // final check during transformation into the requisite CSI Data type to + // defend against development bugs and corrupted state - and incompatible + // nomad versions in the future. 
+ return nil, fmt.Errorf("unknown volume attachment mode: %s", sAccessType) + } + + var accessMode VolumeAccessMode + switch sAccessMode { + case structs.CSIVolumeAccessModeSingleNodeReader: + accessMode = VolumeAccessModeSingleNodeReaderOnly + case structs.CSIVolumeAccessModeSingleNodeWriter: + accessMode = VolumeAccessModeSingleNodeWriter + case structs.CSIVolumeAccessModeMultiNodeMultiWriter: + accessMode = VolumeAccessModeMultiNodeMultiWriter + case structs.CSIVolumeAccessModeMultiNodeSingleWriter: + accessMode = VolumeAccessModeMultiNodeSingleWriter + case structs.CSIVolumeAccessModeMultiNodeReader: + accessMode = VolumeAccessModeMultiNodeReaderOnly + default: + // These fields are validated during job submission, but here we perform a + // final check during transformation into the requisite CSI Data type to + // defend against development bugs and corrupted state - and incompatible + // nomad versions in the future. + return nil, fmt.Errorf("unknown volume access mode: %v", sAccessMode) + } + + return &VolumeCapability{ + AccessType: accessType, + AccessMode: accessMode, + MountVolume: sMountOptions, + }, nil +} + +func (c *VolumeCapability) ToCSIRepresentation() *csipbv1.VolumeCapability { + if c == nil { + return nil + } + + vc := &csipbv1.VolumeCapability{ + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: c.AccessMode.ToCSIRepresentation(), + }, + } + + if c.AccessType == VolumeAccessTypeMount { + opts := &csipbv1.VolumeCapability_MountVolume{} + if c.MountVolume != nil { + opts.FsType = c.MountVolume.FSType + opts.MountFlags = c.MountVolume.MountFlags + } + vc.AccessType = &csipbv1.VolumeCapability_Mount{Mount: opts} + } else { + vc.AccessType = &csipbv1.VolumeCapability_Block{Block: &csipbv1.VolumeCapability_BlockVolume{}} + } + + return vc +} + +type CapacityRange struct { + RequiredBytes int64 + LimitBytes int64 +} + +func (c *CapacityRange) Validate() error { + if c == nil { + return nil + } + if c.RequiredBytes == 0 && c.LimitBytes == 0 { 
+ return errors.New("either RequiredBytes or LimitBytes must be set") + } + if c.LimitBytes > 0 && c.LimitBytes < c.RequiredBytes { + return errors.New("LimitBytes cannot be less than RequiredBytes") + } + return nil +} + +func (c *CapacityRange) ToCSIRepresentation() *csipbv1.CapacityRange { + if c == nil { + return nil + } + return &csipbv1.CapacityRange{ + RequiredBytes: c.RequiredBytes, + LimitBytes: c.LimitBytes, + } +} + +type NodeExpandVolumeRequest struct { + ExternalVolumeID string + CapacityRange *CapacityRange + Capability *VolumeCapability + TargetPath string + StagingPath string +} + +func (r *NodeExpandVolumeRequest) Validate() error { + var err error + if r.ExternalVolumeID == "" { + err = errors.Join(err, errors.New("ExternalVolumeID is required")) + } + if r.TargetPath == "" { + err = errors.Join(err, errors.New("TargetPath is required")) + } + if e := r.CapacityRange.Validate(); e != nil { + err = errors.Join(err, e) + } + return err +} + +func (r *NodeExpandVolumeRequest) ToCSIRepresentation() *csipbv1.NodeExpandVolumeRequest { + if r == nil { + return nil + } + return &csipbv1.NodeExpandVolumeRequest{ + VolumeId: r.ExternalVolumeID, + VolumePath: r.TargetPath, + StagingTargetPath: r.StagingPath, + CapacityRange: r.CapacityRange.ToCSIRepresentation(), + VolumeCapability: r.Capability.ToCSIRepresentation(), + } +} + +type NodeExpandVolumeResponse struct { + CapacityBytes int64 +} diff --git a/plugin_interface/csi/structs/csi.go b/plugin_interface/csi/structs/csi.go new file mode 100644 index 00000000000..a697ba8c532 --- /dev/null +++ b/plugin_interface/csi/structs/csi.go @@ -0,0 +1,181 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package structs + +import ( + "fmt" + "slices" + + "github.com/hashicorp/nomad/plugin-interface/helper" +) + +// CSISocketName is the filename that Nomad expects plugins to create inside the +// PluginMountDir. 
+const CSISocketName = "csi.sock" + +// CSIIntermediaryDirname is the name of the directory inside the PluginMountDir +// where Nomad will expect plugins to create intermediary mounts for volumes. +const CSIIntermediaryDirname = "volumes" + +// VolumeTypeCSI is the type in the volume block of a TaskGroup +const VolumeTypeCSI = "csi" + +// CSIPluginType is an enum string that encapsulates the valid options for a +// CSIPlugin block's Type. These modes will allow the plugin to be used in +// different ways by the client. +type CSIPluginType string + +const ( + // CSIPluginTypeNode indicates that Nomad should only use the plugin for + // performing Node RPCs against the provided plugin. + CSIPluginTypeNode CSIPluginType = "node" + + // CSIPluginTypeController indicates that Nomad should only use the plugin for + // performing Controller RPCs against the provided plugin. + CSIPluginTypeController CSIPluginType = "controller" + + // CSIPluginTypeMonolith indicates that Nomad can use the provided plugin for + // both controller and node rpcs. + CSIPluginTypeMonolith CSIPluginType = "monolith" +) + +// CSIPluginTypeIsValid validates the given CSIPluginType string and returns +// true only when a correct plugin type is specified. 
+func CSIPluginTypeIsValid(pt CSIPluginType) bool { + switch pt { + case CSIPluginTypeNode, CSIPluginTypeController, CSIPluginTypeMonolith: + return true + default: + return false + } +} + +// CSIVolumeCapability is the requested attachment and access mode for a +// volume +type CSIVolumeCapability struct { + AttachmentMode VolumeAttachmentMode + AccessMode VolumeAccessMode +} + +const ( + CSIVolumeAttachmentModeUnknown VolumeAttachmentMode = "" + CSIVolumeAttachmentModeBlockDevice VolumeAttachmentMode = "block-device" + CSIVolumeAttachmentModeFilesystem VolumeAttachmentMode = "file-system" +) + +const ( + CSIVolumeAccessModeUnknown VolumeAccessMode = "" + + CSIVolumeAccessModeSingleNodeReader VolumeAccessMode = "single-node-reader-only" + CSIVolumeAccessModeSingleNodeWriter VolumeAccessMode = "single-node-writer" + + CSIVolumeAccessModeMultiNodeReader VolumeAccessMode = "multi-node-reader-only" + CSIVolumeAccessModeMultiNodeSingleWriter VolumeAccessMode = "multi-node-single-writer" + CSIVolumeAccessModeMultiNodeMultiWriter VolumeAccessMode = "multi-node-multi-writer" +) + +// CSIMountOptions contain optional additional configuration that can be used +// when specifying that a Volume should be used with VolumeAccessTypeMount. +type CSIMountOptions struct { + // FSType is an optional field that allows an operator to specify the type + // of the filesystem. + FSType string + + // MountFlags contains additional options that may be used when mounting the + // volume by the plugin. This may contain sensitive data and should not be + // leaked. 
+ MountFlags []string +} + +func (o *CSIMountOptions) Copy() *CSIMountOptions { + if o == nil { + return nil + } + + no := *o + no.MountFlags = slices.Clone(o.MountFlags) + return &no +} + +func (o *CSIMountOptions) Merge(p *CSIMountOptions) { + if p == nil { + return + } + if p.FSType != "" { + o.FSType = p.FSType + } + if p.MountFlags != nil { + o.MountFlags = p.MountFlags + } +} + +func (o *CSIMountOptions) Equal(p *CSIMountOptions) bool { + if o == nil && p == nil { + return true + } + if o == nil || p == nil { + return false + } + if o.FSType != p.FSType { + return false + } + return helper.SliceSetEq(o.MountFlags, p.MountFlags) +} + +// CSIMountOptions implements the Stringer and GoStringer interfaces to prevent +// accidental leakage of sensitive mount flags via logs. +var _ fmt.Stringer = &CSIMountOptions{} +var _ fmt.GoStringer = &CSIMountOptions{} + +func (o *CSIMountOptions) String() string { + mountFlagsString := "nil" + if len(o.MountFlags) != 0 { + mountFlagsString = "[REDACTED]" + } + + return fmt.Sprintf("csi.CSIOptions(FSType: %s, MountFlags: %s)", o.FSType, mountFlagsString) +} + +func (o *CSIMountOptions) GoString() string { + return o.String() +} + +// Sanitize returns a copy of the CSIMountOptions with sensitive data redacted +func (o *CSIMountOptions) Sanitize() *CSIMountOptions { + redacted := *o + if len(o.MountFlags) != 0 { + redacted.MountFlags = []string{"[REDACTED]"} + } + return &redacted +} + +// CSISecrets contain optional additional configuration that can be used +// when specifying that a Volume should be used with VolumeAccessTypeMount. +type CSISecrets map[string]string + +// CSISecrets implements the Stringer and GoStringer interfaces to prevent +// accidental leakage of secrets via logs. 
+var _ fmt.Stringer = &CSISecrets{} +var _ fmt.GoStringer = &CSISecrets{} + +func (s *CSISecrets) String() string { + redacted := map[string]string{} + for k := range *s { + redacted[k] = "[REDACTED]" + } + return fmt.Sprintf("csi.CSISecrets(%v)", redacted) +} + +func (s *CSISecrets) GoString() string { + return s.String() +} + +// Sanitize returns a copy of the CSISecrets with sensitive data redacted +func (s *CSISecrets) Sanitize() *CSISecrets { + redacted := CSISecrets{} + for k := range *s { + redacted[k] = "[REDACTED]" + } + return &redacted +} diff --git a/plugin_interface/csi/structs/volumes.go b/plugin_interface/csi/structs/volumes.go new file mode 100644 index 00000000000..cd9337af919 --- /dev/null +++ b/plugin_interface/csi/structs/volumes.go @@ -0,0 +1,207 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: BUSL-1.1 + +package structs + +import ( + "fmt" + + multierror "github.com/hashicorp/go-multierror" +) + +const ( + VolumeTypeHost = "host" + + VolumeMountPropagationPrivate = "private" + VolumeMountPropagationHostToTask = "host-to-task" + VolumeMountPropagationBidirectional = "bidirectional" + + SELinuxSharedVolume = "z" + SELinuxPrivateVolume = "Z" +) + +var ( + errVolMountInvalidPropagationMode = fmt.Errorf("volume mount has an invalid propagation mode") + errVolMountInvalidSELinuxLabel = fmt.Errorf("volume mount has an invalid SELinux label") + errVolMountEmptyVol = fmt.Errorf("volume mount references an empty volume") +) + +// ClientHostVolumeConfig is used to configure access to host paths on a Nomad Client +type ClientHostVolumeConfig struct { + Name string `hcl:",key"` + Path string `hcl:"path"` + ReadOnly bool `hcl:"read_only"` + // ID is set for dynamic host volumes only. 
+ ID string `hcl:"-"` +} + +func (p *ClientHostVolumeConfig) Equal(o *ClientHostVolumeConfig) bool { + if p == nil && o == nil { + return true + } + if p == nil || o == nil { + return false + } + return *p == *o +} + +func (p *ClientHostVolumeConfig) Copy() *ClientHostVolumeConfig { + if p == nil { + return nil + } + + c := new(ClientHostVolumeConfig) + *c = *p + return c +} + +func CopyMapStringClientHostVolumeConfig(m map[string]*ClientHostVolumeConfig) map[string]*ClientHostVolumeConfig { + if m == nil { + return nil + } + + nm := make(map[string]*ClientHostVolumeConfig, len(m)) + for k, v := range m { + nm[k] = v.Copy() + } + + return nm +} + +func CopySliceClientHostVolumeConfig(s []*ClientHostVolumeConfig) []*ClientHostVolumeConfig { + l := len(s) + if l == 0 { + return nil + } + + ns := make([]*ClientHostVolumeConfig, l) + for idx, cfg := range s { + ns[idx] = cfg.Copy() + } + + return ns +} + +func HostVolumeSliceMerge(a, b []*ClientHostVolumeConfig) []*ClientHostVolumeConfig { + n := make([]*ClientHostVolumeConfig, len(a)) + seenKeys := make(map[string]int, len(a)) + + for i, config := range a { + n[i] = config.Copy() + seenKeys[config.Name] = i + } + + for _, config := range b { + if fIndex, ok := seenKeys[config.Name]; ok { + n[fIndex] = config.Copy() + continue + } + + n = append(n, config.Copy()) + } + + return n +} + +// VolumeAttachmentMode chooses the type of storage api that will be used to +// interact with the device. +type VolumeAttachmentMode string + +// VolumeAccessMode indicates how a volume should be used in a storage topology +// e.g whether the provider should make the volume available concurrently. +type VolumeAccessMode string + +// VolumeMount represents the relationship between a destination path in a task +// and the task group volume that should be mounted there. 
+type VolumeMount struct { + Volume string + Destination string + ReadOnly bool + PropagationMode string + SELinuxLabel string +} + +// Hash is a very basic string based implementation of a hasher. +func (v *VolumeMount) Hash() string { + return fmt.Sprintf("%#+v", v) +} + +func (v *VolumeMount) Equal(o *VolumeMount) bool { + if v == nil || o == nil { + return v == o + } + switch { + case v.Volume != o.Volume: + return false + case v.Destination != o.Destination: + return false + case v.ReadOnly != o.ReadOnly: + return false + case v.PropagationMode != o.PropagationMode: + return false + case v.SELinuxLabel != o.SELinuxLabel: + return false + } + + return true +} + +func (v *VolumeMount) Copy() *VolumeMount { + if v == nil { + return nil + } + + nv := new(VolumeMount) + *nv = *v + return nv +} + +func (v *VolumeMount) Validate() error { + var mErr *multierror.Error + + // Validate the task does not reference undefined volume mounts + if v.Volume == "" { + mErr = multierror.Append(mErr, errVolMountEmptyVol) + } + + if !v.MountPropagationModeIsValid() { + mErr = multierror.Append(mErr, fmt.Errorf("%w: %q", errVolMountInvalidPropagationMode, v.PropagationMode)) + } + + if !v.SELinuxLabelIsValid() { + mErr = multierror.Append(mErr, fmt.Errorf("%w: \"%s\"", errVolMountInvalidSELinuxLabel, v.SELinuxLabel)) + } + + return mErr.ErrorOrNil() +} + +func (v *VolumeMount) MountPropagationModeIsValid() bool { + switch v.PropagationMode { + case "", VolumeMountPropagationPrivate, VolumeMountPropagationHostToTask, VolumeMountPropagationBidirectional: + return true + default: + return false + } +} + +func (v *VolumeMount) SELinuxLabelIsValid() bool { + switch v.SELinuxLabel { + case "", SELinuxSharedVolume, SELinuxPrivateVolume: + return true + default: + return false + } +} + +func CopySliceVolumeMount(s []*VolumeMount) []*VolumeMount { + l := len(s) + if l == 0 { + return nil + } + + c := make([]*VolumeMount, l) + for i, v := range s { + c[i] = v.Copy() + } + return c +} diff 
--git a/plugin_interface/csi/testing/client.go b/plugin_interface/csi/testing/client.go new file mode 100644 index 00000000000..fb1f8c32154 --- /dev/null +++ b/plugin_interface/csi/testing/client.go @@ -0,0 +1,199 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package testing + +import ( + "context" + "fmt" + + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/grpc" +) + +// IdentityClient is a CSI identity client used for testing +type IdentityClient struct { + NextErr error + NextPluginInfo *csipbv1.GetPluginInfoResponse + NextPluginCapabilities *csipbv1.GetPluginCapabilitiesResponse + NextPluginProbe *csipbv1.ProbeResponse +} + +// NewIdentityClient returns a new IdentityClient +func NewIdentityClient() *IdentityClient { + return &IdentityClient{} +} + +func (f *IdentityClient) Reset() { + f.NextErr = nil + f.NextPluginInfo = nil + f.NextPluginCapabilities = nil + f.NextPluginProbe = nil +} + +// GetPluginInfo returns plugin info +func (f *IdentityClient) GetPluginInfo(ctx context.Context, in *csipbv1.GetPluginInfoRequest, opts ...grpc.CallOption) (*csipbv1.GetPluginInfoResponse, error) { + return f.NextPluginInfo, f.NextErr +} + +// GetPluginCapabilities implements csi method +func (f *IdentityClient) GetPluginCapabilities(ctx context.Context, in *csipbv1.GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.GetPluginCapabilitiesResponse, error) { + return f.NextPluginCapabilities, f.NextErr +} + +// Probe implements csi method +func (f *IdentityClient) Probe(ctx context.Context, in *csipbv1.ProbeRequest, opts ...grpc.CallOption) (*csipbv1.ProbeResponse, error) { + return f.NextPluginProbe, f.NextErr +} + +// ControllerClient is a CSI controller client used for testing +type ControllerClient struct { + NextErr error + NextCapabilitiesResponse *csipbv1.ControllerGetCapabilitiesResponse + NextPublishVolumeResponse *csipbv1.ControllerPublishVolumeResponse + NextUnpublishVolumeResponse 
*csipbv1.ControllerUnpublishVolumeResponse + NextValidateVolumeCapabilitiesResponse *csipbv1.ValidateVolumeCapabilitiesResponse + NextCreateVolumeResponse *csipbv1.CreateVolumeResponse + NextExpandVolumeResponse *csipbv1.ControllerExpandVolumeResponse + LastExpandVolumeRequest *csipbv1.ControllerExpandVolumeRequest + NextDeleteVolumeResponse *csipbv1.DeleteVolumeResponse + NextListVolumesResponse *csipbv1.ListVolumesResponse + NextCreateSnapshotResponse *csipbv1.CreateSnapshotResponse + NextDeleteSnapshotResponse *csipbv1.DeleteSnapshotResponse + NextListSnapshotsResponse *csipbv1.ListSnapshotsResponse +} + +// NewControllerClient returns a new ControllerClient +func NewControllerClient() *ControllerClient { + return &ControllerClient{} +} + +func (c *ControllerClient) Reset() { + c.NextErr = nil + c.NextCapabilitiesResponse = nil + c.NextPublishVolumeResponse = nil + c.NextUnpublishVolumeResponse = nil + c.NextValidateVolumeCapabilitiesResponse = nil + c.NextCreateVolumeResponse = nil + c.NextExpandVolumeResponse = nil + c.LastExpandVolumeRequest = nil + c.NextDeleteVolumeResponse = nil + c.NextListVolumesResponse = nil + c.NextCreateSnapshotResponse = nil + c.NextDeleteSnapshotResponse = nil + c.NextListSnapshotsResponse = nil +} + +func (c *ControllerClient) ControllerGetCapabilities(ctx context.Context, in *csipbv1.ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ControllerGetCapabilitiesResponse, error) { + return c.NextCapabilitiesResponse, c.NextErr +} + +func (c *ControllerClient) ControllerPublishVolume(ctx context.Context, in *csipbv1.ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerPublishVolumeResponse, error) { + return c.NextPublishVolumeResponse, c.NextErr +} + +func (c *ControllerClient) ControllerUnpublishVolume(ctx context.Context, in *csipbv1.ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerUnpublishVolumeResponse, error) { + return c.NextUnpublishVolumeResponse, 
c.NextErr +} + +func (c *ControllerClient) ValidateVolumeCapabilities(ctx context.Context, in *csipbv1.ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ValidateVolumeCapabilitiesResponse, error) { + return c.NextValidateVolumeCapabilitiesResponse, c.NextErr +} + +func (c *ControllerClient) CreateVolume(ctx context.Context, in *csipbv1.CreateVolumeRequest, opts ...grpc.CallOption) (*csipbv1.CreateVolumeResponse, error) { + if in.VolumeContentSource != nil { + if in.VolumeContentSource.Type == nil || (in.VolumeContentSource.Type == + &csipbv1.VolumeContentSource_Volume{ + Volume: &csipbv1.VolumeContentSource_VolumeSource{VolumeId: ""}, + }) || (in.VolumeContentSource.Type == + &csipbv1.VolumeContentSource_Snapshot{ + Snapshot: &csipbv1.VolumeContentSource_SnapshotSource{SnapshotId: ""}, + }) { + return nil, fmt.Errorf("empty content source should be nil") + } + } + return c.NextCreateVolumeResponse, c.NextErr +} + +func (c *ControllerClient) ControllerExpandVolume(ctx context.Context, in *csipbv1.ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerExpandVolumeResponse, error) { + c.LastExpandVolumeRequest = in + return c.NextExpandVolumeResponse, c.NextErr +} + +func (c *ControllerClient) DeleteVolume(ctx context.Context, in *csipbv1.DeleteVolumeRequest, opts ...grpc.CallOption) (*csipbv1.DeleteVolumeResponse, error) { + return c.NextDeleteVolumeResponse, c.NextErr +} + +func (c *ControllerClient) ListVolumes(ctx context.Context, in *csipbv1.ListVolumesRequest, opts ...grpc.CallOption) (*csipbv1.ListVolumesResponse, error) { + return c.NextListVolumesResponse, c.NextErr +} + +func (c *ControllerClient) CreateSnapshot(ctx context.Context, in *csipbv1.CreateSnapshotRequest, opts ...grpc.CallOption) (*csipbv1.CreateSnapshotResponse, error) { + return c.NextCreateSnapshotResponse, c.NextErr +} + +func (c *ControllerClient) DeleteSnapshot(ctx context.Context, in *csipbv1.DeleteSnapshotRequest, opts ...grpc.CallOption) 
(*csipbv1.DeleteSnapshotResponse, error) { + return c.NextDeleteSnapshotResponse, c.NextErr +} + +func (c *ControllerClient) ListSnapshots(ctx context.Context, in *csipbv1.ListSnapshotsRequest, opts ...grpc.CallOption) (*csipbv1.ListSnapshotsResponse, error) { + return c.NextListSnapshotsResponse, c.NextErr +} + +// NodeClient is a CSI Node client used for testing +type NodeClient struct { + NextErr error + NextCapabilitiesResponse *csipbv1.NodeGetCapabilitiesResponse + NextGetInfoResponse *csipbv1.NodeGetInfoResponse + NextStageVolumeResponse *csipbv1.NodeStageVolumeResponse + NextUnstageVolumeResponse *csipbv1.NodeUnstageVolumeResponse + NextPublishVolumeResponse *csipbv1.NodePublishVolumeResponse + NextUnpublishVolumeResponse *csipbv1.NodeUnpublishVolumeResponse + NextExpandVolumeResponse *csipbv1.NodeExpandVolumeResponse + LastExpandVolumeRequest *csipbv1.NodeExpandVolumeRequest +} + +// NewNodeClient returns a new stub NodeClient +func NewNodeClient() *NodeClient { + return &NodeClient{} +} + +func (c *NodeClient) Reset() { + c.NextErr = nil + c.NextCapabilitiesResponse = nil + c.NextGetInfoResponse = nil + c.NextStageVolumeResponse = nil + c.NextUnstageVolumeResponse = nil + c.NextPublishVolumeResponse = nil + c.NextUnpublishVolumeResponse = nil + c.NextExpandVolumeResponse = nil +} + +func (c *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipbv1.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetCapabilitiesResponse, error) { + return c.NextCapabilitiesResponse, c.NextErr +} + +func (c *NodeClient) NodeGetInfo(ctx context.Context, in *csipbv1.NodeGetInfoRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetInfoResponse, error) { + return c.NextGetInfoResponse, c.NextErr +} + +func (c *NodeClient) NodeStageVolume(ctx context.Context, in *csipbv1.NodeStageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeStageVolumeResponse, error) { + return c.NextStageVolumeResponse, c.NextErr +} + +func (c *NodeClient) NodeUnstageVolume(ctx 
context.Context, in *csipbv1.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnstageVolumeResponse, error) { + return c.NextUnstageVolumeResponse, c.NextErr +} + +func (c *NodeClient) NodePublishVolume(ctx context.Context, in *csipbv1.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodePublishVolumeResponse, error) { + return c.NextPublishVolumeResponse, c.NextErr +} + +func (c *NodeClient) NodeUnpublishVolume(ctx context.Context, in *csipbv1.NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnpublishVolumeResponse, error) { + return c.NextUnpublishVolumeResponse, c.NextErr +} + +func (c *NodeClient) NodeExpandVolume(ctx context.Context, in *csipbv1.NodeExpandVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeExpandVolumeResponse, error) { + c.LastExpandVolumeRequest = in + return c.NextExpandVolumeResponse, c.NextErr +} diff --git a/plugin_interface/device/client.go b/plugin_interface/device/client.go new file mode 100644 index 00000000000..cf4d6e3437e --- /dev/null +++ b/plugin_interface/device/client.go @@ -0,0 +1,153 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package device + +import ( + "context" + "io" + "time" + + "github.com/LK4D4/joincontext" + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/nomad/plugin-interface/base" + "github.com/hashicorp/nomad/plugin-interface/device/proto" + "github.com/hashicorp/nomad/plugin-interface/helper" +) + +// devicePluginClient implements the client side of a remote device plugin, using +// gRPC to communicate to the remote plugin. +type devicePluginClient struct { + // basePluginClient is embedded to give access to the base plugin methods. + *base.BasePluginClient + + client proto.DevicePluginClient + + // doneCtx is closed when the plugin exits + doneCtx context.Context +} + +// Fingerprint is used to retrieve the set of devices and their health from the +// device plugin. 
An error may be immediately returned if the fingerprint call +// could not be made or as part of the streaming response. If the context is +// cancelled, the error will be propagated. +func (d *devicePluginClient) Fingerprint(ctx context.Context) (<-chan *FingerprintResponse, error) { + // Join the passed context and the shutdown context + joinedCtx, _ := joincontext.Join(ctx, d.doneCtx) + + var req proto.FingerprintRequest + stream, err := d.client.Fingerprint(joinedCtx, &req) + if err != nil { + return nil, helper.HandleReqCtxGrpcErr(err, ctx, d.doneCtx) + } + + out := make(chan *FingerprintResponse, 1) + go d.handleFingerprint(ctx, stream, out) + return out, nil +} + +// handleFingerprint should be launched in a goroutine and handles converting +// the gRPC stream to a channel. Exits either when context is cancelled or the +// stream has an error. +func (d *devicePluginClient) handleFingerprint( + reqCtx context.Context, + stream proto.DevicePlugin_FingerprintClient, + out chan *FingerprintResponse) { + + defer close(out) + for { + resp, err := stream.Recv() + if err != nil { + if err != io.EOF { + out <- &FingerprintResponse{ + Error: helper.HandleReqCtxGrpcErr(err, reqCtx, d.doneCtx), + } + } + + // End the stream + return + } + + // Send the response + f := &FingerprintResponse{ + Devices: convertProtoDeviceGroups(resp.GetDeviceGroup()), + } + select { + case <-reqCtx.Done(): + return + case out <- f: + } + } +} + +func (d *devicePluginClient) Reserve(deviceIDs []string) (*ContainerReservation, error) { + // Build the request + req := &proto.ReserveRequest{ + DeviceIds: deviceIDs, + } + + // Make the request + resp, err := d.client.Reserve(d.doneCtx, req) + if err != nil { + return nil, helper.HandleGrpcErr(err, d.doneCtx) + } + + // Convert the response + out := convertProtoContainerReservation(resp.GetContainerRes()) + return out, nil +} + +// Stats is used to retrieve device statistics from the device plugin. 
An error +// may be immediately returned if the stats call could not be made or as part of +// the streaming response. If the context is cancelled, the error will be +// propagated. +func (d *devicePluginClient) Stats(ctx context.Context, interval time.Duration) (<-chan *StatsResponse, error) { + // Join the passed context and the shutdown context + joinedCtx, _ := joincontext.Join(ctx, d.doneCtx) + + req := proto.StatsRequest{ + CollectionInterval: ptypes.DurationProto(interval), + } + stream, err := d.client.Stats(joinedCtx, &req) + if err != nil { + return nil, helper.HandleReqCtxGrpcErr(err, ctx, d.doneCtx) + } + + out := make(chan *StatsResponse, 1) + go d.handleStats(ctx, stream, out) + return out, nil +} + +// handleStats should be launched in a goroutine and handles converting +// the gRPC stream to a channel. Exits either when context is cancelled or the +// stream has an error. +func (d *devicePluginClient) handleStats( + reqCtx context.Context, + stream proto.DevicePlugin_StatsClient, + out chan *StatsResponse) { + + defer close(out) + for { + resp, err := stream.Recv() + if err != nil { + if err != io.EOF { + out <- &StatsResponse{ + Error: helper.HandleReqCtxGrpcErr(err, reqCtx, d.doneCtx), + } + } + + // End the stream + return + } + + // Send the response + s := &StatsResponse{ + Groups: convertProtoDeviceGroupsStats(resp.GetGroups()), + } + select { + case <-reqCtx.Done(): + return + case out <- s: + } + } +} diff --git a/plugin_interface/device/cmd/example/README.md b/plugin_interface/device/cmd/example/README.md new file mode 100644 index 00000000000..3e5d6d1a6cf --- /dev/null +++ b/plugin_interface/device/cmd/example/README.md @@ -0,0 +1,121 @@ +This package provides an example implementation of a device plugin for +reference. + +# Behavior + +The example device plugin models files within a specified directory as devices. The plugin will periodically scan the directory for changes and will expose them via the streaming Fingerprint RPC. 
Device health is set to unhealthy if the file has a specific filemode permission as described by the config `unhealthy_perm`. Further statistics are also collected on the detected devices.
+
+# Installation
+
+```shell
+nomad_plugin_dir='/opt/nomad/plugins' # for example
+go build -o $nomad_plugin_dir/nomad-device-example ./cmd
+```
+
+# Config
+
+Example client agent config with our
+[plugin](https://developer.hashicorp.com/nomad/docs/configuration/plugin) block:
+
+```hcl
+client {
+  enabled = true
+}
+
+plugin_dir = "/opt/nomad/plugins"
+
+plugin "nomad-device-example" {
+  config {
+    dir = "/tmp/nomad-device"
+    list_period = "1s"
+    unhealthy_perm = "-rwxrwxrwx"
+  }
+}
+```
+
+The valid configuration options are:
+
+* `dir` (`string`: `"."`): The directory to scan for files that will represent fake devices.
+* `list_period` (`string`: `"5s"`): The interval to scan the directory for changes.
+* `unhealthy_perm` (`string`: `"-rwxrwxrwx"`): The file mode permission that if set on a detected file will cause the device to be considered unhealthy.
+
+# Usage
+
+Create two instances of the device, one unhealthy:
+
+```shell
+mkdir -p /tmp/nomad-device
+cd /tmp/nomad-device
+touch device01 && chmod 0777 device01
+touch device02
+```
+
+It should be fingerprinted by the client agent after the `list_period`,
+which you can check with:
+
+```shell
+nomad node status -json -self | jq '.NodeResources.Devices'
+```
+
+```json
+[
+  {
+    "Attributes": null,
+    "Instances": [
+      {
+        "HealthDescription": "Device has bad permissions \"-rwxrwxrwx\"",
+        "Healthy": false,
+        "ID": "device01",
+        "Locality": null
+      },
+      {
+        "HealthDescription": "",
+        "Healthy": true,
+        "ID": "device02",
+        "Locality": null
+      }
+    ],
+    "Name": "mock",
+    "Type": "file",
+    "Vendor": "nomad"
+  }
+]
+
+```
+
+The value to put in job specification
+[device](https://developer.hashicorp.com/nomad/docs/job-specification/device)
+block, or a quota specification,
+is `"{Vendor}/{Type}/{Name}"` i.e.
`"nomad/file/mock"`: + +`job.nomad.hcl`: + +```hcl +job "job" { + group "grp" { + task "tsk" { + driver = "..." + config {} + resources { + device "nomad/file/mock" { + count = 1 + } + } + } + } +} +``` + +`dev.quota.hcl`: + +```hcl +name = "dev" +limit { + region = "global" + region_limit { + device "nomad/file/mock" { + count = 2 # to allow for deployments/reschedules + } + } +} +``` diff --git a/plugin_interface/device/cmd/example/cmd/main.go b/plugin_interface/device/cmd/example/cmd/main.go new file mode 100644 index 00000000000..0186184c6a2 --- /dev/null +++ b/plugin_interface/device/cmd/example/cmd/main.go @@ -0,0 +1,21 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + log "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/nomad/plugin-interface" + "github.com/hashicorp/nomad/plugin-interface/device/cmd/example" +) + +func main() { + // Serve the plugin + plugins.Serve(factory) +} + +// factory returns a new instance of our example device plugin +func factory(log log.Logger) interface{} { + return example.NewExampleDevice(log) +} diff --git a/plugin_interface/device/cmd/example/device.go b/plugin_interface/device/cmd/example/device.go new file mode 100644 index 00000000000..d76f07264e5 --- /dev/null +++ b/plugin_interface/device/cmd/example/device.go @@ -0,0 +1,377 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package example + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/plugin-interface/base" + "github.com/hashicorp/nomad/plugin-interface/device" + "github.com/hashicorp/nomad/plugin-interface/helper" + "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" + "github.com/hashicorp/nomad/plugin-interface/shared/structs" + "github.com/kr/pretty" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + // pluginName is the name of the plugin + pluginName = "example-fs-device" + + // vendor is the vendor providing the devices + vendor = "nomad" + + // deviceType is the type of device being returned + deviceType = "file" + + // deviceName is the name of the devices being exposed + deviceName = "mock" +) + +var ( + // pluginInfo describes the plugin + pluginInfo = &base.PluginInfoResponse{ + Type: base.PluginTypeDevice, + PluginApiVersions: []string{device.ApiVersion010}, + PluginVersion: "v0.1.0", + Name: pluginName, + } + + // configSpec is the specification of the plugin's configuration + configSpec = hclspec.NewObject(map[string]*hclspec.Spec{ + "dir": hclspec.NewDefault( + hclspec.NewAttr("dir", "string", false), + hclspec.NewLiteral("\".\""), + ), + "list_period": hclspec.NewDefault( + hclspec.NewAttr("list_period", "string", false), + hclspec.NewLiteral("\"5s\""), + ), + "unhealthy_perm": hclspec.NewDefault( + hclspec.NewAttr("unhealthy_perm", "string", false), + hclspec.NewLiteral("\"-rwxrwxrwx\""), + ), + }) +) + +// Config contains configuration information for the plugin. +type Config struct { + Dir string `codec:"dir"` + ListPeriod string `codec:"list_period"` + UnhealthyPerm string `codec:"unhealthy_perm"` +} + +// FsDevice is an example device plugin. The device plugin exposes files as +// devices and periodically polls the directory for new files. 
If a file has a +// given file permission, it is considered unhealthy. This device plugin is +// purely for use as an example. +type FsDevice struct { + logger log.Logger + + // deviceDir is the directory we expose as devices + deviceDir string + + // unhealthyPerm is the permissions on a file we consider unhealthy + unhealthyPerm string + + // listPeriod is how often we should list the device directory to detect new + // devices + listPeriod time.Duration + + // devices is the set of detected devices and maps whether they are healthy + devices map[string]bool + deviceLock sync.RWMutex +} + +// NewExampleDevice returns a new example device plugin. +func NewExampleDevice(log log.Logger) *FsDevice { + return &FsDevice{ + logger: log.Named(pluginName), + devices: make(map[string]bool), + } +} + +// PluginInfo returns information describing the plugin. +func (d *FsDevice) PluginInfo() (*base.PluginInfoResponse, error) { + return pluginInfo, nil +} + +// ConfigSchema returns the plugins configuration schema. +func (d *FsDevice) ConfigSchema() (*hclspec.Spec, error) { + return configSpec, nil +} + +// SetConfig is used to set the configuration of the plugin. +func (d *FsDevice) SetConfig(c *base.Config) error { + var config Config + if err := base.MsgPackDecode(c.PluginConfig, &config); err != nil { + return err + } + + // Save the device directory and the unhealthy permissions + d.deviceDir = config.Dir + d.unhealthyPerm = config.UnhealthyPerm + + // Convert the poll period + period, err := time.ParseDuration(config.ListPeriod) + if err != nil { + return fmt.Errorf("failed to parse list period %q: %v", config.ListPeriod, err) + } + d.listPeriod = period + + d.logger.Debug("test debug") + d.logger.Info("config set", "config", log.Fmt("% #v", pretty.Formatter(config))) + return nil +} + +// Fingerprint streams detected devices. If device changes are detected or the +// devices health changes, messages will be emitted. 
+func (d *FsDevice) Fingerprint(ctx context.Context) (<-chan *device.FingerprintResponse, error) {
+	if d.deviceDir == "" {
+		return nil, status.New(codes.Internal, "device directory not set in config").Err()
+	}
+
+	outCh := make(chan *device.FingerprintResponse)
+	go d.fingerprint(ctx, outCh)
+	return outCh, nil
+}
+
+// fingerprint is the long running goroutine that detects hardware
+func (d *FsDevice) fingerprint(ctx context.Context, devices chan *device.FingerprintResponse) {
+	defer close(devices)
+
+	// Create a timer that will fire immediately for the first detection
+	ticker := time.NewTimer(0)
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-ticker.C:
+			ticker.Reset(d.listPeriod)
+		}
+
+		d.logger.Trace("scanning for changes")
+
+		// NOTE(review): ioutil.ReadDir is deprecated since Go 1.16; prefer
+		// os.ReadDir (returns []os.DirEntry) when diffFiles is updated.
+		files, err := ioutil.ReadDir(d.deviceDir)
+		if err != nil {
+			// A read failure is terminal for this stream: the error is
+			// reported and the channel is closed via the deferred close.
+			d.logger.Error("failed to list device directory", "error", err)
+			devices <- device.NewFingerprintError(err)
+			return
+		}
+
+		detected := d.diffFiles(files)
+		if len(detected) == 0 {
+			continue
+		}
+
+		devices <- device.NewFingerprint(getDeviceGroup(detected))
+
+	}
+}
+
+func (d *FsDevice) diffFiles(files []os.FileInfo) []*device.Device {
+	d.deviceLock.Lock()
+	defer d.deviceLock.Unlock()
+
+	// Build an unhealthy message
+	unhealthyDesc := fmt.Sprintf("Device has bad permissions %q", d.unhealthyPerm)
+
+	var changes bool
+	fnames := make(map[string]struct{})
+	for _, f := range files {
+		name := f.Name()
+		fnames[name] = struct{}{}
+		if f.IsDir() {
+			d.logger.Trace("skipping directory", "directory", name)
+			continue
+		}
+
+		// Determine the health
+		perms := f.Mode().Perm().String()
+		healthy := perms != d.unhealthyPerm
+		d.logger.Trace("checking health", "file perm", perms, "unhealthy perms", d.unhealthyPerm, "healthy", healthy)
+
+		// See if we already have the device
+		oldHealth, ok := d.devices[name]
+		if ok && oldHealth == healthy {
+			continue
+		}
+
+		// Health has changed or we have a new object
+		changes = true
+		d.devices[name] = healthy
+	}
+
+	
for id := range d.devices { + if _, ok := fnames[id]; !ok { + delete(d.devices, id) + changes = true + } + } + + // Nothing to do + if !changes { + return nil + } + + // Build the devices + detected := make([]*device.Device, 0, len(d.devices)) + for name, healthy := range d.devices { + var desc string + if !healthy { + desc = unhealthyDesc + } + + detected = append(detected, &device.Device{ + ID: name, + Healthy: healthy, + HealthDesc: desc, + }) + } + + return detected +} + +// getDeviceGroup is a helper to build the DeviceGroup given a set of devices. +func getDeviceGroup(devices []*device.Device) *device.DeviceGroup { + return &device.DeviceGroup{ + Vendor: vendor, + Type: deviceType, + Name: deviceName, + Devices: devices, + } +} + +// Reserve returns information on how to mount the given devices. +func (d *FsDevice) Reserve(deviceIDs []string) (*device.ContainerReservation, error) { + if len(deviceIDs) == 0 { + return nil, status.New(codes.InvalidArgument, "no device ids given").Err() + } + + deviceDir, err := filepath.Abs(d.deviceDir) + if err != nil { + return nil, status.Newf(codes.Internal, "failed to load device dir abs path").Err() + } + + resp := &device.ContainerReservation{} + + for _, id := range deviceIDs { + // Check if the device is known + if _, ok := d.devices[id]; !ok { + return nil, status.Newf(codes.InvalidArgument, "unknown device %q", id).Err() + } + + // Add a mount + resp.Mounts = append(resp.Mounts, &device.Mount{ + TaskPath: fmt.Sprintf("/tmp/task-mounts/%s", id), + HostPath: filepath.Join(deviceDir, id), + ReadOnly: false, + }) + } + + return resp, nil +} + +// Stats streams statistics for the detected devices. 
+func (d *FsDevice) Stats(ctx context.Context, interval time.Duration) (<-chan *device.StatsResponse, error) { + outCh := make(chan *device.StatsResponse) + go d.stats(ctx, outCh, interval) + return outCh, nil +} + +// stats is the long running goroutine that streams device statistics +func (d *FsDevice) stats(ctx context.Context, stats chan *device.StatsResponse, interval time.Duration) { + defer close(stats) + + // Create a timer that will fire immediately for the first detection + ticker := time.NewTimer(0) + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + ticker.Reset(interval) + } + + deviceStats, err := d.collectStats() + if err != nil { + stats <- &device.StatsResponse{ + Error: err, + } + return + } + if deviceStats == nil { + continue + } + + stats <- &device.StatsResponse{ + Groups: []*device.DeviceGroupStats{deviceStats}, + } + } +} + +func (d *FsDevice) collectStats() (*device.DeviceGroupStats, error) { + d.deviceLock.RLock() + defer d.deviceLock.RUnlock() + l := len(d.devices) + if l == 0 { + return nil, nil + } + + now := time.Now() + group := &device.DeviceGroupStats{ + Vendor: vendor, + Type: deviceType, + Name: deviceName, + InstanceStats: make(map[string]*device.DeviceStats, l), + } + + for k := range d.devices { + p := filepath.Join(d.deviceDir, k) + f, err := os.Stat(p) + if err != nil { + return nil, fmt.Errorf("failed to stat %q: %v", p, err) + } + + s := &device.DeviceStats{ + Summary: &structs.StatValue{ + IntNumeratorVal: helper.PointerOf(f.Size()), + Unit: "bytes", + Desc: "Filesize in bytes", + }, + Stats: &structs.StatObject{ + Attributes: map[string]*structs.StatValue{ + "size": { + IntNumeratorVal: helper.PointerOf(f.Size()), + Unit: "bytes", + Desc: "Filesize in bytes", + }, + "modify_time": { + StringVal: helper.PointerOf(f.ModTime().String()), + Desc: "Last modified", + }, + "mode": { + StringVal: helper.PointerOf(f.Mode().String()), + Desc: "File mode", + }, + }, + }, + Timestamp: now, + } + + 
group.InstanceStats[k] = s + } + + return group, nil +} diff --git a/plugin_interface/device/device.go b/plugin_interface/device/device.go new file mode 100644 index 00000000000..8bbfa8a4029 --- /dev/null +++ b/plugin_interface/device/device.go @@ -0,0 +1,229 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package device + +import ( + "context" + "fmt" + "time" + + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/nomad/plugin-interface/base" + "github.com/hashicorp/nomad/plugin-interface/shared/structs" +) + +const ( + // DeviceTypeGPU is a canonical device type for a GPU. + DeviceTypeGPU = "gpu" +) + +var ( + // ErrPluginDisabled indicates that the device plugin is disabled + ErrPluginDisabled = fmt.Errorf("device is not enabled") +) + +// DevicePlugin is the interface for a plugin that can expose detected devices +// to Nomad and inform it how to mount them. +type DevicePlugin interface { + base.BasePlugin + + // Fingerprint returns a stream of devices that are detected. + Fingerprint(ctx context.Context) (<-chan *FingerprintResponse, error) + + // Reserve is used to reserve a set of devices and retrieve mount + // instructions. + Reserve(deviceIDs []string) (*ContainerReservation, error) + + // Stats returns a stream of statistics per device collected at the passed + // interval. + Stats(ctx context.Context, interval time.Duration) (<-chan *StatsResponse, error) +} + +// FingerprintResponse includes a set of detected devices or an error in the +// process of fingerprinting. +type FingerprintResponse struct { + // Devices is a set of devices that have been detected. + Devices []*DeviceGroup + + // Error is populated when fingerprinting has failed. 
+ Error error +} + +// NewFingerprint takes a set of device groups and returns a fingerprint +// response +func NewFingerprint(devices ...*DeviceGroup) *FingerprintResponse { + return &FingerprintResponse{ + Devices: devices, + } +} + +// NewFingerprintError takes an error and returns a fingerprint response +func NewFingerprintError(err error) *FingerprintResponse { + return &FingerprintResponse{ + Error: err, + } +} + +// DeviceGroup is a grouping of devices that share a common vendor, device type +// and name. +type DeviceGroup struct { + // Vendor is the vendor providing the device (nvidia, intel, etc). + Vendor string + + // Type is the type of the device (gpu, fpga, etc). + Type string + + // Name is the devices model name. + Name string + + // Devices is the set of device instances. + Devices []*Device + + // Attributes are a set of attributes shared for all the devices. + Attributes map[string]*structs.Attribute +} + +// Validate validates that the device group is valid +func (d *DeviceGroup) Validate() error { + var mErr multierror.Error + + if d.Vendor == "" { + _ = multierror.Append(&mErr, fmt.Errorf("device vendor must be specified")) + } + if d.Type == "" { + _ = multierror.Append(&mErr, fmt.Errorf("device type must be specified")) + } + if d.Name == "" { + _ = multierror.Append(&mErr, fmt.Errorf("device name must be specified")) + } + + for i, dev := range d.Devices { + if dev == nil { + _ = multierror.Append(&mErr, fmt.Errorf("device %d is nil", i)) + continue + } + + if err := dev.Validate(); err != nil { + _ = multierror.Append(&mErr, multierror.Prefix(err, fmt.Sprintf("device %d: ", i))) + } + } + + for k, v := range d.Attributes { + if err := v.Validate(); err != nil { + _ = multierror.Append(&mErr, fmt.Errorf("device attribute %q invalid: %v", k, err)) + } + } + + return mErr.ErrorOrNil() + +} + +// Device is an instance of a particular device. +type Device struct { + // ID is the identifier for the device. 
+ ID string + + // Healthy marks whether the device is healthy and can be used for + // scheduling. + Healthy bool + + // HealthDesc describes why the device may be unhealthy. + HealthDesc string + + // HwLocality captures hardware locality information for the device. + HwLocality *DeviceLocality +} + +// Validate validates that the device is valid +func (d *Device) Validate() error { + if d.ID == "" { + return fmt.Errorf("device ID must be specified") + } + + return nil +} + +// DeviceLocality captures hardware locality information for a device. +type DeviceLocality struct { + // PciBusID is the PCI bus ID of the device. + PciBusID string +} + +// ContainerReservation describes how to mount a device into a container. A +// container is an isolated environment that shares the host's OS. +type ContainerReservation struct { + // Envs are a set of environment variables to set for the task. + Envs map[string]string + + // Mounts are used to mount host volumes into a container that may include + // libraries, etc. + Mounts []*Mount + + // Devices are the set of devices to mount into the container. + Devices []*DeviceSpec +} + +// Mount is used to mount a host directory into a container. +type Mount struct { + // TaskPath is the location in the task's file system to mount. + TaskPath string + + // HostPath is the host directory path to mount. + HostPath string + + // ReadOnly defines whether the mount should be read only to the task. + ReadOnly bool +} + +// DeviceSpec captures how to mount a device into a container. +type DeviceSpec struct { + // TaskPath is the location to mount the device in the task's file system. + TaskPath string + + // HostPath is the host location of the device. + HostPath string + + // CgroupPerms defines the permissions to use when mounting the device. + CgroupPerms string +} + +// StatsResponse returns statistics for each device group. +type StatsResponse struct { + // Groups contains statistics for each device group. 
+ Groups []*DeviceGroupStats + + // Error is populated when collecting statistics has failed. + Error error +} + +// NewStatsError takes an error and returns a stats response +func NewStatsError(err error) *StatsResponse { + return &StatsResponse{ + Error: err, + } +} + +// DeviceGroupStats contains statistics for each device of a particular +// device group, identified by the vendor, type and name of the device. +type DeviceGroupStats struct { + Vendor string + Type string + Name string + + // InstanceStats is a mapping of each device ID to its statistics. + InstanceStats map[string]*DeviceStats +} + +// DeviceStats is the statistics for an individual device +type DeviceStats struct { + // Summary exposes a single summary metric that should be the most + // informative to users. + Summary *structs.StatValue + + // Stats contains the verbose statistics for the device. + Stats *structs.StatObject + + // Timestamp is the time the statistics were collected. + Timestamp time.Time +} diff --git a/plugin_interface/device/mock.go b/plugin_interface/device/mock.go new file mode 100644 index 00000000000..ed5168be6f5 --- /dev/null +++ b/plugin_interface/device/mock.go @@ -0,0 +1,111 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package device + +import ( + "context" + "time" + + "github.com/hashicorp/nomad/plugin-interface/base" +) + +type FingerprintFn func(context.Context) (<-chan *FingerprintResponse, error) +type ReserveFn func([]string) (*ContainerReservation, error) +type StatsFn func(context.Context, time.Duration) (<-chan *StatsResponse, error) + +// MockDevicePlugin is used for testing. +// Each function can be set as a closure to make assertions about how data +// is passed through the base plugin layer. 
+type MockDevicePlugin struct { + *base.MockPlugin + FingerprintF FingerprintFn + ReserveF ReserveFn + StatsF StatsFn +} + +func (p *MockDevicePlugin) Fingerprint(ctx context.Context) (<-chan *FingerprintResponse, error) { + return p.FingerprintF(ctx) +} + +func (p *MockDevicePlugin) Reserve(devices []string) (*ContainerReservation, error) { + return p.ReserveF(devices) +} + +func (p *MockDevicePlugin) Stats(ctx context.Context, interval time.Duration) (<-chan *StatsResponse, error) { + return p.StatsF(ctx, interval) +} + +// Below are static implementations of the device functions + +// StaticFingerprinter fingerprints the passed devices just once +func StaticFingerprinter(devices []*DeviceGroup) FingerprintFn { + return func(_ context.Context) (<-chan *FingerprintResponse, error) { + outCh := make(chan *FingerprintResponse, 1) + outCh <- &FingerprintResponse{ + Devices: devices, + } + return outCh, nil + } +} + +// ErrorChFingerprinter returns an error fingerprinting over the channel +func ErrorChFingerprinter(err error) FingerprintFn { + return func(_ context.Context) (<-chan *FingerprintResponse, error) { + outCh := make(chan *FingerprintResponse, 1) + outCh <- &FingerprintResponse{ + Error: err, + } + return outCh, nil + } +} + +// StaticReserve returns the passed container reservation +func StaticReserve(out *ContainerReservation) ReserveFn { + return func(_ []string) (*ContainerReservation, error) { + return out, nil + } +} + +// ErrorReserve returns the passed error +func ErrorReserve(err error) ReserveFn { + return func(_ []string) (*ContainerReservation, error) { + return nil, err + } +} + +// StaticStats returns the passed statistics +func StaticStats(out []*DeviceGroupStats) StatsFn { + return func(ctx context.Context, intv time.Duration) (<-chan *StatsResponse, error) { + outCh := make(chan *StatsResponse, 1) + + go func() { + ticker := time.NewTimer(0) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + ticker.Reset(intv) + } + + 
outCh <- &StatsResponse{
+					Groups: out,
+				}
+			}
+		}()
+
+		return outCh, nil
+	}
+}
+
+// ErrorChStats returns an error collecting stats over the channel
+func ErrorChStats(err error) StatsFn {
+	return func(_ context.Context, _ time.Duration) (<-chan *StatsResponse, error) {
+		outCh := make(chan *StatsResponse, 1)
+		outCh <- &StatsResponse{
+			Error: err,
+		}
+		return outCh, nil
+	}
+}
diff --git a/plugin_interface/device/plugin.go b/plugin_interface/device/plugin.go
new file mode 100644
index 00000000000..41a4818967a
--- /dev/null
+++ b/plugin_interface/device/plugin.go
@@ -0,0 +1,54 @@
+// Copyright IBM Corp. 2015, 2025
+// SPDX-License-Identifier: MPL-2.0
+
+package device
+
+import (
+	"context"
+
+	log "github.com/hashicorp/go-hclog"
+	plugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/nomad/plugin-interface/base"
+	bproto "github.com/hashicorp/nomad/plugin-interface/base/proto"
+	"github.com/hashicorp/nomad/plugin-interface/device/proto"
+	"google.golang.org/grpc"
+)
+
+// PluginDevice wraps a DevicePlugin and implements go-plugin's GRPCPlugin
+// interface to expose the interface over gRPC.
+type PluginDevice struct { + plugin.NetRPCUnsupportedPlugin + Impl DevicePlugin +} + +func (p *PluginDevice) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterDevicePluginServer(s, &devicePluginServer{ + impl: p.Impl, + broker: broker, + }) + return nil +} + +func (p *PluginDevice) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (any, error) { + return &devicePluginClient{ + doneCtx: ctx, + client: proto.NewDevicePluginClient(c), + BasePluginClient: &base.BasePluginClient{ + Client: bproto.NewBasePluginClient(c), + DoneCtx: ctx, + }, + }, nil +} + +// Serve is used to serve a device plugin +func Serve(dev DevicePlugin, logger log.Logger) { + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: base.Handshake, + Plugins: map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: dev}, + base.PluginTypeDevice: &PluginDevice{Impl: dev}, + }, + GRPCServer: plugin.DefaultGRPCServer, + Logger: logger, + }) +} diff --git a/plugin_interface/device/plugin_test.go b/plugin_interface/device/plugin_test.go new file mode 100644 index 00000000000..1d51c5db09e --- /dev/null +++ b/plugin_interface/device/plugin_test.go @@ -0,0 +1,726 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package device + +import ( + "context" + "fmt" + "testing" + "time" + + pb "github.com/golang/protobuf/proto" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/plugin-interface/base" + "github.com/hashicorp/nomad/plugin-interface/helper" + "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" + "github.com/hashicorp/nomad/plugin-interface/shared/structs" + "github.com/stretchr/testify/require" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/msgpack" + "google.golang.org/grpc/status" +) + +func TestDevicePlugin_PluginInfo(t *testing.T) { + // ci.Parallel(t) + require := require.New(t) + + var ( + apiVersions = []string{"v0.1.0", "v0.2.0"} + ) + + const ( + pluginVersion = "v0.2.1" + pluginName = "mock_device" + ) + + knownType := func() (*base.PluginInfoResponse, error) { + info := &base.PluginInfoResponse{ + Type: base.PluginTypeDevice, + PluginApiVersions: apiVersions, + PluginVersion: pluginVersion, + Name: pluginName, + } + return info, nil + } + unknownType := func() (*base.PluginInfoResponse, error) { + info := &base.PluginInfoResponse{ + Type: "bad", + PluginApiVersions: apiVersions, + PluginVersion: pluginVersion, + Name: pluginName, + } + return info, nil + } + + mock := &MockDevicePlugin{ + MockPlugin: &base.MockPlugin{ + PluginInfoF: knownType, + }, + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDevice: &PluginDevice{Impl: mock}, + }) + defer server.Stop() + defer client.Close() + + raw, err := client.Dispense(base.PluginTypeDevice) + if err != nil { + t.Fatalf("err: %s", err) + } + + impl, ok := raw.(DevicePlugin) + if !ok { + t.Fatalf("bad: %#v", raw) + } + + resp, err := impl.PluginInfo() + require.NoError(err) + require.Equal(apiVersions, resp.PluginApiVersions) + require.Equal(pluginVersion, resp.PluginVersion) + require.Equal(pluginName, 
resp.Name) + require.Equal(base.PluginTypeDevice, resp.Type) + + // Swap the implementation to return an unknown type + mock.PluginInfoF = unknownType + _, err = impl.PluginInfo() + require.Error(err) + require.Contains(err.Error(), "unknown type") +} + +func TestDevicePlugin_ConfigSchema(t *testing.T) { + // ci.Parallel(t) + require := require.New(t) + + mock := &MockDevicePlugin{ + MockPlugin: &base.MockPlugin{ + ConfigSchemaF: func() (*hclspec.Spec, error) { + return base.TestSpec, nil + }, + }, + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDevice: &PluginDevice{Impl: mock}, + }) + defer server.Stop() + defer client.Close() + + raw, err := client.Dispense(base.PluginTypeDevice) + if err != nil { + t.Fatalf("err: %s", err) + } + + impl, ok := raw.(DevicePlugin) + if !ok { + t.Fatalf("bad: %#v", raw) + } + + specOut, err := impl.ConfigSchema() + require.NoError(err) + require.True(pb.Equal(base.TestSpec, specOut)) +} + +func TestDevicePlugin_SetConfig(t *testing.T) { + // ci.Parallel(t) + require := require.New(t) + + var receivedData []byte + mock := &MockDevicePlugin{ + MockPlugin: &base.MockPlugin{ + PluginInfoF: func() (*base.PluginInfoResponse, error) { + return &base.PluginInfoResponse{ + Type: base.PluginTypeDevice, + PluginApiVersions: []string{"v0.0.1"}, + PluginVersion: "v0.0.1", + Name: "mock_device", + }, nil + }, + ConfigSchemaF: func() (*hclspec.Spec, error) { + return base.TestSpec, nil + }, + SetConfigF: func(cfg *base.Config) error { + receivedData = cfg.PluginConfig + return nil + }, + }, + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDevice: &PluginDevice{Impl: mock}, + }) + defer server.Stop() + defer client.Close() + + raw, err := client.Dispense(base.PluginTypeDevice) + if err != nil { + t.Fatalf("err: %s", err) + } + + impl, 
ok := raw.(DevicePlugin) + if !ok { + t.Fatalf("bad: %#v", raw) + } + + config := cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("v1"), + "bar": cty.NumberIntVal(1337), + "baz": cty.BoolVal(true), + }) + cdata, err := msgpack.Marshal(config, config.Type()) + require.NoError(err) + require.NoError(impl.SetConfig(&base.Config{PluginConfig: cdata})) + require.Equal(cdata, receivedData) + + // Decode the value back + var actual base.TestConfig + require.NoError(structs.Decode(receivedData, &actual)) + require.Equal("v1", actual.Foo) + require.EqualValues(1337, actual.Bar) + require.True(actual.Baz) +} + +func TestDevicePlugin_Fingerprint(t *testing.T) { + // ci.Parallel(t) + require := require.New(t) + + devices1 := []*DeviceGroup{ + { + Vendor: "nvidia", + Type: DeviceTypeGPU, + Name: "foo", + Attributes: map[string]*structs.Attribute{ + "memory": { + Int: helper.PointerOf(int64(4)), + Unit: "GiB", + }, + }, + }, + } + devices2 := []*DeviceGroup{ + { + Vendor: "nvidia", + Type: DeviceTypeGPU, + Name: "foo", + }, + { + Vendor: "nvidia", + Type: DeviceTypeGPU, + Name: "bar", + }, + } + + mock := &MockDevicePlugin{ + FingerprintF: func(ctx context.Context) (<-chan *FingerprintResponse, error) { + outCh := make(chan *FingerprintResponse, 1) + go func() { + // Send two messages + for _, devs := range [][]*DeviceGroup{devices1, devices2} { + select { + case <-ctx.Done(): + return + case outCh <- &FingerprintResponse{Devices: devs}: + } + } + close(outCh) + return + }() + return outCh, nil + }, + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDevice: &PluginDevice{Impl: mock}, + }) + defer server.Stop() + defer client.Close() + + raw, err := client.Dispense(base.PluginTypeDevice) + if err != nil { + t.Fatalf("err: %s", err) + } + + impl, ok := raw.(DevicePlugin) + if !ok { + t.Fatalf("bad: %#v", raw) + } + + // Create a context + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + // Get the stream + stream, err := impl.Fingerprint(ctx) + require.NoError(err) + + // Get the first message + var first *FingerprintResponse + select { + case <-time.After(1 * time.Second): + t.Fatal("timeout") + case first = <-stream: + } + + require.NoError(first.Error) + require.EqualValues(devices1, first.Devices) + + // Get the second message + var second *FingerprintResponse + select { + case <-time.After(1 * time.Second): + t.Fatal("timeout") + case second = <-stream: + } + + require.NoError(second.Error) + require.EqualValues(devices2, second.Devices) + + select { + case _, ok := <-stream: + require.False(ok) + case <-time.After(1 * time.Second): + t.Fatal("stream should be closed") + } +} + +func TestDevicePlugin_Fingerprint_StreamErr(t *testing.T) { + // ci.Parallel(t) + require := require.New(t) + + ferr := fmt.Errorf("mock fingerprinting failed") + mock := &MockDevicePlugin{ + FingerprintF: func(ctx context.Context) (<-chan *FingerprintResponse, error) { + outCh := make(chan *FingerprintResponse, 1) + go func() { + // Send the error + select { + case <-ctx.Done(): + return + case outCh <- &FingerprintResponse{Error: ferr}: + } + + close(outCh) + return + }() + return outCh, nil + }, + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDevice: &PluginDevice{Impl: mock}, + }) + defer server.Stop() + defer client.Close() + + raw, err := client.Dispense(base.PluginTypeDevice) + if err != nil { + t.Fatalf("err: %s", err) + } + + impl, ok := raw.(DevicePlugin) + if !ok { + t.Fatalf("bad: %#v", raw) + } + + // Create a context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Get the stream + stream, err := impl.Fingerprint(ctx) + require.NoError(err) + + // Get the first message + var first *FingerprintResponse + select { + case <-time.After(1 * time.Second): + 
t.Fatal("timeout") + case first = <-stream: + } + + errStatus := status.Convert(ferr) + require.EqualError(first.Error, errStatus.Err().Error()) +} + +func TestDevicePlugin_Fingerprint_CancelCtx(t *testing.T) { + // ci.Parallel(t) + require := require.New(t) + + mock := &MockDevicePlugin{ + FingerprintF: func(ctx context.Context) (<-chan *FingerprintResponse, error) { + outCh := make(chan *FingerprintResponse, 1) + go func() { + <-ctx.Done() + close(outCh) + return + }() + return outCh, nil + }, + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDevice: &PluginDevice{Impl: mock}, + }) + defer server.Stop() + defer client.Close() + + raw, err := client.Dispense(base.PluginTypeDevice) + if err != nil { + t.Fatalf("err: %s", err) + } + + impl, ok := raw.(DevicePlugin) + if !ok { + t.Fatalf("bad: %#v", raw) + } + + // Create a context + ctx, cancel := context.WithCancel(context.Background()) + + // Get the stream + stream, err := impl.Fingerprint(ctx) + require.NoError(err) + + // Get the first message + select { + case <-time.After(50 * time.Millisecond): + case _ = <-stream: + t.Fatal("bad value") + } + + // Cancel the context + cancel() + + // Make sure we are done + select { + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + case v := <-stream: + require.Error(v.Error) + require.EqualError(v.Error, context.Canceled.Error()) + } +} + +func TestDevicePlugin_Reserve(t *testing.T) { + // ci.Parallel(t) + require := require.New(t) + + reservation := &ContainerReservation{ + Envs: map[string]string{ + "foo": "bar", + }, + Mounts: []*Mount{ + { + TaskPath: "foo", + HostPath: "bar", + ReadOnly: true, + }, + }, + Devices: []*DeviceSpec{ + { + TaskPath: "foo", + HostPath: "bar", + CgroupPerms: "rx", + }, + }, + } + + var received []string + mock := &MockDevicePlugin{ + ReserveF: func(devices []string) (*ContainerReservation, error) { + received = 
devices + return reservation, nil + }, + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDevice: &PluginDevice{Impl: mock}, + }) + defer server.Stop() + defer client.Close() + + raw, err := client.Dispense(base.PluginTypeDevice) + if err != nil { + t.Fatalf("err: %s", err) + } + + impl, ok := raw.(DevicePlugin) + if !ok { + t.Fatalf("bad: %#v", raw) + } + + req := []string{"a", "b"} + containerRes, err := impl.Reserve(req) + require.NoError(err) + require.EqualValues(req, received) + require.EqualValues(reservation, containerRes) +} + +func TestDevicePlugin_Stats(t *testing.T) { + // ci.Parallel(t) + require := require.New(t) + + devices1 := []*DeviceGroupStats{ + { + Vendor: "nvidia", + Type: DeviceTypeGPU, + Name: "foo", + InstanceStats: map[string]*DeviceStats{ + "1": { + Summary: &structs.StatValue{ + IntNumeratorVal: helper.PointerOf(int64(10)), + IntDenominatorVal: helper.PointerOf(int64(20)), + Unit: "MB", + Desc: "Unit test", + }, + }, + }, + }, + } + devices2 := []*DeviceGroupStats{ + { + Vendor: "nvidia", + Type: DeviceTypeGPU, + Name: "foo", + InstanceStats: map[string]*DeviceStats{ + "1": { + Summary: &structs.StatValue{ + FloatNumeratorVal: helper.PointerOf(float64(10.0)), + FloatDenominatorVal: helper.PointerOf(float64(20.0)), + Unit: "MB", + Desc: "Unit test", + }, + }, + }, + }, + { + Vendor: "nvidia", + Type: DeviceTypeGPU, + Name: "bar", + InstanceStats: map[string]*DeviceStats{ + "1": { + Summary: &structs.StatValue{ + StringVal: helper.PointerOf("foo"), + Unit: "MB", + Desc: "Unit test", + }, + }, + }, + }, + { + Vendor: "nvidia", + Type: DeviceTypeGPU, + Name: "baz", + InstanceStats: map[string]*DeviceStats{ + "1": { + Summary: &structs.StatValue{ + BoolVal: helper.PointerOf(true), + Unit: "MB", + Desc: "Unit test", + }, + }, + }, + }, + } + + mock := &MockDevicePlugin{ + StatsF: func(ctx context.Context, interval time.Duration) (<-chan 
*StatsResponse, error) { + outCh := make(chan *StatsResponse, 1) + go func() { + // Send two messages + for _, devs := range [][]*DeviceGroupStats{devices1, devices2} { + select { + case <-ctx.Done(): + return + case outCh <- &StatsResponse{Groups: devs}: + } + } + close(outCh) + return + }() + return outCh, nil + }, + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDevice: &PluginDevice{Impl: mock}, + }) + defer server.Stop() + defer client.Close() + + raw, err := client.Dispense(base.PluginTypeDevice) + if err != nil { + t.Fatalf("err: %s", err) + } + + impl, ok := raw.(DevicePlugin) + if !ok { + t.Fatalf("bad: %#v", raw) + } + + // Create a context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Get the stream + stream, err := impl.Stats(ctx, time.Millisecond) + require.NoError(err) + + // Get the first message + var first *StatsResponse + select { + case <-time.After(1 * time.Second): + t.Fatal("timeout") + case first = <-stream: + } + + require.NoError(first.Error) + require.EqualValues(devices1, first.Groups) + + // Get the second message + var second *StatsResponse + select { + case <-time.After(1 * time.Second): + t.Fatal("timeout") + case second = <-stream: + } + + require.NoError(second.Error) + require.EqualValues(devices2, second.Groups) + + select { + case _, ok := <-stream: + require.False(ok) + case <-time.After(1 * time.Second): + t.Fatal("stream should be closed") + } +} + +func TestDevicePlugin_Stats_StreamErr(t *testing.T) { + // ci.Parallel(t) + require := require.New(t) + + ferr := fmt.Errorf("mock stats failed") + mock := &MockDevicePlugin{ + StatsF: func(ctx context.Context, interval time.Duration) (<-chan *StatsResponse, error) { + outCh := make(chan *StatsResponse, 1) + go func() { + // Send the error + select { + case <-ctx.Done(): + return + case outCh <- &StatsResponse{Error: ferr}: + } + + 
close(outCh) + return + }() + return outCh, nil + }, + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDevice: &PluginDevice{Impl: mock}, + }) + defer server.Stop() + defer client.Close() + + raw, err := client.Dispense(base.PluginTypeDevice) + if err != nil { + t.Fatalf("err: %s", err) + } + + impl, ok := raw.(DevicePlugin) + if !ok { + t.Fatalf("bad: %#v", raw) + } + + // Create a context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Get the stream + stream, err := impl.Stats(ctx, time.Millisecond) + require.NoError(err) + + // Get the first message + var first *StatsResponse + select { + case <-time.After(1 * time.Second): + t.Fatal("timeout") + case first = <-stream: + } + + errStatus := status.Convert(ferr) + require.EqualError(first.Error, errStatus.Err().Error()) +} + +func TestDevicePlugin_Stats_CancelCtx(t *testing.T) { + // ci.Parallel(t) + require := require.New(t) + + mock := &MockDevicePlugin{ + StatsF: func(ctx context.Context, interval time.Duration) (<-chan *StatsResponse, error) { + outCh := make(chan *StatsResponse, 1) + go func() { + <-ctx.Done() + close(outCh) + return + }() + return outCh, nil + }, + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDevice: &PluginDevice{Impl: mock}, + }) + defer server.Stop() + defer client.Close() + + raw, err := client.Dispense(base.PluginTypeDevice) + if err != nil { + t.Fatalf("err: %s", err) + } + + impl, ok := raw.(DevicePlugin) + if !ok { + t.Fatalf("bad: %#v", raw) + } + + // Create a context + ctx, cancel := context.WithCancel(context.Background()) + + // Get the stream + stream, err := impl.Stats(ctx, time.Millisecond) + require.NoError(err) + + // Get the first message + select { + case <-time.After(50 * time.Millisecond): + case _ = <-stream: + 
t.Fatal("bad value") + } + + // Cancel the context + cancel() + + // Make sure we are done + select { + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + case v := <-stream: + require.Error(v.Error) + require.EqualError(v.Error, context.Canceled.Error()) + } +} diff --git a/plugin_interface/device/proto/device.pb.go b/plugin_interface/device/proto/device.pb.go new file mode 100644 index 00000000000..c0a400c5373 --- /dev/null +++ b/plugin_interface/device/proto/device.pb.go @@ -0,0 +1,1083 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: plugins/device/proto/device.proto + +package proto + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + proto1 "github.com/hashicorp/nomad/plugin-interface/shared/structs/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// FingerprintRequest is used to request for devices to be fingerprinted. 
+type FingerprintRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FingerprintRequest) Reset() { *m = FingerprintRequest{} } +func (m *FingerprintRequest) String() string { return proto.CompactTextString(m) } +func (*FingerprintRequest) ProtoMessage() {} +func (*FingerprintRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{0} +} + +func (m *FingerprintRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FingerprintRequest.Unmarshal(m, b) +} +func (m *FingerprintRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FingerprintRequest.Marshal(b, m, deterministic) +} +func (m *FingerprintRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FingerprintRequest.Merge(m, src) +} +func (m *FingerprintRequest) XXX_Size() int { + return xxx_messageInfo_FingerprintRequest.Size(m) +} +func (m *FingerprintRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FingerprintRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FingerprintRequest proto.InternalMessageInfo + +// FingerprintResponse returns a set of detected devices. +type FingerprintResponse struct { + // device_group is a group of devices that share a vendor, device_type, and + // device_name. This is returned as a set so that a single plugin could + // potentially detect several device types and models. 
+ DeviceGroup []*DeviceGroup `protobuf:"bytes,1,rep,name=device_group,json=deviceGroup,proto3" json:"device_group,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FingerprintResponse) Reset() { *m = FingerprintResponse{} } +func (m *FingerprintResponse) String() string { return proto.CompactTextString(m) } +func (*FingerprintResponse) ProtoMessage() {} +func (*FingerprintResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{1} +} + +func (m *FingerprintResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FingerprintResponse.Unmarshal(m, b) +} +func (m *FingerprintResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FingerprintResponse.Marshal(b, m, deterministic) +} +func (m *FingerprintResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FingerprintResponse.Merge(m, src) +} +func (m *FingerprintResponse) XXX_Size() int { + return xxx_messageInfo_FingerprintResponse.Size(m) +} +func (m *FingerprintResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FingerprintResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_FingerprintResponse proto.InternalMessageInfo + +func (m *FingerprintResponse) GetDeviceGroup() []*DeviceGroup { + if m != nil { + return m.DeviceGroup + } + return nil +} + +// DeviceGroup is a group of devices that share a vendor, device type and name. +type DeviceGroup struct { + // vendor is the name of the vendor of the device + Vendor string `protobuf:"bytes,1,opt,name=vendor,proto3" json:"vendor,omitempty"` + // device_type is the type of the device (gpu, fpga, etc). + DeviceType string `protobuf:"bytes,2,opt,name=device_type,json=deviceType,proto3" json:"device_type,omitempty"` + // device_name is the name of the device. 
+ DeviceName string `protobuf:"bytes,3,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"` + // devices is the set of devices detected by the plugin. + Devices []*DetectedDevice `protobuf:"bytes,4,rep,name=devices,proto3" json:"devices,omitempty"` + // attributes allows adding attributes to be used for constraints or + // affinities. + Attributes map[string]*proto1.Attribute `protobuf:"bytes,5,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceGroup) Reset() { *m = DeviceGroup{} } +func (m *DeviceGroup) String() string { return proto.CompactTextString(m) } +func (*DeviceGroup) ProtoMessage() {} +func (*DeviceGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{2} +} + +func (m *DeviceGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceGroup.Unmarshal(m, b) +} +func (m *DeviceGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceGroup.Marshal(b, m, deterministic) +} +func (m *DeviceGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceGroup.Merge(m, src) +} +func (m *DeviceGroup) XXX_Size() int { + return xxx_messageInfo_DeviceGroup.Size(m) +} +func (m *DeviceGroup) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceGroup proto.InternalMessageInfo + +func (m *DeviceGroup) GetVendor() string { + if m != nil { + return m.Vendor + } + return "" +} + +func (m *DeviceGroup) GetDeviceType() string { + if m != nil { + return m.DeviceType + } + return "" +} + +func (m *DeviceGroup) GetDeviceName() string { + if m != nil { + return m.DeviceName + } + return "" +} + +func (m *DeviceGroup) GetDevices() []*DetectedDevice { + if m != nil { + return m.Devices + } + return nil +} + 
+func (m *DeviceGroup) GetAttributes() map[string]*proto1.Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +// DetectedDevice is a single detected device. +type DetectedDevice struct { + // ID is the ID of the device. This ID is used during allocation and must be + // stable across restarts of the device driver. + // buf:lint:ignore FIELD_LOWER_SNAKE_CASE + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Health of the device. + Healthy bool `protobuf:"varint,2,opt,name=healthy,proto3" json:"healthy,omitempty"` + // health_description allows the device plugin to optionally + // annotate the health field with a human readable reason. + HealthDescription string `protobuf:"bytes,3,opt,name=health_description,json=healthDescription,proto3" json:"health_description,omitempty"` + // hw_locality is optionally set to expose hardware locality information for + // more optimal placement decisions. + HwLocality *DeviceLocality `protobuf:"bytes,4,opt,name=hw_locality,json=hwLocality,proto3" json:"hw_locality,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DetectedDevice) Reset() { *m = DetectedDevice{} } +func (m *DetectedDevice) String() string { return proto.CompactTextString(m) } +func (*DetectedDevice) ProtoMessage() {} +func (*DetectedDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{3} +} + +func (m *DetectedDevice) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DetectedDevice.Unmarshal(m, b) +} +func (m *DetectedDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DetectedDevice.Marshal(b, m, deterministic) +} +func (m *DetectedDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_DetectedDevice.Merge(m, src) +} +func (m *DetectedDevice) XXX_Size() int { + return xxx_messageInfo_DetectedDevice.Size(m) +} +func (m *DetectedDevice) 
XXX_DiscardUnknown() { + xxx_messageInfo_DetectedDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_DetectedDevice proto.InternalMessageInfo + +func (m *DetectedDevice) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *DetectedDevice) GetHealthy() bool { + if m != nil { + return m.Healthy + } + return false +} + +func (m *DetectedDevice) GetHealthDescription() string { + if m != nil { + return m.HealthDescription + } + return "" +} + +func (m *DetectedDevice) GetHwLocality() *DeviceLocality { + if m != nil { + return m.HwLocality + } + return nil +} + +// DeviceLocality is used to expose HW locality information about a device. +type DeviceLocality struct { + // pci_bus_id is the PCI bus ID for the device. If reported, it + // allows Nomad to make NUMA aware optimizations. + PciBusId string `protobuf:"bytes,1,opt,name=pci_bus_id,json=pciBusId,proto3" json:"pci_bus_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceLocality) Reset() { *m = DeviceLocality{} } +func (m *DeviceLocality) String() string { return proto.CompactTextString(m) } +func (*DeviceLocality) ProtoMessage() {} +func (*DeviceLocality) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{4} +} + +func (m *DeviceLocality) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceLocality.Unmarshal(m, b) +} +func (m *DeviceLocality) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceLocality.Marshal(b, m, deterministic) +} +func (m *DeviceLocality) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceLocality.Merge(m, src) +} +func (m *DeviceLocality) XXX_Size() int { + return xxx_messageInfo_DeviceLocality.Size(m) +} +func (m *DeviceLocality) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceLocality.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceLocality proto.InternalMessageInfo + +func (m 
*DeviceLocality) GetPciBusId() string { + if m != nil { + return m.PciBusId + } + return "" +} + +// ReserveRequest is used to ask the device driver for information on +// how to allocate the requested devices. +type ReserveRequest struct { + // device_ids are the requested devices. + DeviceIds []string `protobuf:"bytes,1,rep,name=device_ids,json=deviceIds,proto3" json:"device_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReserveRequest) Reset() { *m = ReserveRequest{} } +func (m *ReserveRequest) String() string { return proto.CompactTextString(m) } +func (*ReserveRequest) ProtoMessage() {} +func (*ReserveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{5} +} + +func (m *ReserveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReserveRequest.Unmarshal(m, b) +} +func (m *ReserveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReserveRequest.Marshal(b, m, deterministic) +} +func (m *ReserveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReserveRequest.Merge(m, src) +} +func (m *ReserveRequest) XXX_Size() int { + return xxx_messageInfo_ReserveRequest.Size(m) +} +func (m *ReserveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReserveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReserveRequest proto.InternalMessageInfo + +func (m *ReserveRequest) GetDeviceIds() []string { + if m != nil { + return m.DeviceIds + } + return nil +} + +// ReserveResponse informs Nomad how to expose the requested devices +// to the task. 
+type ReserveResponse struct { + // container_res contains information on how to mount the device + // into a task isolated using container technologies (where the + // host is shared) + ContainerRes *ContainerReservation `protobuf:"bytes,1,opt,name=container_res,json=containerRes,proto3" json:"container_res,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReserveResponse) Reset() { *m = ReserveResponse{} } +func (m *ReserveResponse) String() string { return proto.CompactTextString(m) } +func (*ReserveResponse) ProtoMessage() {} +func (*ReserveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{6} +} + +func (m *ReserveResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReserveResponse.Unmarshal(m, b) +} +func (m *ReserveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReserveResponse.Marshal(b, m, deterministic) +} +func (m *ReserveResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReserveResponse.Merge(m, src) +} +func (m *ReserveResponse) XXX_Size() int { + return xxx_messageInfo_ReserveResponse.Size(m) +} +func (m *ReserveResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReserveResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReserveResponse proto.InternalMessageInfo + +func (m *ReserveResponse) GetContainerRes() *ContainerReservation { + if m != nil { + return m.ContainerRes + } + return nil +} + +// ContainerReservation returns how to mount the device into a +// container that shares the host OS. +type ContainerReservation struct { + // List of environment variable to be set + Envs map[string]string `protobuf:"bytes,1,rep,name=envs,proto3" json:"envs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Mounts for the task. 
+ Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` + // Devices for the task. + Devices []*DeviceSpec `protobuf:"bytes,3,rep,name=devices,proto3" json:"devices,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerReservation) Reset() { *m = ContainerReservation{} } +func (m *ContainerReservation) String() string { return proto.CompactTextString(m) } +func (*ContainerReservation) ProtoMessage() {} +func (*ContainerReservation) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{7} +} + +func (m *ContainerReservation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ContainerReservation.Unmarshal(m, b) +} +func (m *ContainerReservation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ContainerReservation.Marshal(b, m, deterministic) +} +func (m *ContainerReservation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerReservation.Merge(m, src) +} +func (m *ContainerReservation) XXX_Size() int { + return xxx_messageInfo_ContainerReservation.Size(m) +} +func (m *ContainerReservation) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerReservation.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerReservation proto.InternalMessageInfo + +func (m *ContainerReservation) GetEnvs() map[string]string { + if m != nil { + return m.Envs + } + return nil +} + +func (m *ContainerReservation) GetMounts() []*Mount { + if m != nil { + return m.Mounts + } + return nil +} + +func (m *ContainerReservation) GetDevices() []*DeviceSpec { + if m != nil { + return m.Devices + } + return nil +} + +// Mount specifies a host volume to mount into a task. +// where device library or tools are installed on host and task +type Mount struct { + // Path of the mount within the task. 
+ TaskPath string `protobuf:"bytes,1,opt,name=task_path,json=taskPath,proto3" json:"task_path,omitempty"` + // Path of the mount on the host. + HostPath string `protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"` + // If set, the mount is read-only. + ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (m *Mount) String() string { return proto.CompactTextString(m) } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{8} +} + +func (m *Mount) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Mount.Unmarshal(m, b) +} +func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Mount.Marshal(b, m, deterministic) +} +func (m *Mount) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mount.Merge(m, src) +} +func (m *Mount) XXX_Size() int { + return xxx_messageInfo_Mount.Size(m) +} +func (m *Mount) XXX_DiscardUnknown() { + xxx_messageInfo_Mount.DiscardUnknown(m) +} + +var xxx_messageInfo_Mount proto.InternalMessageInfo + +func (m *Mount) GetTaskPath() string { + if m != nil { + return m.TaskPath + } + return "" +} + +func (m *Mount) GetHostPath() string { + if m != nil { + return m.HostPath + } + return "" +} + +func (m *Mount) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +// DeviceSpec specifies a host device to mount into a task. +type DeviceSpec struct { + // Path of the device within the task. + TaskPath string `protobuf:"bytes,1,opt,name=task_path,json=taskPath,proto3" json:"task_path,omitempty"` + // Path of the device on the host. 
+ HostPath string `protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"` + // Cgroups permissions of the device, candidates are one or more of + // * r - allows task to read from the specified device. + // * w - allows task to write to the specified device. + // * m - allows task to create device files that do not yet exist + Permissions string `protobuf:"bytes,3,opt,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceSpec) Reset() { *m = DeviceSpec{} } +func (m *DeviceSpec) String() string { return proto.CompactTextString(m) } +func (*DeviceSpec) ProtoMessage() {} +func (*DeviceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{9} +} + +func (m *DeviceSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceSpec.Unmarshal(m, b) +} +func (m *DeviceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceSpec.Marshal(b, m, deterministic) +} +func (m *DeviceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceSpec.Merge(m, src) +} +func (m *DeviceSpec) XXX_Size() int { + return xxx_messageInfo_DeviceSpec.Size(m) +} +func (m *DeviceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceSpec proto.InternalMessageInfo + +func (m *DeviceSpec) GetTaskPath() string { + if m != nil { + return m.TaskPath + } + return "" +} + +func (m *DeviceSpec) GetHostPath() string { + if m != nil { + return m.HostPath + } + return "" +} + +func (m *DeviceSpec) GetPermissions() string { + if m != nil { + return m.Permissions + } + return "" +} + +// StatsRequest is used to parameterize the retrieval of statistics. +type StatsRequest struct { + // collection_interval is the duration in which to collect statistics. 
+ CollectionInterval *duration.Duration `protobuf:"bytes,1,opt,name=collection_interval,json=collectionInterval,proto3" json:"collection_interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatsRequest) Reset() { *m = StatsRequest{} } +func (m *StatsRequest) String() string { return proto.CompactTextString(m) } +func (*StatsRequest) ProtoMessage() {} +func (*StatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{10} +} + +func (m *StatsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatsRequest.Unmarshal(m, b) +} +func (m *StatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatsRequest.Marshal(b, m, deterministic) +} +func (m *StatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatsRequest.Merge(m, src) +} +func (m *StatsRequest) XXX_Size() int { + return xxx_messageInfo_StatsRequest.Size(m) +} +func (m *StatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatsRequest proto.InternalMessageInfo + +func (m *StatsRequest) GetCollectionInterval() *duration.Duration { + if m != nil { + return m.CollectionInterval + } + return nil +} + +// StatsResponse returns the statistics for each device group. +type StatsResponse struct { + // groups contains statistics for each device group. 
+ Groups []*DeviceGroupStats `protobuf:"bytes,1,rep,name=groups,proto3" json:"groups,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatsResponse) Reset() { *m = StatsResponse{} } +func (m *StatsResponse) String() string { return proto.CompactTextString(m) } +func (*StatsResponse) ProtoMessage() {} +func (*StatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{11} +} + +func (m *StatsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatsResponse.Unmarshal(m, b) +} +func (m *StatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatsResponse.Marshal(b, m, deterministic) +} +func (m *StatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatsResponse.Merge(m, src) +} +func (m *StatsResponse) XXX_Size() int { + return xxx_messageInfo_StatsResponse.Size(m) +} +func (m *StatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatsResponse proto.InternalMessageInfo + +func (m *StatsResponse) GetGroups() []*DeviceGroupStats { + if m != nil { + return m.Groups + } + return nil +} + +// DeviceGroupStats contains statistics for each device of a particular +// device group, identified by the vendor, type and name of the device. +type DeviceGroupStats struct { + Vendor string `protobuf:"bytes,1,opt,name=vendor,proto3" json:"vendor,omitempty"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // instance_stats is a mapping of each device ID to its statistics. 
+ InstanceStats map[string]*DeviceStats `protobuf:"bytes,4,rep,name=instance_stats,json=instanceStats,proto3" json:"instance_stats,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceGroupStats) Reset() { *m = DeviceGroupStats{} } +func (m *DeviceGroupStats) String() string { return proto.CompactTextString(m) } +func (*DeviceGroupStats) ProtoMessage() {} +func (*DeviceGroupStats) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{12} +} + +func (m *DeviceGroupStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceGroupStats.Unmarshal(m, b) +} +func (m *DeviceGroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceGroupStats.Marshal(b, m, deterministic) +} +func (m *DeviceGroupStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceGroupStats.Merge(m, src) +} +func (m *DeviceGroupStats) XXX_Size() int { + return xxx_messageInfo_DeviceGroupStats.Size(m) +} +func (m *DeviceGroupStats) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceGroupStats.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceGroupStats proto.InternalMessageInfo + +func (m *DeviceGroupStats) GetVendor() string { + if m != nil { + return m.Vendor + } + return "" +} + +func (m *DeviceGroupStats) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *DeviceGroupStats) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeviceGroupStats) GetInstanceStats() map[string]*DeviceStats { + if m != nil { + return m.InstanceStats + } + return nil +} + +// DeviceStats is the statistics for an individual device +type DeviceStats struct { + // summary exposes a single summary metric that should be the most + // informative to users. 
+ Summary *proto1.StatValue `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + // stats contains the verbose statistics for the device. + Stats *proto1.StatObject `protobuf:"bytes,2,opt,name=stats,proto3" json:"stats,omitempty"` + // timestamp is the time the statistics were collected. + Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceStats) Reset() { *m = DeviceStats{} } +func (m *DeviceStats) String() string { return proto.CompactTextString(m) } +func (*DeviceStats) ProtoMessage() {} +func (*DeviceStats) Descriptor() ([]byte, []int) { + return fileDescriptor_5edb0c35c07fa415, []int{13} +} + +func (m *DeviceStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceStats.Unmarshal(m, b) +} +func (m *DeviceStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceStats.Marshal(b, m, deterministic) +} +func (m *DeviceStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceStats.Merge(m, src) +} +func (m *DeviceStats) XXX_Size() int { + return xxx_messageInfo_DeviceStats.Size(m) +} +func (m *DeviceStats) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceStats.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceStats proto.InternalMessageInfo + +func (m *DeviceStats) GetSummary() *proto1.StatValue { + if m != nil { + return m.Summary + } + return nil +} + +func (m *DeviceStats) GetStats() *proto1.StatObject { + if m != nil { + return m.Stats + } + return nil +} + +func (m *DeviceStats) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func init() { + proto.RegisterType((*FingerprintRequest)(nil), "hashicorp.nomad.plugins.device.FingerprintRequest") + proto.RegisterType((*FingerprintResponse)(nil), "hashicorp.nomad.plugins.device.FingerprintResponse") + 
proto.RegisterType((*DeviceGroup)(nil), "hashicorp.nomad.plugins.device.DeviceGroup") + proto.RegisterMapType((map[string]*proto1.Attribute)(nil), "hashicorp.nomad.plugins.device.DeviceGroup.AttributesEntry") + proto.RegisterType((*DetectedDevice)(nil), "hashicorp.nomad.plugins.device.DetectedDevice") + proto.RegisterType((*DeviceLocality)(nil), "hashicorp.nomad.plugins.device.DeviceLocality") + proto.RegisterType((*ReserveRequest)(nil), "hashicorp.nomad.plugins.device.ReserveRequest") + proto.RegisterType((*ReserveResponse)(nil), "hashicorp.nomad.plugins.device.ReserveResponse") + proto.RegisterType((*ContainerReservation)(nil), "hashicorp.nomad.plugins.device.ContainerReservation") + proto.RegisterMapType((map[string]string)(nil), "hashicorp.nomad.plugins.device.ContainerReservation.EnvsEntry") + proto.RegisterType((*Mount)(nil), "hashicorp.nomad.plugins.device.Mount") + proto.RegisterType((*DeviceSpec)(nil), "hashicorp.nomad.plugins.device.DeviceSpec") + proto.RegisterType((*StatsRequest)(nil), "hashicorp.nomad.plugins.device.StatsRequest") + proto.RegisterType((*StatsResponse)(nil), "hashicorp.nomad.plugins.device.StatsResponse") + proto.RegisterType((*DeviceGroupStats)(nil), "hashicorp.nomad.plugins.device.DeviceGroupStats") + proto.RegisterMapType((map[string]*DeviceStats)(nil), "hashicorp.nomad.plugins.device.DeviceGroupStats.InstanceStatsEntry") + proto.RegisterType((*DeviceStats)(nil), "hashicorp.nomad.plugins.device.DeviceStats") +} + +func init() { + proto.RegisterFile("plugins/device/proto/device.proto", fileDescriptor_5edb0c35c07fa415) +} + +var fileDescriptor_5edb0c35c07fa415 = []byte{ + // 965 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xef, 0x8e, 0xdb, 0x44, + 0x10, 0x27, 0xc9, 0xe5, 0x92, 0x4c, 0xee, 0xae, 0x65, 0x7b, 0x42, 0xc6, 0x40, 0x7b, 0x58, 0x42, + 0x3a, 0x41, 0xeb, 0x94, 0x14, 0x89, 0x0a, 0x04, 0x52, 0xdb, 0x94, 0x5e, 0xf8, 0xd3, 0xab, 0xb6, + 0x15, 0x52, 0x8b, 0x84, 
0xb5, 0x67, 0x2f, 0xf1, 0xb6, 0xf6, 0xda, 0xec, 0xae, 0x53, 0x99, 0x4f, + 0x3c, 0x0e, 0x5f, 0x78, 0x01, 0x1e, 0x86, 0x0f, 0x3c, 0x09, 0xf2, 0xee, 0x3a, 0xf1, 0xfd, 0xe9, + 0x25, 0x81, 0x4f, 0xde, 0x9d, 0x99, 0xdf, 0xcc, 0xec, 0xcc, 0x6f, 0x67, 0x0d, 0x1f, 0xe6, 0x49, + 0x31, 0x63, 0x5c, 0x8e, 0x22, 0x3a, 0x67, 0x21, 0x1d, 0xe5, 0x22, 0x53, 0x99, 0xdd, 0xf8, 0x7a, + 0x83, 0xae, 0xc7, 0x44, 0xc6, 0x2c, 0xcc, 0x44, 0xee, 0xf3, 0x2c, 0x25, 0x91, 0x6f, 0x21, 0xbe, + 0xb1, 0x72, 0x6f, 0xcc, 0xb2, 0x6c, 0x96, 0x58, 0xe8, 0x49, 0xf1, 0xcb, 0x48, 0xb1, 0x94, 0x4a, + 0x45, 0xd2, 0xdc, 0x38, 0x70, 0xaf, 0x9f, 0x35, 0x88, 0x0a, 0x41, 0x14, 0xcb, 0xb8, 0xd5, 0xdf, + 0xac, 0x73, 0x90, 0x31, 0x11, 0x34, 0x1a, 0x49, 0x25, 0x8a, 0x50, 0x49, 0x9b, 0x0b, 0x51, 0x4a, + 0xb0, 0x93, 0x42, 0xd9, 0x74, 0xdc, 0xc3, 0x4b, 0xad, 0xa5, 0x22, 0x4a, 0x1a, 0x4b, 0x6f, 0x1f, + 0xd0, 0x37, 0x8c, 0xcf, 0xa8, 0xc8, 0x05, 0xe3, 0x0a, 0xd3, 0x5f, 0x0b, 0x2a, 0x95, 0x47, 0xe1, + 0xda, 0x29, 0xa9, 0xcc, 0x33, 0x2e, 0x29, 0x7a, 0x0c, 0x3b, 0xe6, 0x3c, 0xc1, 0x4c, 0x64, 0x45, + 0xee, 0xb4, 0x0e, 0x3a, 0x87, 0xc3, 0xf1, 0x27, 0xfe, 0xe5, 0x87, 0xf7, 0x27, 0xfa, 0xf3, 0xa8, + 0x82, 0xe0, 0x61, 0xb4, 0xdc, 0x78, 0xbf, 0x77, 0x60, 0xd8, 0x50, 0xa2, 0x77, 0x60, 0x7b, 0x4e, + 0x79, 0x94, 0x09, 0xa7, 0x75, 0xd0, 0x3a, 0x1c, 0x60, 0xbb, 0x43, 0x37, 0xc0, 0xc2, 0x02, 0x55, + 0xe6, 0xd4, 0x69, 0x6b, 0x25, 0x18, 0xd1, 0xb3, 0x32, 0xa7, 0x0d, 0x03, 0x4e, 0x52, 0xea, 0x74, + 0x9a, 0x06, 0x8f, 0x49, 0x4a, 0xd1, 0x11, 0xf4, 0xcc, 0x4e, 0x3a, 0x5b, 0x3a, 0x69, 0x7f, 0x75, + 0xd2, 0x8a, 0x86, 0x8a, 0x46, 0x26, 0x3f, 0x5c, 0xc3, 0xd1, 0x4f, 0x00, 0x8b, 0x6a, 0x4b, 0xa7, + 0xab, 0x9d, 0x7d, 0xb9, 0x41, 0x05, 0xfc, 0x7b, 0x0b, 0xf4, 0x43, 0xae, 0x44, 0x89, 0x1b, 0xee, + 0xdc, 0x1c, 0xae, 0x9c, 0x51, 0xa3, 0xab, 0xd0, 0x79, 0x45, 0x4b, 0x5b, 0x90, 0x6a, 0x89, 0x1e, + 0x41, 0x77, 0x4e, 0x92, 0xc2, 0xd4, 0x61, 0x38, 0xfe, 0xf4, 0x8d, 0xc1, 0x4d, 0xf3, 0x7d, 0xdb, + 0xfc, 0x65, 0x60, 0x6c, 0xf0, 0x5f, 0xb4, 0xef, 0xb6, 0xbc, 
0xbf, 0x5a, 0xb0, 0x77, 0xfa, 0xa8, + 0x68, 0x0f, 0xda, 0xd3, 0x89, 0x0d, 0xd8, 0x9e, 0x4e, 0x90, 0x03, 0xbd, 0x98, 0x92, 0x44, 0xc5, + 0xa5, 0x8e, 0xd8, 0xc7, 0xf5, 0x16, 0xdd, 0x02, 0x64, 0x96, 0x41, 0x44, 0x65, 0x28, 0x58, 0x5e, + 0x11, 0xd6, 0x56, 0xff, 0x6d, 0xa3, 0x99, 0x2c, 0x15, 0xe8, 0x18, 0x86, 0xf1, 0xeb, 0x20, 0xc9, + 0x42, 0x92, 0x30, 0x55, 0x3a, 0x5b, 0x3a, 0x7d, 0x7f, 0xbd, 0xda, 0x7d, 0x6f, 0x51, 0x18, 0xe2, + 0xd7, 0xf5, 0xda, 0xf3, 0xab, 0xdc, 0x9b, 0x5a, 0xf4, 0x3e, 0x40, 0x1e, 0xb2, 0xe0, 0xa4, 0x90, + 0x01, 0x8b, 0xec, 0x19, 0xfa, 0x79, 0xc8, 0xee, 0x17, 0x72, 0x1a, 0x79, 0x23, 0xd8, 0xc3, 0x54, + 0x52, 0x31, 0xa7, 0x96, 0xe8, 0xe8, 0x03, 0xb0, 0x2c, 0x09, 0x58, 0x24, 0x35, 0x9f, 0x07, 0x78, + 0x60, 0x24, 0xd3, 0x48, 0x7a, 0x09, 0x5c, 0x59, 0x00, 0xec, 0x1d, 0x78, 0x0e, 0xbb, 0x61, 0xc6, + 0x15, 0x61, 0x9c, 0x8a, 0x40, 0x50, 0xa9, 0x83, 0x0c, 0xc7, 0x9f, 0xad, 0x3a, 0xc6, 0x83, 0x1a, + 0x64, 0x1c, 0xea, 0xbb, 0x8d, 0x77, 0xc2, 0x86, 0xd4, 0xfb, 0xa3, 0x0d, 0xfb, 0x17, 0x99, 0x21, + 0x0c, 0x5b, 0x94, 0xcf, 0xa5, 0xbd, 0x6f, 0x5f, 0xff, 0x97, 0x50, 0xfe, 0x43, 0x3e, 0xb7, 0x84, + 0xd3, 0xbe, 0xd0, 0x57, 0xb0, 0x9d, 0x66, 0x05, 0x57, 0xd2, 0x69, 0x6b, 0xaf, 0x1f, 0xad, 0xf2, + 0xfa, 0x43, 0x65, 0x8d, 0x2d, 0x08, 0x4d, 0x96, 0x17, 0xaa, 0xa3, 0xf1, 0x1f, 0xaf, 0xd7, 0xc7, + 0xa7, 0x39, 0x0d, 0x17, 0x97, 0xc9, 0xfd, 0x1c, 0x06, 0x8b, 0xbc, 0x2e, 0x60, 0xfa, 0x7e, 0x93, + 0xe9, 0x83, 0x26, 0x6d, 0x7f, 0x86, 0xae, 0xce, 0x07, 0xbd, 0x07, 0x03, 0x45, 0xe4, 0xab, 0x20, + 0x27, 0x2a, 0xae, 0xfb, 0x5d, 0x09, 0x9e, 0x10, 0x15, 0x57, 0xca, 0x38, 0x93, 0xca, 0x28, 0x8d, + 0x8f, 0x7e, 0x25, 0xa8, 0x95, 0x82, 0x92, 0x28, 0xc8, 0x78, 0x52, 0x6a, 0xce, 0xf6, 0x71, 0xbf, + 0x12, 0x1c, 0xf3, 0xa4, 0xf4, 0x62, 0x80, 0x65, 0xbe, 0xff, 0x23, 0xc8, 0x01, 0x0c, 0x73, 0x2a, + 0x52, 0x26, 0x25, 0xcb, 0xb8, 0xb4, 0x57, 0xa3, 0x29, 0xf2, 0x5e, 0xc0, 0xce, 0xd3, 0x6a, 0x1e, + 0xd7, 0x8c, 0xfc, 0x16, 0xae, 0x85, 0x59, 0x92, 0xd0, 0xb0, 0xea, 0x5a, 0xc0, 0xb8, 0xaa, 0x3a, + 
0x98, 0x58, 0x96, 0xbd, 0xeb, 0x9b, 0x67, 0xc2, 0xaf, 0x9f, 0x09, 0x7f, 0x62, 0x9f, 0x09, 0x8c, + 0x96, 0xa8, 0xa9, 0x05, 0x79, 0xcf, 0x61, 0xd7, 0xfa, 0xb6, 0xe4, 0x3d, 0x82, 0x6d, 0x3d, 0xb9, + 0x6b, 0x2a, 0xdd, 0xde, 0x60, 0x70, 0x19, 0x4f, 0x16, 0xef, 0xfd, 0xd9, 0x86, 0xab, 0x67, 0x95, + 0x6f, 0x9c, 0xdf, 0x08, 0xb6, 0x1a, 0x83, 0x5b, 0xaf, 0x2b, 0x59, 0x63, 0x56, 0xeb, 0x35, 0x7a, + 0x09, 0x7b, 0x8c, 0x4b, 0x45, 0x78, 0x48, 0x03, 0xfd, 0x48, 0xd9, 0x61, 0xfd, 0x60, 0xd3, 0x34, + 0xfd, 0xa9, 0x75, 0xa3, 0x77, 0x86, 0xf6, 0xbb, 0xac, 0x29, 0x73, 0x53, 0x40, 0xe7, 0x8d, 0x2e, + 0xe0, 0xe0, 0xbd, 0xd3, 0xd3, 0x76, 0xcd, 0xc7, 0xce, 0x14, 0xab, 0x41, 0xd8, 0xbf, 0x5b, 0xf5, + 0x53, 0x67, 0x4a, 0xf5, 0x1d, 0xf4, 0x64, 0x91, 0xa6, 0x44, 0x94, 0xb6, 0xb5, 0x6b, 0x8f, 0xf1, + 0x0a, 0xff, 0x63, 0xe5, 0x17, 0xd7, 0x1e, 0xd0, 0x11, 0x74, 0x4d, 0xb9, 0x4c, 0x8e, 0xe3, 0x4d, + 0x5c, 0x1d, 0x9f, 0xbc, 0xa4, 0xa1, 0xc2, 0xc6, 0x01, 0xba, 0x0b, 0x83, 0xc5, 0x9f, 0x89, 0x6e, + 0xcd, 0x70, 0xec, 0x9e, 0xe3, 0xdc, 0xb3, 0xda, 0x02, 0x2f, 0x8d, 0xc7, 0xff, 0xb4, 0x61, 0xc7, + 0x1c, 0xf0, 0x89, 0x0e, 0x86, 0x7e, 0x83, 0x61, 0xe3, 0x1f, 0x02, 0x8d, 0x57, 0x15, 0xee, 0xfc, + 0x6f, 0x88, 0x7b, 0x67, 0x23, 0x8c, 0xe1, 0xb8, 0xf7, 0xd6, 0xed, 0x16, 0x4a, 0xa0, 0x67, 0xe7, + 0x36, 0x5a, 0xf9, 0xbe, 0x9c, 0x7e, 0x11, 0xdc, 0xd1, 0xda, 0xf6, 0x75, 0x3c, 0x14, 0x43, 0xd7, + 0x34, 0xf5, 0xe6, 0x2a, 0x6c, 0xf3, 0xa6, 0xbb, 0xb7, 0xd6, 0xb4, 0x5e, 0x9e, 0xeb, 0x7e, 0xef, + 0x45, 0xd7, 0x74, 0x61, 0x5b, 0x7f, 0xee, 0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x11, 0xd4, 0x56, + 0x04, 0x9b, 0x0a, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// DevicePluginClient is the client API for DevicePlugin service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type DevicePluginClient interface { + // Fingerprint allows the device plugin to return a set of + // detected devices and provide a mechanism to update the state of + // the device. + Fingerprint(ctx context.Context, in *FingerprintRequest, opts ...grpc.CallOption) (DevicePlugin_FingerprintClient, error) + // Reserve is called by the client before starting an allocation + // that requires access to the plugin’s devices. The plugin can use + // this to run any setup steps and provides the mounting details to + // the Nomad client + Reserve(ctx context.Context, in *ReserveRequest, opts ...grpc.CallOption) (*ReserveResponse, error) + // Stats returns a stream of device statistics. + Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (DevicePlugin_StatsClient, error) +} + +type devicePluginClient struct { + cc grpc.ClientConnInterface +} + +func NewDevicePluginClient(cc grpc.ClientConnInterface) DevicePluginClient { + return &devicePluginClient{cc} +} + +func (c *devicePluginClient) Fingerprint(ctx context.Context, in *FingerprintRequest, opts ...grpc.CallOption) (DevicePlugin_FingerprintClient, error) { + stream, err := c.cc.NewStream(ctx, &_DevicePlugin_serviceDesc.Streams[0], "/hashicorp.nomad.plugins.device.DevicePlugin/Fingerprint", opts...) 
+ if err != nil { + return nil, err + } + x := &devicePluginFingerprintClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type DevicePlugin_FingerprintClient interface { + Recv() (*FingerprintResponse, error) + grpc.ClientStream +} + +type devicePluginFingerprintClient struct { + grpc.ClientStream +} + +func (x *devicePluginFingerprintClient) Recv() (*FingerprintResponse, error) { + m := new(FingerprintResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *devicePluginClient) Reserve(ctx context.Context, in *ReserveRequest, opts ...grpc.CallOption) (*ReserveResponse, error) { + out := new(ReserveResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.device.DevicePlugin/Reserve", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *devicePluginClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (DevicePlugin_StatsClient, error) { + stream, err := c.cc.NewStream(ctx, &_DevicePlugin_serviceDesc.Streams[1], "/hashicorp.nomad.plugins.device.DevicePlugin/Stats", opts...) + if err != nil { + return nil, err + } + x := &devicePluginStatsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type DevicePlugin_StatsClient interface { + Recv() (*StatsResponse, error) + grpc.ClientStream +} + +type devicePluginStatsClient struct { + grpc.ClientStream +} + +func (x *devicePluginStatsClient) Recv() (*StatsResponse, error) { + m := new(StatsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// DevicePluginServer is the server API for DevicePlugin service. 
+type DevicePluginServer interface { + // Fingerprint allows the device plugin to return a set of + // detected devices and provide a mechanism to update the state of + // the device. + Fingerprint(*FingerprintRequest, DevicePlugin_FingerprintServer) error + // Reserve is called by the client before starting an allocation + // that requires access to the plugin’s devices. The plugin can use + // this to run any setup steps and provides the mounting details to + // the Nomad client + Reserve(context.Context, *ReserveRequest) (*ReserveResponse, error) + // Stats returns a stream of device statistics. + Stats(*StatsRequest, DevicePlugin_StatsServer) error +} + +// UnimplementedDevicePluginServer can be embedded to have forward compatible implementations. +type UnimplementedDevicePluginServer struct { +} + +func (*UnimplementedDevicePluginServer) Fingerprint(req *FingerprintRequest, srv DevicePlugin_FingerprintServer) error { + return status.Errorf(codes.Unimplemented, "method Fingerprint not implemented") +} +func (*UnimplementedDevicePluginServer) Reserve(ctx context.Context, req *ReserveRequest) (*ReserveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Reserve not implemented") +} +func (*UnimplementedDevicePluginServer) Stats(req *StatsRequest, srv DevicePlugin_StatsServer) error { + return status.Errorf(codes.Unimplemented, "method Stats not implemented") +} + +func RegisterDevicePluginServer(s *grpc.Server, srv DevicePluginServer) { + s.RegisterService(&_DevicePlugin_serviceDesc, srv) +} + +func _DevicePlugin_Fingerprint_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(FingerprintRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DevicePluginServer).Fingerprint(m, &devicePluginFingerprintServer{stream}) +} + +type DevicePlugin_FingerprintServer interface { + Send(*FingerprintResponse) error + grpc.ServerStream +} + +type devicePluginFingerprintServer struct { + grpc.ServerStream +} + 
+func (x *devicePluginFingerprintServer) Send(m *FingerprintResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _DevicePlugin_Reserve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReserveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DevicePluginServer).Reserve(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.device.DevicePlugin/Reserve", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DevicePluginServer).Reserve(ctx, req.(*ReserveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DevicePlugin_Stats_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StatsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DevicePluginServer).Stats(m, &devicePluginStatsServer{stream}) +} + +type DevicePlugin_StatsServer interface { + Send(*StatsResponse) error + grpc.ServerStream +} + +type devicePluginStatsServer struct { + grpc.ServerStream +} + +func (x *devicePluginStatsServer) Send(m *StatsResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _DevicePlugin_serviceDesc = grpc.ServiceDesc{ + ServiceName: "hashicorp.nomad.plugins.device.DevicePlugin", + HandlerType: (*DevicePluginServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Reserve", + Handler: _DevicePlugin_Reserve_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Fingerprint", + Handler: _DevicePlugin_Fingerprint_Handler, + ServerStreams: true, + }, + { + StreamName: "Stats", + Handler: _DevicePlugin_Stats_Handler, + ServerStreams: true, + }, + }, + Metadata: "plugins/device/proto/device.proto", +} diff --git a/plugin_interface/device/proto/device.proto b/plugin_interface/device/proto/device.proto new file mode 100644 index 
00000000000..8c9a498e254 --- /dev/null +++ b/plugin_interface/device/proto/device.proto @@ -0,0 +1,179 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; +package hashicorp.nomad.plugins.device; +option go_package = "proto"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; +import "plugins/shared/structs/proto/attribute.proto"; +import "plugins/shared/structs/proto/stats.proto"; + +// DevicePlugin is the API exposed by device plugins +service DevicePlugin { + // Fingerprint allows the device plugin to return a set of + // detected devices and provide a mechanism to update the state of + // the device. + rpc Fingerprint(FingerprintRequest) returns (stream FingerprintResponse) {} + + // Reserve is called by the client before starting an allocation + // that requires access to the plugin’s devices. The plugin can use + // this to run any setup steps and provides the mounting details to + // the Nomad client + rpc Reserve(ReserveRequest) returns (ReserveResponse) {} + + // Stats returns a stream of device statistics. + rpc Stats(StatsRequest) returns (stream StatsResponse) {} +} + +// FingerprintRequest is used to request for devices to be fingerprinted. +message FingerprintRequest {} + +// FingerprintResponse returns a set of detected devices. +message FingerprintResponse { + // device_group is a group of devices that share a vendor, device_type, and + // device_name. This is returned as a set so that a single plugin could + // potentially detect several device types and models. + repeated DeviceGroup device_group = 1; +} + +// DeviceGroup is a group of devices that share a vendor, device type and name. +message DeviceGroup { + // vendor is the name of the vendor of the device + string vendor = 1; + + // device_type is the type of the device (gpu, fpga, etc). + string device_type = 2; + + // device_name is the name of the device. 
+ string device_name = 3; + + // devices is the set of devices detected by the plugin. + repeated DetectedDevice devices = 4; + + // attributes allows adding attributes to be used for constraints or + // affinities. + map attributes = 5; +} + +// DetectedDevice is a single detected device. +message DetectedDevice { + // ID is the ID of the device. This ID is used during allocation and must be + // stable across restarts of the device driver. + // buf:lint:ignore FIELD_LOWER_SNAKE_CASE + string ID = 1; + + // Health of the device. + bool healthy = 2; + + // health_description allows the device plugin to optionally + // annotate the health field with a human readable reason. + string health_description = 3; + + // hw_locality is optionally set to expose hardware locality information for + // more optimal placement decisions. + DeviceLocality hw_locality = 4; +} + +// DeviceLocality is used to expose HW locality information about a device. +message DeviceLocality { + // pci_bus_id is the PCI bus ID for the device. If reported, it + // allows Nomad to make NUMA aware optimizations. + string pci_bus_id = 1; +} + + +// ReserveRequest is used to ask the device driver for information on +// how to allocate the requested devices. +message ReserveRequest { + // device_ids are the requested devices. + repeated string device_ids = 1; +} + +// ReserveResponse informs Nomad how to expose the requested devices +// to the task. +message ReserveResponse { + // container_res contains information on how to mount the device + // into a task isolated using container technologies (where the + // host is shared) + ContainerReservation container_res = 1; +} + +// ContainerReservation returns how to mount the device into a +// container that shares the host OS. +message ContainerReservation { + // List of environment variable to be set + map envs = 1; + + // Mounts for the task. + repeated Mount mounts = 2; + + // Devices for the task. 
+ repeated DeviceSpec devices = 3; +} + +// Mount specifies a host volume to mount into a task. +// where device library or tools are installed on host and task +message Mount { + // Path of the mount within the task. + string task_path = 1; + + // Path of the mount on the host. + string host_path = 2; + + // If set, the mount is read-only. + bool read_only = 3; +} + +// DeviceSpec specifies a host device to mount into a task. +message DeviceSpec { + // Path of the device within the task. + string task_path = 1; + + // Path of the device on the host. + string host_path = 2; + + // Cgroups permissions of the device, candidates are one or more of + // * r - allows task to read from the specified device. + // * w - allows task to write to the specified device. + // * m - allows task to create device files that do not yet exist + string permissions = 3; +} + + +// StatsRequest is used to parameterize the retrieval of statistics. +message StatsRequest { + // collection_interval is the duration in which to collect statistics. + google.protobuf.Duration collection_interval = 1; +} + +// StatsResponse returns the statistics for each device group. +message StatsResponse { + // groups contains statistics for each device group. + repeated DeviceGroupStats groups = 1; +} + +// DeviceGroupStats contains statistics for each device of a particular +// device group, identified by the vendor, type and name of the device. +message DeviceGroupStats { + string vendor = 1; + string type = 2; + string name = 3; + + // instance_stats is a mapping of each device ID to its statistics. + map instance_stats = 4; +} + +// DeviceStats is the statistics for an individual device +message DeviceStats { + // summary exposes a single summary metric that should be the most + // informative to users. + hashicorp.nomad.plugins.shared.structs.StatValue summary = 1; + + // stats contains the verbose statistics for the device. 
+ hashicorp.nomad.plugins.shared.structs.StatObject stats = 2; + + // timestamp is the time the statistics were collected. + google.protobuf.Timestamp timestamp = 3; +} diff --git a/plugin_interface/device/server.go b/plugin_interface/device/server.go new file mode 100644 index 00000000000..53860782a62 --- /dev/null +++ b/plugin_interface/device/server.go @@ -0,0 +1,123 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package device + +import ( + "context" + "fmt" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/go-plugin" + + "github.com/hashicorp/nomad/plugin-interface/device/proto" +) + +// devicePluginServer wraps a device plugin and exposes it via gRPC. +type devicePluginServer struct { + broker *plugin.GRPCBroker + impl DevicePlugin +} + +func (d *devicePluginServer) Fingerprint(req *proto.FingerprintRequest, stream proto.DevicePlugin_FingerprintServer) error { + ctx := stream.Context() + outCh, err := d.impl.Fingerprint(ctx) + if err != nil { + return err + } + + for { + select { + case <-ctx.Done(): + return nil + case resp, ok := <-outCh: + // The output channel has been closed, end the stream + if !ok { + return nil + } + + // Handle any error + if resp.Error != nil { + return resp.Error + } + + // Convert the devices + out := convertStructDeviceGroups(resp.Devices) + + // Build the response + presp := &proto.FingerprintResponse{ + DeviceGroup: out, + } + + // Send the devices + if err := stream.Send(presp); err != nil { + return err + } + } + } +} + +func (d *devicePluginServer) Reserve(ctx context.Context, req *proto.ReserveRequest) (*proto.ReserveResponse, error) { + resp, err := d.impl.Reserve(req.GetDeviceIds()) + if err != nil { + return nil, err + } + + // Make the response + presp := &proto.ReserveResponse{ + ContainerRes: convertStructContainerReservation(resp), + } + + return presp, nil +} + +func (d *devicePluginServer) Stats(req *proto.StatsRequest, stream proto.DevicePlugin_StatsServer) 
error { + ctx := stream.Context() + + // Retrieve the collection interval + interval, err := ptypes.Duration(req.CollectionInterval) + if err != nil { + return fmt.Errorf("failed to parse collection interval: %v", err) + } + + // Default the duration if we get an invalid duration + if interval.Nanoseconds() == 0 { + interval = time.Second + } + + outCh, err := d.impl.Stats(ctx, interval) + if err != nil { + return err + } + + for { + select { + case <-ctx.Done(): + return nil + case resp, ok := <-outCh: + // The output channel has been closed, end the stream + if !ok { + return nil + } + + // Handle any error + if resp.Error != nil { + return resp.Error + } + + // Convert the devices + out := convertStructDeviceGroupsStats(resp.Groups) + + // Build the response + presp := &proto.StatsResponse{ + Groups: out, + } + + // Send the devices + if err := stream.Send(presp); err != nil { + return err + } + } + } +} diff --git a/plugin_interface/device/util.go b/plugin_interface/device/util.go new file mode 100644 index 00000000000..116187e0f10 --- /dev/null +++ b/plugin_interface/device/util.go @@ -0,0 +1,390 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package device + +import ( + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/nomad/plugin-interface/device/proto" + "github.com/hashicorp/nomad/plugin-interface/shared/structs" +) + +// convertProtoDeviceGroups converts between a list of proto and structs DeviceGroup +func convertProtoDeviceGroups(in []*proto.DeviceGroup) []*DeviceGroup { + if in == nil { + return nil + } + + out := make([]*DeviceGroup, len(in)) + for i, group := range in { + out[i] = convertProtoDeviceGroup(group) + } + + return out +} + +// convertProtoDeviceGroup converts between a proto and structs DeviceGroup +func convertProtoDeviceGroup(in *proto.DeviceGroup) *DeviceGroup { + if in == nil { + return nil + } + + return &DeviceGroup{ + Vendor: in.Vendor, + Type: in.DeviceType, + Name: in.DeviceName, + Devices: convertProtoDevices(in.Devices), + Attributes: structs.ConvertProtoAttributeMap(in.Attributes), + } +} + +// convertProtoDevices converts between a list of proto and structs Device +func convertProtoDevices(in []*proto.DetectedDevice) []*Device { + if in == nil { + return nil + } + + out := make([]*Device, len(in)) + for i, d := range in { + out[i] = convertProtoDevice(d) + } + + return out +} + +// convertProtoDevice converts between a proto and structs Device +func convertProtoDevice(in *proto.DetectedDevice) *Device { + if in == nil { + return nil + } + + return &Device{ + ID: in.ID, + Healthy: in.Healthy, + HealthDesc: in.HealthDescription, + HwLocality: convertProtoDeviceLocality(in.HwLocality), + } +} + +// convertProtoDeviceLocality converts between a proto and structs DeviceLocality +func convertProtoDeviceLocality(in *proto.DeviceLocality) *DeviceLocality { + if in == nil { + return nil + } + + return &DeviceLocality{ + PciBusID: in.PciBusId, + } +} + +// convertProtoContainerReservation is used to convert between a proto and struct +// ContainerReservation +func convertProtoContainerReservation(in 
*proto.ContainerReservation) *ContainerReservation { + if in == nil { + return nil + } + + return &ContainerReservation{ + Envs: in.Envs, + Mounts: convertProtoMounts(in.Mounts), + Devices: convertProtoDeviceSpecs(in.Devices), + } +} + +// convertProtoMount converts between a list of proto and structs Mount +func convertProtoMounts(in []*proto.Mount) []*Mount { + if in == nil { + return nil + } + + out := make([]*Mount, len(in)) + for i, d := range in { + out[i] = convertProtoMount(d) + } + + return out +} + +// convertProtoMount converts between a proto and structs Mount +func convertProtoMount(in *proto.Mount) *Mount { + if in == nil { + return nil + } + + return &Mount{ + TaskPath: in.TaskPath, + HostPath: in.HostPath, + ReadOnly: in.ReadOnly, + } +} + +// convertProtoDeviceSpecs converts between a list of proto and structs DeviceSpecs +func convertProtoDeviceSpecs(in []*proto.DeviceSpec) []*DeviceSpec { + if in == nil { + return nil + } + + out := make([]*DeviceSpec, len(in)) + for i, d := range in { + out[i] = convertProtoDeviceSpec(d) + } + + return out +} + +// convertProtoDeviceSpec converts between a proto and structs DeviceSpec +func convertProtoDeviceSpec(in *proto.DeviceSpec) *DeviceSpec { + if in == nil { + return nil + } + + return &DeviceSpec{ + TaskPath: in.TaskPath, + HostPath: in.HostPath, + CgroupPerms: in.Permissions, + } +} + +// convertStructDeviceGroup converts between a list of struct and proto DeviceGroup +func convertStructDeviceGroups(in []*DeviceGroup) []*proto.DeviceGroup { + if in == nil { + return nil + } + + out := make([]*proto.DeviceGroup, len(in)) + for i, g := range in { + out[i] = convertStructDeviceGroup(g) + } + + return out +} + +// convertStructDeviceGroup converts between a struct and proto DeviceGroup +func convertStructDeviceGroup(in *DeviceGroup) *proto.DeviceGroup { + if in == nil { + return nil + } + + return &proto.DeviceGroup{ + Vendor: in.Vendor, + DeviceType: in.Type, + DeviceName: in.Name, + Devices: 
convertStructDevices(in.Devices), + Attributes: structs.ConvertStructAttributeMap(in.Attributes), + } +} + +// convertStructDevices converts between a list of struct and proto Device +func convertStructDevices(in []*Device) []*proto.DetectedDevice { + if in == nil { + return nil + } + + out := make([]*proto.DetectedDevice, len(in)) + for i, d := range in { + out[i] = convertStructDevice(d) + } + + return out +} + +// convertStructDevice converts between a struct and proto Device +func convertStructDevice(in *Device) *proto.DetectedDevice { + if in == nil { + return nil + } + + return &proto.DetectedDevice{ + ID: in.ID, + Healthy: in.Healthy, + HealthDescription: in.HealthDesc, + HwLocality: convertStructDeviceLocality(in.HwLocality), + } +} + +// convertStructDeviceLocality converts between a struct and proto DeviceLocality +func convertStructDeviceLocality(in *DeviceLocality) *proto.DeviceLocality { + if in == nil { + return nil + } + + return &proto.DeviceLocality{ + PciBusId: in.PciBusID, + } +} + +// convertStructContainerReservation is used to convert between a struct and +// proto ContainerReservation +func convertStructContainerReservation(in *ContainerReservation) *proto.ContainerReservation { + if in == nil { + return nil + } + + return &proto.ContainerReservation{ + Envs: in.Envs, + Mounts: convertStructMounts(in.Mounts), + Devices: convertStructDeviceSpecs(in.Devices), + } +} + +// convertStructMount converts between a list of structs and proto Mount +func convertStructMounts(in []*Mount) []*proto.Mount { + if in == nil { + return nil + } + + out := make([]*proto.Mount, len(in)) + for i, m := range in { + out[i] = convertStructMount(m) + } + + return out +} + +// convertStructMount converts between a struct and proto Mount +func convertStructMount(in *Mount) *proto.Mount { + if in == nil { + return nil + } + + return &proto.Mount{ + TaskPath: in.TaskPath, + HostPath: in.HostPath, + ReadOnly: in.ReadOnly, + } +} + +// convertStructDeviceSpecs converts 
between a list of struct and proto DeviceSpecs +func convertStructDeviceSpecs(in []*DeviceSpec) []*proto.DeviceSpec { + if in == nil { + return nil + } + + out := make([]*proto.DeviceSpec, len(in)) + for i, d := range in { + out[i] = convertStructDeviceSpec(d) + } + + return out +} + +// convertStructDeviceSpec converts between a struct and proto DeviceSpec +func convertStructDeviceSpec(in *DeviceSpec) *proto.DeviceSpec { + if in == nil { + return nil + } + + return &proto.DeviceSpec{ + TaskPath: in.TaskPath, + HostPath: in.HostPath, + Permissions: in.CgroupPerms, + } +} + +// convertProtoDeviceGroupsStats converts between a list of struct and proto +// DeviceGroupStats +func convertProtoDeviceGroupsStats(in []*proto.DeviceGroupStats) []*DeviceGroupStats { + if in == nil { + return nil + } + + out := make([]*DeviceGroupStats, len(in)) + for i, m := range in { + out[i] = convertProtoDeviceGroupStats(m) + } + + return out +} + +// convertProtoDeviceGroupStats converts between a proto and struct +// DeviceGroupStats +func convertProtoDeviceGroupStats(in *proto.DeviceGroupStats) *DeviceGroupStats { + if in == nil { + return nil + } + + out := &DeviceGroupStats{ + Vendor: in.Vendor, + Type: in.Type, + Name: in.Name, + InstanceStats: make(map[string]*DeviceStats, len(in.InstanceStats)), + } + + for k, v := range in.InstanceStats { + out.InstanceStats[k] = convertProtoDeviceStats(v) + } + + return out +} + +// convertProtoDeviceStats converts between a proto and struct DeviceStats +func convertProtoDeviceStats(in *proto.DeviceStats) *DeviceStats { + if in == nil { + return nil + } + + ts, err := ptypes.Timestamp(in.Timestamp) + if err != nil { + return nil + } + + return &DeviceStats{ + Summary: structs.ConvertProtoStatValue(in.Summary), + Stats: structs.ConvertProtoStatObject(in.Stats), + Timestamp: ts, + } +} + +// convertStructDeviceGroupsStats converts between a list of struct and proto +// DeviceGroupStats +func convertStructDeviceGroupsStats(in []*DeviceGroupStats) 
[]*proto.DeviceGroupStats { + if in == nil { + return nil + } + + out := make([]*proto.DeviceGroupStats, len(in)) + for i, m := range in { + out[i] = convertStructDeviceGroupStats(m) + } + + return out +} + +// convertStructDeviceGroupStats converts between a struct and proto +// DeviceGroupStats +func convertStructDeviceGroupStats(in *DeviceGroupStats) *proto.DeviceGroupStats { + if in == nil { + return nil + } + + out := &proto.DeviceGroupStats{ + Vendor: in.Vendor, + Type: in.Type, + Name: in.Name, + InstanceStats: make(map[string]*proto.DeviceStats, len(in.InstanceStats)), + } + + for k, v := range in.InstanceStats { + out.InstanceStats[k] = convertStructDeviceStats(v) + } + + return out +} + +// convertStructDeviceStats converts between a struct and proto DeviceStats +func convertStructDeviceStats(in *DeviceStats) *proto.DeviceStats { + if in == nil { + return nil + } + + ts, err := ptypes.TimestampProto(in.Timestamp) + if err != nil { + return nil + } + + return &proto.DeviceStats{ + Summary: structs.ConvertStructStatValue(in.Summary), + Stats: structs.ConvertStructStatObject(in.Stats), + Timestamp: ts, + } +} diff --git a/plugin_interface/device/versions.go b/plugin_interface/device/versions.go new file mode 100644 index 00000000000..bb2f0a3e25b --- /dev/null +++ b/plugin_interface/device/versions.go @@ -0,0 +1,9 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package device + +const ( + // ApiVersion010 is the initial API version for the device plugins + ApiVersion010 = "v0.1.0" +) diff --git a/plugin_interface/drivers/client.go b/plugin_interface/drivers/client.go new file mode 100644 index 00000000000..b3c4103f709 --- /dev/null +++ b/plugin_interface/drivers/client.go @@ -0,0 +1,519 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package drivers + +import ( + "context" + "errors" + "io" + "time" + + "github.com/LK4D4/joincontext" + "github.com/golang/protobuf/ptypes" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/plugin-interface/base" + "github.com/hashicorp/nomad/plugin-interface/drivers/fsisolation" + "github.com/hashicorp/nomad/plugin-interface/drivers/proto" + "github.com/hashicorp/nomad/plugin-interface/helper" + "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" + "github.com/hashicorp/nomad/plugin-interface/shared/structs" + pstructs "github.com/hashicorp/nomad/plugin-interface/shared/structs" + sproto "github.com/hashicorp/nomad/plugin-interface/shared/structs/proto" + "google.golang.org/grpc/status" +) + +var _ DriverPlugin = &driverPluginClient{} + +type driverPluginClient struct { + *base.BasePluginClient + + client proto.DriverClient + logger hclog.Logger + + // doneCtx is closed when the plugin exits + doneCtx context.Context +} + +func (d *driverPluginClient) TaskConfigSchema() (*hclspec.Spec, error) { + req := &proto.TaskConfigSchemaRequest{} + + resp, err := d.client.TaskConfigSchema(d.doneCtx, req) + if err != nil { + return nil, helper.HandleGrpcErr(err, d.doneCtx) + } + + return resp.Spec, nil +} + +func (d *driverPluginClient) Capabilities() (*Capabilities, error) { + req := &proto.CapabilitiesRequest{} + + resp, err := d.client.Capabilities(d.doneCtx, req) + if err != nil { + return nil, helper.HandleGrpcErr(err, d.doneCtx) + } + + caps := &Capabilities{} + if resp.Capabilities != nil { + caps.SendSignals = resp.Capabilities.SendSignals + caps.Exec = resp.Capabilities.Exec + caps.MustInitiateNetwork = resp.Capabilities.MustCreateNetwork + + for _, mode := range resp.Capabilities.NetworkIsolationModes { + caps.NetIsolationModes = append(caps.NetIsolationModes, netIsolationModeFromProto(mode)) + } + + switch resp.Capabilities.FsIsolation { + case proto.DriverCapabilities_NONE: + 
caps.FSIsolation = fsisolation.None + case proto.DriverCapabilities_CHROOT: + caps.FSIsolation = fsisolation.Chroot + case proto.DriverCapabilities_IMAGE: + caps.FSIsolation = fsisolation.Image + case proto.DriverCapabilities_UNVEIL: + caps.FSIsolation = fsisolation.Unveil + default: + caps.FSIsolation = fsisolation.None + } + + caps.MountConfigs = MountConfigSupport(resp.Capabilities.MountConfigs) + caps.DisableLogCollection = resp.Capabilities.DisableLogCollection + caps.DynamicWorkloadUsers = resp.Capabilities.DynamicWorkloadUsers + } + + return caps, nil +} + +// Fingerprint the driver, return a chan that will be pushed to periodically and on changes to health +func (d *driverPluginClient) Fingerprint(ctx context.Context) (<-chan *Fingerprint, error) { + req := &proto.FingerprintRequest{} + + // Join the passed context and the shutdown context + joinedCtx, _ := joincontext.Join(ctx, d.doneCtx) + + stream, err := d.client.Fingerprint(joinedCtx, req) + if err != nil { + return nil, helper.HandleReqCtxGrpcErr(err, ctx, d.doneCtx) + } + + ch := make(chan *Fingerprint, 1) + go d.handleFingerprint(ctx, ch, stream) + + return ch, nil +} + +func (d *driverPluginClient) handleFingerprint(reqCtx context.Context, ch chan *Fingerprint, stream proto.Driver_FingerprintClient) { + defer close(ch) + for { + pb, err := stream.Recv() + if err != nil { + if err != io.EOF { + ch <- &Fingerprint{ + Err: helper.HandleReqCtxGrpcErr(err, reqCtx, d.doneCtx), + } + } + + // End the stream + return + } + + f := &Fingerprint{ + Attributes: pstructs.ConvertProtoAttributeMap(pb.Attributes), + Health: healthStateFromProto(pb.Health), + HealthDescription: pb.HealthDescription, + } + + if pb.Err != "" { + f.Err = errors.New(pb.Err) + } + + select { + case <-reqCtx.Done(): + return + case ch <- f: + } + } +} + +// RecoverTask does internal state recovery to be able to control the task of +// the given TaskHandle +func (d *driverPluginClient) RecoverTask(h *TaskHandle) error { + req := 
&proto.RecoverTaskRequest{Handle: taskHandleToProto(h)} + + _, err := d.client.RecoverTask(d.doneCtx, req) + return helper.HandleGrpcErr(err, d.doneCtx) +} + +// StartTask starts execution of a task with the given TaskConfig. A TaskHandle +// is returned to the caller that can be used to recover state of the task, +// should the driver crash or exit prematurely. +func (d *driverPluginClient) StartTask(c *TaskConfig) (*TaskHandle, *DriverNetwork, error) { + req := &proto.StartTaskRequest{ + Task: taskConfigToProto(c), + } + + resp, err := d.client.StartTask(d.doneCtx, req) + if err != nil { + st := status.Convert(err) + if len(st.Details()) > 0 { + if rec, ok := st.Details()[0].(*sproto.RecoverableError); ok { + return nil, nil, structs.NewRecoverableError(err, rec.Recoverable) + } + } + return nil, nil, helper.HandleGrpcErr(err, d.doneCtx) + } + + var net *DriverNetwork + if resp.NetworkOverride != nil { + net = &DriverNetwork{ + PortMap: map[string]int{}, + IP: resp.NetworkOverride.Addr, + AutoAdvertise: resp.NetworkOverride.AutoAdvertise, + } + for k, v := range resp.NetworkOverride.PortMap { + net.PortMap[k] = int(v) + } + } + + return taskHandleFromProto(resp.Handle), net, nil +} + +// WaitTask returns a channel that will have an ExitResult pushed to it once when the task +// exits on its own or is killed. If WaitTask is called after the task has exited, the channel +// will immedialy return the ExitResult. WaitTask can be called multiple times for +// the same task without issue. 
+func (d *driverPluginClient) WaitTask(ctx context.Context, id string) (<-chan *ExitResult, error) { + ch := make(chan *ExitResult) + go d.handleWaitTask(ctx, id, ch) + return ch, nil +} + +func (d *driverPluginClient) handleWaitTask(ctx context.Context, id string, ch chan *ExitResult) { + defer close(ch) + var result ExitResult + req := &proto.WaitTaskRequest{ + TaskId: id, + } + + // Join the passed context and the shutdown context + joinedCtx, joinedCtxCancel := joincontext.Join(ctx, d.doneCtx) + defer joinedCtxCancel() + + resp, err := d.client.WaitTask(joinedCtx, req) + if err != nil { + result.Err = helper.HandleReqCtxGrpcErr(err, ctx, d.doneCtx) + } else { + // Set result values if the result is provided in the + // response. It is possible only the Err value is set + // in the response (channel close error). + if resp.Result != nil { + result.ExitCode = int(resp.Result.ExitCode) + result.Signal = int(resp.Result.Signal) + result.OOMKilled = resp.Result.OomKilled + } + if len(resp.Err) > 0 { + result.Err = errors.New(resp.Err) + } + } + ch <- &result +} + +// StopTask stops the task with the given taskID. A timeout and signal can be +// given to control a graceful termination of the task. The driver will send the +// given signal to the task and wait for the given timeout for it to exit. If the +// task does not exit within the timeout it will be forcefully killed. +func (d *driverPluginClient) StopTask(taskID string, timeout time.Duration, signal string) error { + req := &proto.StopTaskRequest{ + TaskId: taskID, + Timeout: ptypes.DurationProto(timeout), + Signal: signal, + } + + _, err := d.client.StopTask(d.doneCtx, req) + return helper.HandleGrpcErr(err, d.doneCtx) +} + +// DestroyTask removes the task from the driver's in memory state. The task +// cannot be running unless force is set to true. If force is set to true the +// driver will forcefully terminate the task before removing it. 
+func (d *driverPluginClient) DestroyTask(taskID string, force bool) error { + req := &proto.DestroyTaskRequest{ + TaskId: taskID, + Force: force, + } + + _, err := d.client.DestroyTask(d.doneCtx, req) + return helper.HandleGrpcErr(err, d.doneCtx) +} + +// InspectTask returns status information for a task +func (d *driverPluginClient) InspectTask(taskID string) (*TaskStatus, error) { + req := &proto.InspectTaskRequest{TaskId: taskID} + + resp, err := d.client.InspectTask(d.doneCtx, req) + if err != nil { + return nil, helper.HandleGrpcErr(err, d.doneCtx) + } + + status, err := taskStatusFromProto(resp.Task) + if err != nil { + return nil, err + } + + if resp.Driver != nil { + status.DriverAttributes = resp.Driver.Attributes + } + if resp.NetworkOverride != nil { + status.NetworkOverride = &DriverNetwork{ + PortMap: map[string]int{}, + IP: resp.NetworkOverride.Addr, + AutoAdvertise: resp.NetworkOverride.AutoAdvertise, + } + for k, v := range resp.NetworkOverride.PortMap { + status.NetworkOverride.PortMap[k] = int(v) + } + } + + return status, nil +} + +// TaskStats returns resource usage statistics for the task +func (d *driverPluginClient) TaskStats(ctx context.Context, taskID string, interval time.Duration) (<-chan *TaskResourceUsage, error) { + req := &proto.TaskStatsRequest{ + TaskId: taskID, + CollectionInterval: ptypes.DurationProto(interval), + } + ctx, _ = joincontext.Join(ctx, d.doneCtx) + stream, err := d.client.TaskStats(ctx, req) + if err != nil { + st := status.Convert(err) + if len(st.Details()) > 0 { + if rec, ok := st.Details()[0].(*sproto.RecoverableError); ok { + return nil, structs.NewRecoverableError(err, rec.Recoverable) + } + } + return nil, helper.HandleGrpcErr(err, d.doneCtx) + } + + ch := make(chan *TaskResourceUsage, 1) + go d.handleStats(ctx, ch, stream) + + return ch, nil +} + +func (d *driverPluginClient) handleStats(ctx context.Context, ch chan<- *TaskResourceUsage, stream proto.Driver_TaskStatsClient) { + defer close(ch) + for { + 
resp, err := stream.Recv() + if ctx.Err() != nil { + // Context canceled; exit gracefully + return + } + + if err != nil { + if err != io.EOF { + d.logger.Error("error receiving stream from TaskStats driver RPC, closing stream", "error", err) + } + + // End of stream + return + } + + stats, err := TaskStatsFromProto(resp.Stats) + if err != nil { + d.logger.Error("failed to decode stats from RPC", "error", err, "stats", resp.Stats) + continue + } + + select { + case ch <- stats: + case <-ctx.Done(): + return + } + } +} + +// TaskEvents returns a channel that will receive events from the driver about all +// tasks such as lifecycle events, terminal errors, etc. +func (d *driverPluginClient) TaskEvents(ctx context.Context) (<-chan *TaskEvent, error) { + req := &proto.TaskEventsRequest{} + + // Join the passed context and the shutdown context + joinedCtx, _ := joincontext.Join(ctx, d.doneCtx) + + stream, err := d.client.TaskEvents(joinedCtx, req) + if err != nil { + return nil, helper.HandleReqCtxGrpcErr(err, ctx, d.doneCtx) + } + + ch := make(chan *TaskEvent, 1) + go d.handleTaskEvents(ctx, ch, stream) + return ch, nil +} + +func (d *driverPluginClient) handleTaskEvents(reqCtx context.Context, ch chan *TaskEvent, stream proto.Driver_TaskEventsClient) { + defer close(ch) + for { + ev, err := stream.Recv() + if err != nil { + if err != io.EOF { + ch <- &TaskEvent{ + Err: helper.HandleReqCtxGrpcErr(err, reqCtx, d.doneCtx), + } + } + + // End the stream + return + } + + timestamp, _ := ptypes.Timestamp(ev.Timestamp) + event := &TaskEvent{ + TaskID: ev.TaskId, + AllocID: ev.AllocId, + TaskName: ev.TaskName, + Annotations: ev.Annotations, + Message: ev.Message, + Timestamp: timestamp, + } + select { + case <-reqCtx.Done(): + return + case ch <- event: + } + } +} + +// SignalTask will send the given signal to the specified task +func (d *driverPluginClient) SignalTask(taskID string, signal string) error { + req := &proto.SignalTaskRequest{ + TaskId: taskID, + Signal: signal, 
+ } + _, err := d.client.SignalTask(d.doneCtx, req) + return helper.HandleGrpcErr(err, d.doneCtx) +} + +// ExecTask will run the given command within the execution context of the task. +// The driver will wait for the given timeout for the command to complete before +// terminating it. The stdout and stderr of the command will be return to the caller, +// along with other exit information such as exit code. +func (d *driverPluginClient) ExecTask(taskID string, cmd []string, timeout time.Duration) (*ExecTaskResult, error) { + req := &proto.ExecTaskRequest{ + TaskId: taskID, + Command: cmd, + Timeout: ptypes.DurationProto(timeout), + } + + resp, err := d.client.ExecTask(d.doneCtx, req) + if err != nil { + return nil, helper.HandleGrpcErr(err, d.doneCtx) + } + + result := &ExecTaskResult{ + Stdout: resp.Stdout, + Stderr: resp.Stderr, + ExitResult: exitResultFromProto(resp.Result), + } + + return result, nil +} + +var _ ExecTaskStreamingRawDriver = (*driverPluginClient)(nil) + +func (d *driverPluginClient) ExecTaskStreamingRaw(ctx context.Context, + taskID string, + command []string, + tty bool, + execStream ExecTaskStream) error { + + stream, err := d.client.ExecTaskStreaming(ctx) + if err != nil { + return helper.HandleGrpcErr(err, d.doneCtx) + } + + err = stream.Send(&proto.ExecTaskStreamingRequest{ + Setup: &proto.ExecTaskStreamingRequest_Setup{ + TaskId: taskID, + Command: command, + Tty: tty, + }, + }) + if err != nil { + return helper.HandleGrpcErr(err, d.doneCtx) + } + + errCh := make(chan error, 1) + + go func() { + for { + m, err := execStream.Recv() + if err == io.EOF { + return + } else if err != nil { + errCh <- err + return + } + + if err := stream.Send(m); err != nil { + errCh <- err + return + } + + } + }() + + for { + select { + case err := <-errCh: + return err + default: + } + + m, err := stream.Recv() + if err == io.EOF { + // Once we get to the end of stream successfully, we can ignore errCh: + // e.g. 
input write failures after process terminates shouldn't cause method to fail + return nil + } else if err != nil { + return err + } + + if err := execStream.Send(m); err != nil { + return err + } + } +} + +var _ DriverNetworkManager = (*driverPluginClient)(nil) + +func (d *driverPluginClient) CreateNetwork(allocID string, _ *NetworkCreateRequest) (*NetworkIsolationSpec, bool, error) { + req := &proto.CreateNetworkRequest{ + AllocId: allocID, + } + + resp, err := d.client.CreateNetwork(d.doneCtx, req) + if err != nil { + return nil, false, helper.HandleGrpcErr(err, d.doneCtx) + } + + return NetworkIsolationSpecFromProto(resp.IsolationSpec), resp.Created, nil +} + +func (d *driverPluginClient) DestroyNetwork(allocID string, spec *NetworkIsolationSpec) error { + if spec == nil { + return nil + } + + req := &proto.DestroyNetworkRequest{ + AllocId: allocID, + IsolationSpec: NetworkIsolationSpecToProto(spec), + } + + _, err := d.client.DestroyNetwork(d.doneCtx, req) + if err != nil { + return helper.HandleGrpcErr(err, d.doneCtx) + } + + return nil +} diff --git a/plugin_interface/drivers/cstructs.go b/plugin_interface/drivers/cstructs.go new file mode 100644 index 00000000000..76844e31f19 --- /dev/null +++ b/plugin_interface/drivers/cstructs.go @@ -0,0 +1,32 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package drivers + +import ( +// cstructs "github.com/hashicorp/nomad/client/structs" +) + +// This files present an indirection layer to client structs used by drivers, +// and represent the public interface for drivers, as client interfaces are +// internal and subject to change. 
+ +// MemoryStats holds memory usage related stats +// type MemoryStats = cstructs.MemoryStats + +// CpuStats holds cpu usage related stats +// type CpuStats = cstructs.CpuStats + +// ResourceUsage holds information related to cpu and memory stats +// type ResourceUsage = cstructs.ResourceUsage + +// TaskResourceUsage holds aggregated resource usage of all processes in a Task +// and the resource usage of the individual pids +// type TaskResourceUsage = cstructs.TaskResourceUsage + +// CheckBufSize is the size of the buffer that is used for job output +// const CheckBufSize = cstructs.CheckBufSize + +// DriverStatsNotImplemented is the error to be returned if a driver doesn't +// implement stats. +// var DriverStatsNotImplemented = cstructs.DriverStatsNotImplemented diff --git a/plugin_interface/drivers/driver.go b/plugin_interface/drivers/driver.go new file mode 100644 index 00000000000..6c20ca05db4 --- /dev/null +++ b/plugin_interface/drivers/driver.go @@ -0,0 +1,631 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package drivers + +import ( + "context" + "crypto/md5" + "fmt" + "io" + "maps" + "path/filepath" + "sort" + "strconv" + "time" + + "github.com/hashicorp/nomad/plugin-interface/base" + "github.com/hashicorp/nomad/plugin-interface/drivers/filesystem" + "github.com/hashicorp/nomad/plugin-interface/drivers/fsisolation" + "github.com/hashicorp/nomad/plugin-interface/drivers/proto" + "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" + pstructs "github.com/hashicorp/nomad/plugin-interface/shared/structs" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/msgpack" +) + +const ( + // DriverHealthy is the default health description that should be used + // if the driver is nominal + DriverHealthy = "Healthy" + + // Pre09TaskHandleVersion is the version used to identify that the task + // handle is from a driver that existed before driver plugins (v0.9). 
The + // driver should take appropriate action to handle the old driver state. + Pre09TaskHandleVersion = 0 + + // DetachSignal is a special signal sent to remote task drivers when a + // task should be detached instead of killed. This allows a remote task + // to be left running and transferred to a replacement allocation in + // cases like down or drained nodes causing the original allocation to + // be terminal. + DetachSignal = "DETACH" +) + +// DriverPlugin is the interface with drivers will implement. It is also +// implemented by a plugin client which proxies the calls to go-plugin. See +// the proto/driver.proto file for detailed information about each RPC and +// message structure. +type DriverPlugin interface { + base.BasePlugin + + TaskConfigSchema() (*hclspec.Spec, error) + Capabilities() (*Capabilities, error) + Fingerprint(context.Context) (<-chan *Fingerprint, error) + + RecoverTask(*TaskHandle) error + StartTask(*TaskConfig) (*TaskHandle, *DriverNetwork, error) + WaitTask(ctx context.Context, taskID string) (<-chan *ExitResult, error) + StopTask(taskID string, timeout time.Duration, signal string) error + DestroyTask(taskID string, force bool) error + InspectTask(taskID string) (*TaskStatus, error) + TaskStats(ctx context.Context, taskID string, interval time.Duration) (<-chan *TaskResourceUsage, error) + TaskEvents(context.Context) (<-chan *TaskEvent, error) + + SignalTask(taskID string, signal string) error + ExecTask(taskID string, cmd []string, timeout time.Duration) (*ExecTaskResult, error) +} + +// ExecTaskStreamingDriver marks that a driver supports streaming exec task. This represents a user friendly +// interface to implement, as an alternative to the ExecTaskStreamingRawDriver, the low level interface. 
+type ExecTaskStreamingDriver interface { + ExecTaskStreaming(ctx context.Context, taskID string, execOptions *ExecOptions) (*ExitResult, error) +} + +type ExecOptions struct { + // Command is command to run + Command []string + + // Tty indicates whether pseudo-terminal is to be allocated + Tty bool + + // streams + Stdin io.ReadCloser + Stdout io.WriteCloser + Stderr io.WriteCloser + + // terminal size channel + ResizeCh <-chan TerminalSize +} + +// DriverNetworkManager is the interface with exposes function for creating a +// network namespace for which tasks can join. This only needs to be implemented +// if the driver MUST create the network namespace +type DriverNetworkManager interface { + CreateNetwork(allocID string, request *NetworkCreateRequest) (*NetworkIsolationSpec, bool, error) + DestroyNetwork(allocID string, spec *NetworkIsolationSpec) error +} + +// DriverSignalTaskNotSupported can be embedded by drivers which don't support +// the SignalTask RPC. This satisfies the SignalTask func requirement for the +// DriverPlugin interface. +type DriverSignalTaskNotSupported struct{} + +func (DriverSignalTaskNotSupported) SignalTask(taskID, signal string) error { + return fmt.Errorf("SignalTask is not supported by this driver") +} + +// DriverExecTaskNotSupported can be embedded by drivers which don't support +// the ExecTask RPC. This satisfies the ExecTask func requirement of the +// DriverPlugin interface. 
+type DriverExecTaskNotSupported struct{} + +func (_ DriverExecTaskNotSupported) ExecTask(taskID string, cmd []string, timeout time.Duration) (*ExecTaskResult, error) { + return nil, fmt.Errorf("ExecTask is not supported by this driver") +} + +type HealthState string + +var ( + HealthStateUndetected = HealthState("undetected") + HealthStateUnhealthy = HealthState("unhealthy") + HealthStateHealthy = HealthState("healthy") +) + +type Fingerprint struct { + Attributes map[string]*pstructs.Attribute + Health HealthState + HealthDescription string + + // Err is set by the plugin if an error occurred during fingerprinting + Err error +} + +// Deprecated: use fsisolation.Mode instead. +type FSIsolation = fsisolation.Mode + +var ( + // Deprecated: use fsisolation.None instead. + FSIsolationNone = fsisolation.None + + // Deprecated: use fsisolation.Chroot instead. + FSIsolationChroot = fsisolation.Chroot + + // Deprecated: use fsisolation.Image instead. + FSIsolationImage = fsisolation.Image +) + +type Capabilities struct { + // SendSignals marks the driver as being able to send signals + SendSignals bool + + // Exec marks the driver as being able to execute arbitrary commands + // such as health checks. Used by the ScriptExecutor interface. + Exec bool + + //FSIsolation indicates what kind of filesystem isolation the driver supports. + FSIsolation fsisolation.Mode + + //NetIsolationModes lists the set of isolation modes supported by the driver + NetIsolationModes []NetIsolationMode + + // MustInitiateNetwork tells Nomad that the driver must create the network + // namespace and that the CreateNetwork and DestroyNetwork RPCs are implemented. + MustInitiateNetwork bool + + // MountConfigs tells Nomad which mounting config options the driver supports. + MountConfigs MountConfigSupport + + // DisableLogCollection indicates this driver has disabled log collection + // and the client should not start a logmon process. 
+ DisableLogCollection bool + + // DynamicWorkloadUsers indicates this driver is capable (but not required) + // of making use of UID/GID not backed by a user known to the operating system. + // The allocation of a unique, not-in-use UID/GID is managed by Nomad client + // ensuring no overlap. + DynamicWorkloadUsers bool +} + +func (c *Capabilities) HasNetIsolationMode(m NetIsolationMode) bool { + for _, mode := range c.NetIsolationModes { + if mode == m { + return true + } + } + return false +} + +type NetIsolationMode string + +var ( + // NetIsolationModeHost disables network isolation and uses the host network + NetIsolationModeHost = NetIsolationMode("host") + + // NetIsolationModeGroup uses the group network namespace for isolation + NetIsolationModeGroup = NetIsolationMode("group") + + // NetIsolationModeTask isolates the network to just the task + NetIsolationModeTask = NetIsolationMode("task") + + // NetIsolationModeNone indicates that there is no network to isolate and is + // intended to be used for tasks that the client manages remotely + NetIsolationModeNone = NetIsolationMode("none") +) + +type NetworkIsolationSpec struct { + Mode NetIsolationMode + Path string + Labels map[string]string + HostsConfig *HostsConfig +} + +type HostsConfig struct { + Hostname string + Address string +} + +// NetworkCreateRequest contains all the relevant information when creating a +// network via DriverNetworkManager.CreateNetwork. +type NetworkCreateRequest struct { + + // Hostname is the hostname the user has specified that the network should + // be configured with. + Hostname string +} + +// MountConfigSupport is an enum that defaults to "all" for backwards +// compatibility with community drivers. 
+type MountConfigSupport int32 + +const ( + MountConfigSupportAll MountConfigSupport = iota + MountConfigSupportNone +) + +type TerminalSize struct { + Height int + Width int +} + +type DNSConfig struct { + Servers []string + Searches []string + Options []string +} + +func (c *DNSConfig) Copy() *DNSConfig { + if c == nil { + return nil + } + + cfg := new(DNSConfig) + if len(c.Servers) > 0 { + cfg.Servers = make([]string, len(c.Servers)) + copy(cfg.Servers, c.Servers) + } + if len(c.Searches) > 0 { + cfg.Searches = make([]string, len(c.Searches)) + copy(cfg.Searches, c.Searches) + } + if len(c.Options) > 0 { + cfg.Options = make([]string, len(c.Options)) + copy(cfg.Options, c.Options) + } + + return cfg +} + +type TaskConfig struct { + ID string + JobName string + JobID string + TaskGroupName string + ParentJobID string + Name string // task.Name + Namespace string + NodeName string + NodeID string + Env map[string]string + DeviceEnv map[string]string + Resources *Resources + Devices []*DeviceConfig + Mounts []*MountConfig + User string + AllocDir string + rawDriverConfig []byte + StdoutPath string + StderrPath string + AllocID string + NetworkIsolation *NetworkIsolationSpec + DNS *DNSConfig +} + +func (tc *TaskConfig) Copy() *TaskConfig { + if tc == nil { + return nil + } + c := new(TaskConfig) + *c = *tc + c.Env = maps.Clone(c.Env) + c.DeviceEnv = maps.Clone(c.DeviceEnv) + c.Resources = tc.Resources.Copy() + c.DNS = tc.DNS.Copy() + + if c.Devices != nil { + dc := make([]*DeviceConfig, len(c.Devices)) + for i, c := range c.Devices { + dc[i] = c.Copy() + } + c.Devices = dc + } + + if c.Mounts != nil { + mc := make([]*MountConfig, len(c.Mounts)) + for i, m := range c.Mounts { + mc[i] = m.Copy() + } + c.Mounts = mc + } + + return c +} + +func (tc *TaskConfig) EnvList() []string { + l := make([]string, 0, len(tc.Env)) + for k, v := range tc.Env { + l = append(l, k+"="+v) + } + + sort.Strings(l) + return l +} + +func (tc *TaskConfig) TaskDir() *filesystem.TaskDir { + 
taskDir := filepath.Join(tc.AllocDir, tc.Name) + return &filesystem.TaskDir{ + Dir: taskDir, + SharedAllocDir: filepath.Join(tc.AllocDir, filesystem.SharedAllocName), + LogDir: filepath.Join(tc.AllocDir, filesystem.SharedAllocName, filesystem.LogDirName), + SharedTaskDir: filepath.Join(taskDir, filesystem.SharedAllocName), + LocalDir: filepath.Join(taskDir, filesystem.TaskLocal), + SecretsDir: filepath.Join(taskDir, filesystem.TaskSecrets), + } +} + +func (tc *TaskConfig) DecodeDriverConfig(t interface{}) error { + return base.MsgPackDecode(tc.rawDriverConfig, t) +} + +func (tc *TaskConfig) EncodeDriverConfig(val cty.Value) error { + data, err := msgpack.Marshal(val, val.Type()) + if err != nil { + return err + } + + tc.rawDriverConfig = data + return nil +} + +func (tc *TaskConfig) EncodeConcreteDriverConfig(t interface{}) error { + data := []byte{} + err := base.MsgPackEncode(&data, t) + if err != nil { + return err + } + + tc.rawDriverConfig = data + return nil +} + +type MemoryResources = AllocatedMemoryResources + +type Resources struct { + NomadResources *AllocatedTaskResources + LinuxResources *LinuxResources + Ports *AllocatedPorts +} + +func (r *Resources) Copy() *Resources { + if r == nil { + return nil + } + res := new(Resources) + if r.NomadResources != nil { + res.NomadResources = r.NomadResources.Copy() + } + if r.LinuxResources != nil { + res.LinuxResources = r.LinuxResources.Copy() + } + + if r.Ports != nil { + ports := AllocatedPorts(append(make([]AllocatedPortMapping, 0, len(*r.Ports)), *r.Ports...)) + res.Ports = &ports + } + return res +} + +type LinuxResources struct { + CPUPeriod int64 + CPUQuota int64 + CPUShares int64 + MemoryLimitBytes int64 + + // OOMScoreAdj field in LinuxResources is never used and left for + // compatibility reasons. 
Docker, raw_exec and exec2 drivers allow tasks to + // set per-task oom_score_adj values using their own TaskConfig OOMScoreAdj + // fields + OOMScoreAdj int64 + + CpusetCpus string + CpusetCgroupPath string + + // PrecentTicks is used to calculate the CPUQuota, currently the docker + // driver exposes cpu period and quota through the driver configuration + // and thus the calculation for CPUQuota cannot be done on the client. + // This is a capatability and should only be used by docker until the docker + // specific options are deprecated in favor of exposes CPUPeriod and + // CPUQuota at the task resource block. + PercentTicks float64 +} + +func (r *LinuxResources) Copy() *LinuxResources { + res := new(LinuxResources) + *res = *r + return res +} + +type DeviceConfig struct { + TaskPath string + HostPath string + Permissions string +} + +func (d *DeviceConfig) Copy() *DeviceConfig { + if d == nil { + return nil + } + + dc := new(DeviceConfig) + *dc = *d + return dc +} + +type MountConfig struct { + TaskPath string + HostPath string + Readonly bool + PropagationMode string + SELinuxLabel string +} + +func (m *MountConfig) IsEqual(o *MountConfig) bool { + return m.TaskPath == o.TaskPath && + m.HostPath == o.HostPath && + m.Readonly == o.Readonly && + m.PropagationMode == o.PropagationMode && + m.SELinuxLabel == o.SELinuxLabel +} + +func (m *MountConfig) Copy() *MountConfig { + if m == nil { + return nil + } + + mc := new(MountConfig) + *mc = *m + return mc +} + +const ( + TaskStateUnknown TaskState = "unknown" + TaskStateRunning TaskState = "running" + TaskStateExited TaskState = "exited" +) + +type TaskState string + +type ExitResult struct { + ExitCode int + Signal int + OOMKilled bool + Err error +} + +func (r *ExitResult) Successful() bool { + return r.ExitCode == 0 && r.Signal == 0 && r.Err == nil +} + +func (r *ExitResult) Copy() *ExitResult { + if r == nil { + return nil + } + res := new(ExitResult) + *res = *r + return res +} + +type TaskStatus struct { + 
ID string + Name string + State TaskState + StartedAt time.Time + CompletedAt time.Time + ExitResult *ExitResult + DriverAttributes map[string]string + NetworkOverride *DriverNetwork +} + +type TaskEvent struct { + TaskID string + TaskName string + AllocID string + Timestamp time.Time + Message string + Annotations map[string]string + + // Err is only used if an error occurred while consuming the RPC stream + Err error +} + +type ExecTaskResult struct { + Stdout []byte + Stderr []byte + ExitResult *ExitResult +} + +// DriverNetwork is the network created by driver's (eg Docker's bridge +// network) during Prestart. +type DriverNetwork struct { + // PortMap can be set by drivers to replace ports in environment + // variables with driver-specific mappings. + PortMap map[string]int + + // IP is the IP address for the task created by the driver. + IP string + + // AutoAdvertise indicates whether the driver thinks services that + // choose to auto-advertise-addresses should use this IP instead of the + // host's. eg If a Docker network plugin is used + AutoAdvertise bool +} + +// Advertise returns true if the driver suggests using the IP set. May be +// called on a nil Network in which case it returns false. +func (d *DriverNetwork) Advertise() bool { + return d != nil && d.AutoAdvertise +} + +// Copy a DriverNetwork struct. If it is nil, nil is returned. +func (d *DriverNetwork) Copy() *DriverNetwork { + if d == nil { + return nil + } + pm := make(map[string]int, len(d.PortMap)) + for k, v := range d.PortMap { + pm[k] = v + } + return &DriverNetwork{ + PortMap: pm, + IP: d.IP, + AutoAdvertise: d.AutoAdvertise, + } +} + +// Hash the contents of a DriverNetwork struct to detect changes. If it is nil, +// an empty slice is returned. 
+func (d *DriverNetwork) Hash() []byte { + if d == nil { + return []byte{} + } + h := md5.New() + io.WriteString(h, d.IP) + io.WriteString(h, strconv.FormatBool(d.AutoAdvertise)) + for k, v := range d.PortMap { + io.WriteString(h, k) + io.WriteString(h, strconv.Itoa(v)) + } + return h.Sum(nil) +} + +//// helper types for operating on raw exec operation +// we alias proto instances as much as possible to avoid conversion overhead + +// ExecTaskStreamingRawDriver represents a low-level interface for executing a streaming exec +// call, and is intended to be used when driver instance is to delegate exec handling to another +// backend, e.g. to a executor or a driver behind a grpc/rpc protocol +// +// Nomad client would prefer this interface method over `ExecTaskStreaming` if driver implements it. +type ExecTaskStreamingRawDriver interface { + ExecTaskStreamingRaw( + ctx context.Context, + taskID string, + command []string, + tty bool, + stream ExecTaskStream) error +} + +// ExecTaskStream represents a stream of exec streaming messages, +// and is a handle to get stdin and tty size and send back +// stdout/stderr and exit operations. +// +// The methods are not concurrent safe; callers must ensure that methods are called +// from at most one goroutine. +type ExecTaskStream interface { + // Send relays response message back to API. + // + // The call is synchronous and no references to message is held: once + // method call completes, the message reference can be reused or freed. + Send(*ExecTaskStreamingResponseMsg) error + + // Receive exec streaming messages from API. Returns `io.EOF` on completion of stream. + Recv() (*ExecTaskStreamingRequestMsg, error) +} + +type ExecTaskStreamingRequestMsg = proto.ExecTaskStreamingRequest +type ExecTaskStreamingResponseMsg = proto.ExecTaskStreamingResponse + +// InternalCapabilitiesDriver is an experimental interface enabling a driver +// to disable some nomad functionality (e.g. logs or metrics). 
+// +// Intended for internal drivers only while the interface is stabalized. +type InternalCapabilitiesDriver interface { + InternalCapabilities() InternalCapabilities +} + +// InternalCapabilities flags disabled functionality. +// Zero value means all is supported. +type InternalCapabilities struct { + DisableLogCollection bool + DisableMetricsCollection bool +} diff --git a/plugin_interface/drivers/errors.go b/plugin_interface/drivers/errors.go new file mode 100644 index 00000000000..1fc082ff63e --- /dev/null +++ b/plugin_interface/drivers/errors.go @@ -0,0 +1,16 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package drivers + +import "errors" + +var ErrTaskNotFound = errors.New("task not found for given id") + +var ErrChannelClosed = errors.New("channel closed") + +var DriverRequiresRootMessage = "Driver must run as root" + +var NoCgroupMountMessage = "Failed to discover cgroup mount point" + +var CgroupMountEmpty = "Cgroup mount point unavailable" diff --git a/plugin_interface/drivers/execstreaming.go b/plugin_interface/drivers/execstreaming.go new file mode 100644 index 00000000000..adac646a7af --- /dev/null +++ b/plugin_interface/drivers/execstreaming.go @@ -0,0 +1,188 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package drivers + +import ( + "context" + "fmt" + "io" + "sync" + + "github.com/hashicorp/nomad/plugin-interface/drivers/proto" +) + +// StreamToExecOptions is a convenience method to convert exec stream into +// ExecOptions object. 
+func StreamToExecOptions( + ctx context.Context, + command []string, + tty bool, + stream ExecTaskStream) (*ExecOptions, <-chan error) { + + inReader, inWriter := io.Pipe() + outReader, outWriter := io.Pipe() + errReader, errWriter := io.Pipe() + resize := make(chan TerminalSize, 2) + + errCh := make(chan error, 3) + + // handle input + go func() { + for { + msg, err := stream.Recv() + if err == io.EOF { + return + } else if err != nil { + errCh <- err + return + } + + if msg.Stdin != nil && !msg.Stdin.Close { + _, err := inWriter.Write(msg.Stdin.Data) + if err != nil { + errCh <- err + return + } + } else if msg.Stdin != nil && msg.Stdin.Close { + inWriter.Close() + } else if msg.TtySize != nil { + select { + case resize <- TerminalSize{ + Height: int(msg.TtySize.Height), + Width: int(msg.TtySize.Width), + }: + case <-ctx.Done(): + // process terminated before resize is processed + return + } + } else if isHeartbeat(msg) { + // do nothing + } else { + errCh <- fmt.Errorf("unexpected message type: %#v", msg) + } + } + }() + + var sendLock sync.Mutex + send := func(v *ExecTaskStreamingResponseMsg) error { + sendLock.Lock() + defer sendLock.Unlock() + + return stream.Send(v) + } + + var outWg sync.WaitGroup + outWg.Add(2) + // handle Stdout + go func() { + defer outWg.Done() + + reader := outReader + bytes := make([]byte, 1024) + msg := &ExecTaskStreamingResponseMsg{Stdout: &proto.ExecTaskStreamingIOOperation{}} + + for { + n, err := reader.Read(bytes) + // always send data if we read some + if n != 0 { + msg.Stdout.Data = bytes[:n] + if err := send(msg); err != nil { + errCh <- err + break + } + } + + // then handle error + if err == io.EOF || err == io.ErrClosedPipe { + msg.Stdout.Data = nil + msg.Stdout.Close = true + + if err := send(msg); err != nil { + errCh <- err + } + break + } + + if err != nil { + errCh <- err + break + } + } + + }() + // handle Stderr + go func() { + defer outWg.Done() + + reader := errReader + bytes := make([]byte, 1024) + msg := 
&ExecTaskStreamingResponseMsg{Stderr: &proto.ExecTaskStreamingIOOperation{}} + + for { + n, err := reader.Read(bytes) + // always send data if we read some + if n != 0 { + msg.Stderr.Data = bytes[:n] + if err := send(msg); err != nil { + errCh <- err + break + } + } + + // then handle error + if err == io.EOF || err == io.ErrClosedPipe { + msg.Stderr.Data = nil + msg.Stderr.Close = true + + if err := send(msg); err != nil { + errCh <- err + } + break + } + + if err != nil { + errCh <- err + break + } + } + + }() + + doneCh := make(chan error, 1) + go func() { + outWg.Wait() + + select { + case err := <-errCh: + doneCh <- err + default: + } + close(doneCh) + }() + + return &ExecOptions{ + Command: command, + Tty: tty, + + Stdin: inReader, + Stdout: outWriter, + Stderr: errWriter, + + ResizeCh: resize, + }, doneCh +} + +func NewExecStreamingResponseExit(exitCode int) *ExecTaskStreamingResponseMsg { + return &ExecTaskStreamingResponseMsg{ + Exited: true, + Result: &proto.ExitResult{ + ExitCode: int32(exitCode), + }, + } + +} + +func isHeartbeat(r *ExecTaskStreamingRequestMsg) bool { + return r.Stdin == nil && r.Setup == nil && r.TtySize == nil +} diff --git a/plugin_interface/drivers/filesystem/taskdir.go b/plugin_interface/drivers/filesystem/taskdir.go new file mode 100644 index 00000000000..afb977ca507 --- /dev/null +++ b/plugin_interface/drivers/filesystem/taskdir.go @@ -0,0 +1,146 @@ +package filesystem + +import ( + "os" + "path/filepath" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-set/v3" +) + +const ( + // idUnsupported is what the uid/gid will be set to on platforms (eg + // Windows) that don't support integer ownership identifiers. 
+ idUnsupported = -1 + + // fileMode777 is a constant that represents the file mode rwxrwxrwx + fileMode777 = os.FileMode(0o777) + + // fileMode710 is a constant that represents the file mode rwx--x--- + fileMode710 = os.FileMode(0o710) + + // fileMode755 is a constant that represents the file mode rwxr-xr-x + fileMode755 = os.FileMode(0o755) + + // fileMode666 is a constant that represents the file mode rw-rw-rw- + fileMode666 = os.FileMode(0o666) +) + +var ( + // SnapshotErrorTime is the sentinel time that will be used on the + // error file written by Snapshot when it encounters as error. + SnapshotErrorTime = time.Date(2000, 0, 0, 0, 0, 0, 0, time.UTC) + + // The name of the directory that is shared across tasks in a task group. + SharedAllocName = "alloc" + + // Name of the directory where logs of Tasks are written + LogDirName = "logs" + + // SharedDataDir is one of the shared allocation directories. It is + // included in snapshots. + SharedDataDir = "data" + + // TmpDirName is the name of the temporary directory in each alloc and + // task. + TmpDirName = "tmp" + + // The set of directories that exist inside each shared alloc directory. + SharedAllocDirs = []string{LogDirName, TmpDirName, SharedDataDir} + + // The name of the directory that exists inside each task directory + // regardless of driver. + TaskLocal = "local" + + // TaskSecrets is the name of the secret directory inside each task + // directory + TaskSecrets = "secrets" + + // TaskPrivate is the name of the private directory inside each task + // directory + TaskPrivate = "private" + + // TaskDirs is the set of directories created in each tasks directory. + TaskDirs = map[string]os.FileMode{TmpDirName: os.ModeSticky | fileMode777} + + // AllocGRPCSocket is the path relative to the task dir root for the + // unix socket connected to Consul's gRPC endpoint. 
+ AllocGRPCSocket = filepath.Join(SharedAllocName, TmpDirName, "consul_grpc.sock") + + // AllocHTTPSocket is the path relative to the task dir root for the unix + // socket connected to Consul's HTTP endpoint. + AllocHTTPSocket = filepath.Join(SharedAllocName, TmpDirName, "consul_http.sock") +) + +type TaskDir struct { + // AllocDir is the path to the alloc directory on the host. + // (not to be conflated with client.alloc_dir) + // + // + AllocDir string + + // Dir is the path to Task directory on the host. + // + // + Dir string + + // MountsAllocDir is the path to the alloc directory on the host that has + // been bind mounted under + // + // //alloc -> + MountsAllocDir string + + // MountsTaskDir is the path to the task directory on the host that has been + // bind mounted under + // + // //task -> + MountsTaskDir string + + // MountsSecretsDir is the path to the secrets directory on the host that + // has been bind mounted under + // + // //task/secrets -> + MountsSecretsDir string + + // SharedAllocDir is the path to shared alloc directory on the host + // + // /alloc/ + SharedAllocDir string + + // SharedTaskDir is the path to the shared alloc directory linked into + // the task directory on the host. + // + // /alloc/ + SharedTaskDir string + + // LocalDir is the path to the task's local directory on the host + // + // /local/ + LocalDir string + + // LogDir is the path to the task's log directory on the host + // + // /alloc/logs/ + LogDir string + + // SecretsDir is the path to secrets/ directory on the host + // + // /secrets/ + SecretsDir string + + // secretsInMB is the configured size of the secrets directory + secretsInMB int + + // PrivateDir is the path to private/ directory on the host + // + // /private/ + PrivateDir string + + // skip embedding these paths in chroots. Used for avoiding embedding + // client.alloc_dir and client.mounts_dir recursively. 
+ skip *set.Set[string] + + // logger for this task + logger hclog.Logger +} diff --git a/plugin_interface/drivers/fsisolation/isolation.go b/plugin_interface/drivers/fsisolation/isolation.go new file mode 100644 index 00000000000..e7a9ad2680c --- /dev/null +++ b/plugin_interface/drivers/fsisolation/isolation.go @@ -0,0 +1,25 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package fsisolation + +// Mode is an enum to describe what kind of filesystem isolation a +// driver supports. +type Mode string + +const ( + // IsolationNone means no isolation. The host filesystem is used. + None = Mode("none") + + // IsolationChroot means the driver will use a chroot on the host + // filesystem. + Chroot = Mode("chroot") + + // IsolationImage means the driver uses an image. + Image = Mode("image") + + // IsolationUnveil means the driver and client will work together using + // unveil() syscall semantics (i.e. landlock on linux) isolate the host + // filesytem from workloads. + Unveil = Mode("unveil") +) diff --git a/plugin_interface/drivers/mock.go b/plugin_interface/drivers/mock.go new file mode 100644 index 00000000000..f2a3b05238b --- /dev/null +++ b/plugin_interface/drivers/mock.go @@ -0,0 +1,96 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package drivers + +import ( + "context" + "time" + + "github.com/hashicorp/nomad/plugin-interface/base" + "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" +) + +type TaskConfigSchemaFn func() (*hclspec.Spec, error) +type CapabilitiesFn func() (*Capabilities, error) +type FingerprintFn func(context.Context) (<-chan *Fingerprint, error) +type RecoverTaskFn func(*TaskHandle) error +type StartTaskFn func(*TaskConfig) (*TaskHandle, *DriverNetwork, error) +type WaitTaskFn func(context.Context, string) (<-chan *ExitResult, error) +type StopTaskFn func(string, time.Duration, string) error +type DestroyTaskFn func(string, bool) error +type InspectTaskFn func(string) (*TaskStatus, error) +type TaskStatsFn func(context.Context, string, time.Duration) (<-chan *TaskResourceUsage, error) +type TaskEventsFn func(context.Context) (<-chan *TaskEvent, error) +type SignalTaskFn func(string, string) error +type ExecTaskFn func(string, []string, time.Duration) (*ExecTaskResult, error) + +type MockDriverPlugin struct { + *base.MockPlugin + + TaskConfigSchemaFn TaskConfigSchemaFn + CapabilitiesFn CapabilitiesFn + FingerprintFn FingerprintFn + RecoverTaskFn RecoverTaskFn + StartTaskFn StartTaskFn + WaitTaskFn WaitTaskFn + StopTaskFn StopTaskFn + DestroyTaskFn DestroyTaskFn + InspectTaskFn InspectTaskFn + TaskStatsFn TaskStatsFn + TaskEventsFn TaskEventsFn + SignalTaskFn SignalTaskFn + ExecTaskFn ExecTaskFn +} + +func (p *MockDriverPlugin) TaskConfigSchema() (*hclspec.Spec, error) { + return p.TaskConfigSchemaFn() +} + +func (p *MockDriverPlugin) Capabilities() (*Capabilities, error) { + return p.CapabilitiesFn() +} + +func (p *MockDriverPlugin) Fingerprint(ctx context.Context) (<-chan *Fingerprint, error) { + return p.FingerprintFn(ctx) +} + +func (p *MockDriverPlugin) RecoverTask(handle *TaskHandle) error { + return p.RecoverTaskFn(handle) +} + +func (p *MockDriverPlugin) StartTask(config *TaskConfig) (*TaskHandle, 
*DriverNetwork, error) { + return p.StartTaskFn(config) +} + +func (p *MockDriverPlugin) WaitTask(ctx context.Context, taskID string) (<-chan *ExitResult, error) { + return p.WaitTaskFn(ctx, taskID) +} + +func (p *MockDriverPlugin) StopTask(taskID string, timeout time.Duration, signal string) error { + return p.StopTaskFn(taskID, timeout, signal) +} + +func (p *MockDriverPlugin) DestroyTask(taskID string, force bool) error { + return p.DestroyTaskFn(taskID, force) +} + +func (p *MockDriverPlugin) InspectTask(taskID string) (*TaskStatus, error) { + return p.InspectTaskFn(taskID) +} + +func (p *MockDriverPlugin) TaskStats(ctx context.Context, taskID string, interval time.Duration) (<-chan *TaskResourceUsage, error) { + return p.TaskStatsFn(ctx, taskID, interval) +} + +func (p *MockDriverPlugin) TaskEvents(ctx context.Context) (<-chan *TaskEvent, error) { + return p.TaskEventsFn(ctx) +} + +func (p *MockDriverPlugin) SignalTask(taskID string, signal string) error { + return p.SignalTaskFn(taskID, signal) +} + +func (p *MockDriverPlugin) ExecTask(taskID string, cmd []string, timeout time.Duration) (*ExecTaskResult, error) { + return p.ExecTaskFn(taskID, cmd, timeout) +} diff --git a/plugin_interface/drivers/plugin.go b/plugin_interface/drivers/plugin.go new file mode 100644 index 00000000000..03c8e69a37c --- /dev/null +++ b/plugin_interface/drivers/plugin.go @@ -0,0 +1,65 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package drivers + +import ( + "context" + + hclog "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/plugin-interface/base" + baseproto "github.com/hashicorp/nomad/plugin-interface/base/proto" + "github.com/hashicorp/nomad/plugin-interface/drivers/proto" + "google.golang.org/grpc" +) + +var _ plugin.GRPCPlugin = &PluginDriver{} + +// PluginDriver wraps a DriverPlugin and implements go-plugins GRPCPlugin +// interface to expose the interface over gRPC +type PluginDriver struct { + plugin.NetRPCUnsupportedPlugin + impl DriverPlugin + logger hclog.Logger +} + +func NewDriverPlugin(d DriverPlugin, logger hclog.Logger) *PluginDriver { + return &PluginDriver{ + impl: d, + logger: logger, + } +} + +func (p *PluginDriver) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterDriverServer(s, &driverPluginServer{ + impl: p.impl, + broker: broker, + }) + return nil +} + +func (p *PluginDriver) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return &driverPluginClient{ + BasePluginClient: &base.BasePluginClient{ + DoneCtx: ctx, + Client: baseproto.NewBasePluginClient(c), + }, + client: proto.NewDriverClient(c), + doneCtx: ctx, + logger: p.logger, + }, nil +} + +// Serve is used to serve a driverplugin +func Serve(d DriverPlugin, logger hclog.Logger) { + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: base.Handshake, + Plugins: map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: d}, + base.PluginTypeDriver: &PluginDriver{impl: d, logger: logger}, + }, + GRPCServer: plugin.DefaultGRPCServer, + Logger: logger, + }) +} diff --git a/plugin_interface/drivers/plugin_test.go b/plugin_interface/drivers/plugin_test.go new file mode 100644 index 00000000000..c20b1f4fae6 --- /dev/null +++ b/plugin_interface/drivers/plugin_test.go @@ -0,0 +1,974 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package drivers + +import ( + "context" + "errors" + "testing" + "time" + + pb "github.com/golang/protobuf/proto" + "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/plugin-interface/base" + "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" + "github.com/hashicorp/nomad/plugin-interface/shared/structs" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/msgpack" + + "github.com/shoenig/test/must" +) + +var ( + errTest = errors.New("testing error") + taskID = "test-task-id" +) + +func TestDriverPlugin_PluginInfo(t *testing.T) { + // ci.Parallel(t) + + var ( + apiVersions = []string{"v0.1.0", "v0.2.0"} + ) + + const ( + pluginVersion = "v0.2.1" + pluginName = "mock_driver" + ) + + knownType := func() (*base.PluginInfoResponse, error) { + info := &base.PluginInfoResponse{ + Type: base.PluginTypeDriver, + PluginApiVersions: apiVersions, + PluginVersion: pluginVersion, + Name: pluginName, + } + return info, nil + } + unknownType := func() (*base.PluginInfoResponse, error) { + info := &base.PluginInfoResponse{ + Type: "bad", + PluginApiVersions: apiVersions, + PluginVersion: pluginVersion, + Name: pluginName, + } + return info, nil + } + + mock := &MockDriverPlugin{ + MockPlugin: &base.MockPlugin{ + PluginInfoF: knownType, + }, + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDevice: &PluginDriver{impl: mock}, + }) + defer server.Stop() + defer client.Close() + + raw, err := client.Dispense(base.PluginTypeDevice) + if err != nil { + t.Fatalf("err: %s", err) + } + + impl, ok := raw.(DriverPlugin) + if !ok { + t.Fatalf("bad: %#v", raw) + } + + resp, err := impl.PluginInfo() + must.NoError(t, err) + must.Eq(t, apiVersions, resp.PluginApiVersions) + must.Eq(t, pluginVersion, resp.PluginVersion) + must.Eq(t, pluginName, resp.Name) + 
must.Eq(t, base.PluginTypeDriver, resp.Type) + + // Swap the implementation to return an unknown type + mock.PluginInfoF = unknownType + _, err = impl.PluginInfo() + must.ErrorContains(t, err, "unknown type") +} + +func TestDriverPlugin_ConfigSchema(t *testing.T) { + // ci.Parallel(t) + + mock := &MockDriverPlugin{ + MockPlugin: &base.MockPlugin{ + ConfigSchemaF: func() (*hclspec.Spec, error) { + return base.TestSpec, nil + }, + }, + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDevice: &PluginDriver{impl: mock}, + }) + defer server.Stop() + defer client.Close() + + raw, err := client.Dispense(base.PluginTypeDevice) + if err != nil { + t.Fatalf("err: %s", err) + } + + impl, ok := raw.(DriverPlugin) + if !ok { + t.Fatalf("bad: %#v", raw) + } + + specOut, err := impl.ConfigSchema() + must.NoError(t, err) + must.True(t, pb.Equal(base.TestSpec, specOut)) +} + +func TestDriverPlugin_SetConfig(t *testing.T) { + // ci.Parallel(t) + + var receivedData []byte + mock := &MockDriverPlugin{ + MockPlugin: &base.MockPlugin{ + PluginInfoF: func() (*base.PluginInfoResponse, error) { + return &base.PluginInfoResponse{ + Type: base.PluginTypeDevice, + PluginApiVersions: []string{"v0.0.1"}, + PluginVersion: "v0.0.1", + Name: "mock_device", + }, nil + }, + ConfigSchemaF: func() (*hclspec.Spec, error) { + return base.TestSpec, nil + }, + SetConfigF: func(cfg *base.Config) error { + receivedData = cfg.PluginConfig + return nil + }, + }, + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDevice: &PluginDriver{impl: mock}, + }) + defer server.Stop() + defer client.Close() + + raw, err := client.Dispense(base.PluginTypeDevice) + if err != nil { + t.Fatalf("err: %s", err) + } + + impl, ok := raw.(DriverPlugin) + if !ok { + t.Fatalf("bad: %#v", raw) + } + + config := 
cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("v1"), + "bar": cty.NumberIntVal(1337), + "baz": cty.BoolVal(true), + }) + cdata, err := msgpack.Marshal(config, config.Type()) + must.NoError(t, err) + must.NoError(t, impl.SetConfig(&base.Config{PluginConfig: cdata})) + must.Eq(t, cdata, receivedData) + + // Decode the value back + var actual base.TestConfig + must.NoError(t, structs.Decode(receivedData, &actual)) + must.Eq(t, "v1", actual.Foo) + must.Eq(t, 1337, actual.Bar) + must.True(t, actual.Baz) +} + +func makeTestPlugin(t *testing.T, mock DriverPlugin) DriverPlugin { + t.Helper() + + logger := hclog.New(hclog.DefaultOptions) + if testing.Verbose() { + logger.SetLevel(hclog.Trace) + } else { + logger.SetLevel(hclog.Info) + } + + client, server := plugin.TestPluginGRPCConn(t, true, map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: mock}, + base.PluginTypeDriver: &PluginDriver{impl: mock, logger: logger}, + }) + + t.Cleanup(func() { + server.Stop() + client.Close() + }) + + raw, err := client.Dispense(base.PluginTypeDriver) + must.NoError(t, err) + impl, ok := raw.(DriverPlugin) + must.True(t, ok, must.Sprintf("not valid DriverPlugin - %#v", impl)) + return impl +} + +func TestDriverPlugin_Capabilities(t *testing.T) { + // ci.Parallel(t) + + t.Run("ok", func(t *testing.T) { + caps := &Capabilities{SendSignals: true, Exec: true, FSIsolation: "none"} + mock := &MockDriverPlugin{ + CapabilitiesFn: func() (*Capabilities, error) { + return caps, nil + }, + } + + impl := makeTestPlugin(t, mock) + result, err := impl.Capabilities() + must.NoError(t, err) + must.Eq(t, caps, result) + }) + + t.Run("bad", func(t *testing.T) { + mock := &MockDriverPlugin{ + CapabilitiesFn: func() (*Capabilities, error) { + return nil, errTest + }, + } + + impl := makeTestPlugin(t, mock) + _, err := impl.Capabilities() + must.ErrorContains(t, err, errTest.Error()) + }) +} + +func TestDriverPlugin_Fingerprint(t *testing.T) { + // ci.Parallel(t) + ctx := 
context.Background() + + t.Run("ok", func(t *testing.T) { + f := &Fingerprint{ + Health: HealthStateHealthy, + HealthDescription: "very healthy", + } + mock := &MockDriverPlugin{ + FingerprintFn: func(_ context.Context) (<-chan *Fingerprint, error) { + outCh := make(chan *Fingerprint, 1) + outCh <- f + return outCh, nil + }, + } + + impl := makeTestPlugin(t, mock) + fCh, err := impl.Fingerprint(ctx) + must.NoError(t, err) + + var result *Fingerprint + select { + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-fCh: + } + must.Eq(t, f, result) + }) + + t.Run("error", func(t *testing.T) { + mock := &MockDriverPlugin{ + FingerprintFn: func(_ context.Context) (<-chan *Fingerprint, error) { + return nil, errTest + }, + } + + impl := makeTestPlugin(t, mock) + fCh, err := impl.Fingerprint(ctx) + must.NoError(t, err) + + var result *Fingerprint + select { + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-fCh: + } + must.ErrorContains(t, result.Err, errTest.Error()) + }) + + t.Run("driver error", func(t *testing.T) { + f := &Fingerprint{ + Err: errTest, + } + mock := &MockDriverPlugin{ + FingerprintFn: func(_ context.Context) (<-chan *Fingerprint, error) { + outCh := make(chan *Fingerprint, 1) + outCh <- f + return outCh, nil + }, + } + + impl := makeTestPlugin(t, mock) + fCh, err := impl.Fingerprint(ctx) + must.NoError(t, err) + + var result *Fingerprint + select { + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-fCh: + } + + must.ErrorContains(t, result.Err, errTest.Error()) + }) + + t.Run("channel closed", func(t *testing.T) { + mock := &MockDriverPlugin{ + FingerprintFn: func(_ context.Context) (<-chan *Fingerprint, error) { + outCh := make(chan *Fingerprint, 1) + close(outCh) + return outCh, nil + }, + } + + impl := makeTestPlugin(t, mock) + fCh, err := impl.Fingerprint(ctx) + must.NoError(t, err) + + var result *Fingerprint + select { + case <-time.After(10 * 
time.Millisecond): + t.Fatal("timeout") + case result = <-fCh: + } + + must.ErrorContains(t, result.Err, ErrChannelClosed.Error()) + + select { + case _, ok := <-fCh: + must.False(t, ok) + case <-time.After(10 * time.Millisecond): + t.Fatal("channel not closed") + } + }) +} + +func TestDriverPlugin_RecoverTask(t *testing.T) { + // ci.Parallel(t) + + t.Run("ok", func(t *testing.T) { + handle := &TaskHandle{ + Version: 42, + State: TaskStateRunning, + Config: &TaskConfig{ + ID: "test-id", + Resources: &Resources{}, + }, + } + mock := &MockDriverPlugin{ + RecoverTaskFn: func(th *TaskHandle) error { + must.Eq(t, handle, th) + must.NotNil(t, th.Config) + must.Eq(t, handle.Config.ID, th.Config.ID) + return nil + }, + } + + impl := makeTestPlugin(t, mock) + err := impl.RecoverTask(handle) + must.NoError(t, err) + }) + + t.Run("error", func(t *testing.T) { + mock := &MockDriverPlugin{ + RecoverTaskFn: func(*TaskHandle) error { + return errTest + }, + } + + impl := makeTestPlugin(t, mock) + err := impl.RecoverTask(&TaskHandle{}) + must.ErrorContains(t, err, errTest.Error()) + }) +} + +func TestDriverPlugin_StartTask(t *testing.T) { + // ci.Parallel(t) + + t.Run("ok", func(t *testing.T) { + config := &TaskConfig{ + ID: "test-id", + Resources: &Resources{}, + } + handle := &TaskHandle{ + Version: 42, + Config: config, + State: TaskStateRunning, + } + dnet := &DriverNetwork{IP: "127.0.0.1", PortMap: map[string]int{}} + mock := &MockDriverPlugin{ + StartTaskFn: func(tc *TaskConfig) (*TaskHandle, *DriverNetwork, error) { + must.Eq(t, config, tc) + return handle, dnet, nil + }, + } + + impl := makeTestPlugin(t, mock) + resultHandle, resultNet, err := impl.StartTask(config) + must.NoError(t, err) + must.Eq(t, dnet, resultNet) + must.Eq(t, handle, resultHandle) + }) + + t.Run("error", func(t *testing.T) { + mock := &MockDriverPlugin{ + StartTaskFn: func(*TaskConfig) (*TaskHandle, *DriverNetwork, error) { + return nil, nil, errTest + }, + } + + impl := makeTestPlugin(t, mock) + _, 
_, err := impl.StartTask(&TaskConfig{}) + must.ErrorContains(t, err, errTest.Error()) + }) +} + +func TestDriverPlugin_WaitTask(t *testing.T) { + // ci.Parallel(t) + ctx := context.Background() + + t.Run("ok", func(t *testing.T) { + e := &ExitResult{ + ExitCode: 1, + Signal: 2, + } + mock := &MockDriverPlugin{ + WaitTaskFn: func(ctx context.Context, tid string) (<-chan *ExitResult, error) { + must.Eq(t, taskID, tid) + + outCh := make(chan *ExitResult, 1) + outCh <- e + return outCh, nil + }, + } + + impl := makeTestPlugin(t, mock) + wCh, err := impl.WaitTask(ctx, taskID) + must.NoError(t, err) + + var result *ExitResult + select { + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-wCh: + } + + must.Eq(t, e, result) + }) + + t.Run("error", func(t *testing.T) { + mock := &MockDriverPlugin{ + WaitTaskFn: func(context.Context, string) (<-chan *ExitResult, error) { + return nil, errTest + }, + } + + impl := makeTestPlugin(t, mock) + wCh, err := impl.WaitTask(ctx, taskID) + must.NoError(t, err) + + var result *ExitResult + select { + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-wCh: + } + + must.ErrorContains(t, result.Err, errTest.Error()) + }) + + t.Run("driver error", func(t *testing.T) { + mock := &MockDriverPlugin{ + WaitTaskFn: func(context.Context, string) (<-chan *ExitResult, error) { + outCh := make(chan *ExitResult, 1) + outCh <- &ExitResult{Err: errTest} + return outCh, nil + }, + } + + impl := makeTestPlugin(t, mock) + wCh, err := impl.WaitTask(ctx, taskID) + must.NoError(t, err) + + var result *ExitResult + select { + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-wCh: + } + + must.ErrorContains(t, result.Err, errTest.Error()) + + }) + + t.Run("channel closed", func(t *testing.T) { + mock := &MockDriverPlugin{ + WaitTaskFn: func(context.Context, string) (<-chan *ExitResult, error) { + outCh := make(chan *ExitResult, 1) + close(outCh) + return outCh, nil + }, 
+ } + + impl := makeTestPlugin(t, mock) + wCh, err := impl.WaitTask(ctx, taskID) + must.NoError(t, err) + + var result *ExitResult + select { + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-wCh: + } + + must.ErrorContains(t, result.Err, ErrChannelClosed.Error()) + + select { + case _, ok := <-wCh: + must.False(t, ok) + case <-time.After(10 * time.Millisecond): + t.Fatal("channel not closed") + } + }) +} + +func TestDriverPlugin_StopTask(t *testing.T) { + // ci.Parallel(t) + signal := "test-signal" + + t.Run("ok", func(t *testing.T) { + timeout := 42 * time.Second + mock := &MockDriverPlugin{ + StopTaskFn: func(tid string, to time.Duration, sig string) error { + must.Eq(t, taskID, tid) + must.Eq(t, timeout, to) + must.Eq(t, signal, sig) + return nil + }, + } + + impl := makeTestPlugin(t, mock) + err := impl.StopTask(taskID, timeout, signal) + must.NoError(t, err) + }) + + t.Run("error", func(t *testing.T) { + mock := &MockDriverPlugin{ + StopTaskFn: func(string, time.Duration, string) error { + return errTest + }, + } + + impl := makeTestPlugin(t, mock) + err := impl.StopTask(taskID, 0, signal) + must.ErrorContains(t, err, errTest.Error()) + }) +} + +func TestDriverPlugin_DestroyTask(t *testing.T) { + // ci.Parallel(t) + + t.Run("ok", func(t *testing.T) { + mock := &MockDriverPlugin{ + DestroyTaskFn: func(tid string, force bool) error { + must.Eq(t, taskID, tid) + must.False(t, force, must.Sprint("force should be false")) + return nil + }, + } + + impl := makeTestPlugin(t, mock) + err := impl.DestroyTask(taskID, false) + must.NoError(t, err) + }) + + t.Run("ok - force", func(t *testing.T) { + mock := &MockDriverPlugin{ + DestroyTaskFn: func(tid string, force bool) error { + must.Eq(t, taskID, tid) + must.True(t, force, must.Sprint("force should be true")) + return nil + }, + } + + impl := makeTestPlugin(t, mock) + err := impl.DestroyTask(taskID, true) + must.NoError(t, err) + }) + + t.Run("error", func(t *testing.T) { + mock := 
&MockDriverPlugin{ + DestroyTaskFn: func(string, bool) error { + return errTest + }, + } + + impl := makeTestPlugin(t, mock) + err := impl.DestroyTask(taskID, false) + must.ErrorContains(t, err, errTest.Error()) + }) +} + +func TestDriverPlugin_InspectTask(t *testing.T) { + // ci.Parallel(t) + + t.Run("ok", func(t *testing.T) { + mock := &MockDriverPlugin{ + InspectTaskFn: func(tid string) (*TaskStatus, error) { + must.Eq(t, taskID, tid) + return &TaskStatus{ + ID: tid, + State: TaskStateRunning, + }, nil + }, + } + + impl := makeTestPlugin(t, mock) + result, err := impl.InspectTask(taskID) + must.NoError(t, err) + expected := &TaskStatus{ + ID: taskID, + State: TaskStateRunning, + ExitResult: new(ExitResult), + } + must.Eq(t, expected, result) + }) + + t.Run("error", func(t *testing.T) { + mock := &MockDriverPlugin{ + InspectTaskFn: func(string) (*TaskStatus, error) { + return nil, errTest + }, + } + + impl := makeTestPlugin(t, mock) + _, err := impl.InspectTask(taskID) + must.ErrorContains(t, err, errTest.Error()) + }) +} + +func TestDriverPlugin_TaskStats(t *testing.T) { + // ci.Parallel(t) + ctx := context.Background() + + t.Run("ok - empty", func(t *testing.T) { + duration := 42 * time.Second + + mock := &MockDriverPlugin{ + TaskStatsFn: func(ctx context.Context, tid string, dur time.Duration) (<-chan *TaskResourceUsage, error) { + must.Eq(t, taskID, tid) + must.Eq(t, duration, dur) + outCh := make(chan *TaskResourceUsage, 1) + outCh <- &TaskResourceUsage{} + return outCh, nil + }, + } + + impl := makeTestPlugin(t, mock) + tCh, err := impl.TaskStats(ctx, taskID, duration) + must.NoError(t, err) + + var result *TaskResourceUsage + select { + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-tCh: + } + + expected := &TaskResourceUsage{ + ResourceUsage: &ResourceUsage{ + MemoryStats: &MemoryStats{}, + CpuStats: &CpuStats{}, + }, + Pids: map[string]*ResourceUsage{}, + } + must.Eq(t, expected, result) + }) + + t.Run("ok", func(t 
*testing.T) { + tru := &TaskResourceUsage{ + ResourceUsage: &ResourceUsage{ + MemoryStats: &MemoryStats{ + RSS: 42, + Usage: 42, + }, + CpuStats: &CpuStats{ + ThrottledTime: 42, + }, + }, + Pids: map[string]*ResourceUsage{ + "42": { + MemoryStats: &MemoryStats{ + RSS: 42, + Usage: 42, + }, + CpuStats: &CpuStats{ + ThrottledTime: 42, + }, + }, + }, + } + duration := 42 * time.Second + mock := &MockDriverPlugin{ + TaskStatsFn: func(ctx context.Context, tid string, dur time.Duration) (<-chan *TaskResourceUsage, error) { + must.Eq(t, taskID, tid) + must.Eq(t, duration, dur) + outCh := make(chan *TaskResourceUsage, 1) + outCh <- tru + return outCh, nil + }, + } + + impl := makeTestPlugin(t, mock) + tCh, err := impl.TaskStats(ctx, taskID, duration) + must.NoError(t, err) + + var result *TaskResourceUsage + select { + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-tCh: + } + + expected := &TaskResourceUsage{ + ResourceUsage: &ResourceUsage{ + MemoryStats: &MemoryStats{ + RSS: 42, + Usage: 42, + Measured: []string{}, + }, + CpuStats: &CpuStats{ + ThrottledTime: 42, + Measured: []string{}, + }, + }, + Pids: map[string]*ResourceUsage{ + "42": { + MemoryStats: &MemoryStats{ + RSS: 42, + Usage: 42, + Measured: []string{}, + }, + CpuStats: &CpuStats{ + ThrottledTime: 42, + Measured: []string{}, + }, + }, + }, + } + + must.Eq(t, expected, result) + }) + + t.Run("error", func(t *testing.T) { + mock := &MockDriverPlugin{ + TaskStatsFn: func(context.Context, string, time.Duration) (<-chan *TaskResourceUsage, error) { + return nil, errTest + }, + } + + impl := makeTestPlugin(t, mock) + tCh, err := impl.TaskStats(ctx, taskID, 0) + must.NoError(t, err) + + var result *TaskResourceUsage + select { + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-tCh: + } + + must.Nil(t, result) + }) + + t.Run("channel closed", func(t *testing.T) { + mock := &MockDriverPlugin{ + TaskStatsFn: func(context.Context, string, time.Duration) 
(<-chan *TaskResourceUsage, error) { + outCh := make(chan *TaskResourceUsage, 1) + close(outCh) + return outCh, nil + }, + } + + impl := makeTestPlugin(t, mock) + tCh, err := impl.TaskStats(ctx, taskID, 0) + must.NoError(t, err) + + var result *TaskResourceUsage + select { + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-tCh: + } + + must.Nil(t, result) + + select { + case _, ok := <-tCh: + must.False(t, ok) + case <-time.After(10 * time.Millisecond): + t.Fatal("channel not closed") + } + }) +} + +func TestDriverPlugin_TaskEvents(t *testing.T) { + // ci.Parallel(t) + ctx := context.Background() + + t.Run("ok", func(t *testing.T) { + te := &TaskEvent{ + TaskID: "test-task-id", + TaskName: "test-task-name", + } + mock := &MockDriverPlugin{ + TaskEventsFn: func(context.Context) (<-chan *TaskEvent, error) { + outCh := make(chan *TaskEvent, 1) + outCh <- te + return outCh, nil + }, + } + + impl := makeTestPlugin(t, mock) + tCh, err := impl.TaskEvents(ctx) + must.NoError(t, err) + + var result *TaskEvent + select { + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-tCh: + } + + must.Eq(t, te, result) + }) + + t.Run("error", func(t *testing.T) { + mock := &MockDriverPlugin{ + TaskEventsFn: func(context.Context) (<-chan *TaskEvent, error) { + return nil, errTest + }, + } + + impl := makeTestPlugin(t, mock) + tCh, err := impl.TaskEvents(ctx) + must.NoError(t, err) + + var result *TaskEvent + select { + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-tCh: + } + + must.ErrorContains(t, result.Err, errTest.Error()) + }) + + t.Run("closed channel", func(t *testing.T) { + mock := &MockDriverPlugin{ + TaskEventsFn: func(context.Context) (<-chan *TaskEvent, error) { + outCh := make(chan *TaskEvent, 1) + close(outCh) + return outCh, nil + }, + } + + impl := makeTestPlugin(t, mock) + tCh, err := impl.TaskEvents(ctx) + must.NoError(t, err) + + var result *TaskEvent + select { + case 
<-time.After(10 * time.Millisecond): + t.Fatal("timeout") + case result = <-tCh: + } + + must.Nil(t, result) + select { + case _, ok := <-tCh: + if ok { + t.Fatal("channel not closed") + } + default: + } + }) +} + +func TestDriverPlugin_SignalTask(t *testing.T) { + // ci.Parallel(t) + signal := "test-signal" + + t.Run("ok", func(t *testing.T) { + mock := &MockDriverPlugin{ + SignalTaskFn: func(tid string, sig string) error { + must.Eq(t, taskID, tid) + must.Eq(t, signal, sig) + return nil + }, + } + + impl := makeTestPlugin(t, mock) + err := impl.SignalTask(taskID, signal) + must.NoError(t, err) + }) + + t.Run("error", func(t *testing.T) { + mock := &MockDriverPlugin{ + SignalTaskFn: func(string, string) error { + return errTest + }, + } + + impl := makeTestPlugin(t, mock) + err := impl.SignalTask(taskID, signal) + must.ErrorContains(t, err, errTest.Error()) + }) +} + +func TestDriverPlugin_ExecTask(t *testing.T) { + // ci.Parallel(t) + + t.Run("ok", func(t *testing.T) { + commands := []string{"first-cmd", "second-cmd"} + timeout := 42 * time.Second + etr := &ExecTaskResult{ + Stdout: []byte("stdout content"), + Stderr: []byte("stderr content"), + ExitResult: &ExitResult{ + ExitCode: 42, + }, + } + mock := &MockDriverPlugin{ + ExecTaskFn: func(tid string, cmds []string, to time.Duration) (*ExecTaskResult, error) { + must.Eq(t, taskID, tid) + must.Eq(t, commands, cmds) + must.Eq(t, timeout, to) + return etr, nil + }, + } + + impl := makeTestPlugin(t, mock) + result, err := impl.ExecTask(taskID, commands, timeout) + must.NoError(t, err) + must.Eq(t, etr, result) + }) + + t.Run("error", func(t *testing.T) { + mock := &MockDriverPlugin{ + ExecTaskFn: func(string, []string, time.Duration) (*ExecTaskResult, error) { + return nil, errTest + }, + } + + impl := makeTestPlugin(t, mock) + _, err := impl.ExecTask(taskID, []string{}, 0) + must.ErrorContains(t, err, err.Error()) + }) +} diff --git a/plugin_interface/drivers/proto/driver.pb.go 
b/plugin_interface/drivers/proto/driver.pb.go new file mode 100644 index 00000000000..232da9f47a5 --- /dev/null +++ b/plugin_interface/drivers/proto/driver.pb.go @@ -0,0 +1,4826 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: plugins/drivers/proto/driver.proto + +package proto + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + hclspec "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" + proto1 "github.com/hashicorp/nomad/plugin-interface/shared/structs/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type TaskState int32 + +const ( + TaskState_UNKNOWN TaskState = 0 + TaskState_RUNNING TaskState = 1 + TaskState_EXITED TaskState = 2 +) + +var TaskState_name = map[int32]string{ + 0: "UNKNOWN", + 1: "RUNNING", + 2: "EXITED", +} + +var TaskState_value = map[string]int32{ + "UNKNOWN": 0, + "RUNNING": 1, + "EXITED": 2, +} + +func (x TaskState) String() string { + return proto.EnumName(TaskState_name, int32(x)) +} + +func (TaskState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{0} +} + +type FingerprintResponse_HealthState int32 + +const ( + FingerprintResponse_UNDETECTED FingerprintResponse_HealthState = 0 + FingerprintResponse_UNHEALTHY FingerprintResponse_HealthState = 1 + FingerprintResponse_HEALTHY FingerprintResponse_HealthState = 2 +) + +var FingerprintResponse_HealthState_name = map[int32]string{ + 0: "UNDETECTED", + 1: "UNHEALTHY", + 2: "HEALTHY", +} + +var FingerprintResponse_HealthState_value = map[string]int32{ + "UNDETECTED": 0, + "UNHEALTHY": 1, + "HEALTHY": 2, +} + +func (x FingerprintResponse_HealthState) String() string { + return proto.EnumName(FingerprintResponse_HealthState_name, int32(x)) +} + +func (FingerprintResponse_HealthState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{5, 0} +} + +type StartTaskResponse_Result int32 + +const ( + StartTaskResponse_SUCCESS StartTaskResponse_Result = 0 + StartTaskResponse_RETRY StartTaskResponse_Result = 1 + StartTaskResponse_FATAL StartTaskResponse_Result = 2 +) + +var StartTaskResponse_Result_name = map[int32]string{ + 0: "SUCCESS", + 1: "RETRY", + 2: "FATAL", +} + +var StartTaskResponse_Result_value = map[string]int32{ + "SUCCESS": 0, + "RETRY": 1, + "FATAL": 2, +} + +func (x StartTaskResponse_Result) String() string { + return proto.EnumName(StartTaskResponse_Result_name, int32(x)) +} + +func (StartTaskResponse_Result) EnumDescriptor() ([]byte, []int) { 
+ return fileDescriptor_4a8f45747846a74d, []int{9, 0} +} + +type DriverCapabilities_FSIsolation int32 + +const ( + DriverCapabilities_NONE DriverCapabilities_FSIsolation = 0 + DriverCapabilities_CHROOT DriverCapabilities_FSIsolation = 1 + DriverCapabilities_IMAGE DriverCapabilities_FSIsolation = 2 + DriverCapabilities_UNVEIL DriverCapabilities_FSIsolation = 3 +) + +var DriverCapabilities_FSIsolation_name = map[int32]string{ + 0: "NONE", + 1: "CHROOT", + 2: "IMAGE", + 3: "UNVEIL", +} + +var DriverCapabilities_FSIsolation_value = map[string]int32{ + "NONE": 0, + "CHROOT": 1, + "IMAGE": 2, + "UNVEIL": 3, +} + +func (x DriverCapabilities_FSIsolation) String() string { + return proto.EnumName(DriverCapabilities_FSIsolation_name, int32(x)) +} + +func (DriverCapabilities_FSIsolation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{32, 0} +} + +type DriverCapabilities_MountConfigs int32 + +const ( + DriverCapabilities_UNKNOWN_MOUNTS DriverCapabilities_MountConfigs = 0 + DriverCapabilities_ANY_MOUNTS DriverCapabilities_MountConfigs = 0 + DriverCapabilities_NO_MOUNTS DriverCapabilities_MountConfigs = 1 +) + +var DriverCapabilities_MountConfigs_name = map[int32]string{ + 0: "UNKNOWN_MOUNTS", + // Duplicate value: 0: "ANY_MOUNTS", + 1: "NO_MOUNTS", +} + +var DriverCapabilities_MountConfigs_value = map[string]int32{ + "UNKNOWN_MOUNTS": 0, + "ANY_MOUNTS": 0, + "NO_MOUNTS": 1, +} + +func (x DriverCapabilities_MountConfigs) String() string { + return proto.EnumName(DriverCapabilities_MountConfigs_name, int32(x)) +} + +func (DriverCapabilities_MountConfigs) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{32, 1} +} + +type NetworkIsolationSpec_NetworkIsolationMode int32 + +const ( + NetworkIsolationSpec_HOST NetworkIsolationSpec_NetworkIsolationMode = 0 + NetworkIsolationSpec_GROUP NetworkIsolationSpec_NetworkIsolationMode = 1 + NetworkIsolationSpec_TASK NetworkIsolationSpec_NetworkIsolationMode = 2 + 
NetworkIsolationSpec_NONE NetworkIsolationSpec_NetworkIsolationMode = 3 +) + +var NetworkIsolationSpec_NetworkIsolationMode_name = map[int32]string{ + 0: "HOST", + 1: "GROUP", + 2: "TASK", + 3: "NONE", +} + +var NetworkIsolationSpec_NetworkIsolationMode_value = map[string]int32{ + "HOST": 0, + "GROUP": 1, + "TASK": 2, + "NONE": 3, +} + +func (x NetworkIsolationSpec_NetworkIsolationMode) String() string { + return proto.EnumName(NetworkIsolationSpec_NetworkIsolationMode_name, int32(x)) +} + +func (NetworkIsolationSpec_NetworkIsolationMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{33, 0} +} + +type CPUUsage_Fields int32 + +const ( + CPUUsage_SYSTEM_MODE CPUUsage_Fields = 0 + CPUUsage_USER_MODE CPUUsage_Fields = 1 + CPUUsage_TOTAL_TICKS CPUUsage_Fields = 2 + CPUUsage_THROTTLED_PERIODS CPUUsage_Fields = 3 + CPUUsage_THROTTLED_TIME CPUUsage_Fields = 4 + CPUUsage_PERCENT CPUUsage_Fields = 5 +) + +var CPUUsage_Fields_name = map[int32]string{ + 0: "SYSTEM_MODE", + 1: "USER_MODE", + 2: "TOTAL_TICKS", + 3: "THROTTLED_PERIODS", + 4: "THROTTLED_TIME", + 5: "PERCENT", +} + +var CPUUsage_Fields_value = map[string]int32{ + "SYSTEM_MODE": 0, + "USER_MODE": 1, + "TOTAL_TICKS": 2, + "THROTTLED_PERIODS": 3, + "THROTTLED_TIME": 4, + "PERCENT": 5, +} + +func (x CPUUsage_Fields) String() string { + return proto.EnumName(CPUUsage_Fields_name, int32(x)) +} + +func (CPUUsage_Fields) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{54, 0} +} + +type MemoryUsage_Fields int32 + +const ( + MemoryUsage_RSS MemoryUsage_Fields = 0 + MemoryUsage_CACHE MemoryUsage_Fields = 1 + MemoryUsage_MAX_USAGE MemoryUsage_Fields = 2 + MemoryUsage_KERNEL_USAGE MemoryUsage_Fields = 3 + MemoryUsage_KERNEL_MAX_USAGE MemoryUsage_Fields = 4 + MemoryUsage_USAGE MemoryUsage_Fields = 5 + MemoryUsage_SWAP MemoryUsage_Fields = 6 +) + +var MemoryUsage_Fields_name = map[int32]string{ + 0: "RSS", + 1: "CACHE", + 2: "MAX_USAGE", + 3: "KERNEL_USAGE", 
+ 4: "KERNEL_MAX_USAGE", + 5: "USAGE", + 6: "SWAP", +} + +var MemoryUsage_Fields_value = map[string]int32{ + "RSS": 0, + "CACHE": 1, + "MAX_USAGE": 2, + "KERNEL_USAGE": 3, + "KERNEL_MAX_USAGE": 4, + "USAGE": 5, + "SWAP": 6, +} + +func (x MemoryUsage_Fields) String() string { + return proto.EnumName(MemoryUsage_Fields_name, int32(x)) +} + +func (MemoryUsage_Fields) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{55, 0} +} + +type TaskConfigSchemaRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskConfigSchemaRequest) Reset() { *m = TaskConfigSchemaRequest{} } +func (m *TaskConfigSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*TaskConfigSchemaRequest) ProtoMessage() {} +func (*TaskConfigSchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{0} +} + +func (m *TaskConfigSchemaRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskConfigSchemaRequest.Unmarshal(m, b) +} +func (m *TaskConfigSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskConfigSchemaRequest.Marshal(b, m, deterministic) +} +func (m *TaskConfigSchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskConfigSchemaRequest.Merge(m, src) +} +func (m *TaskConfigSchemaRequest) XXX_Size() int { + return xxx_messageInfo_TaskConfigSchemaRequest.Size(m) +} +func (m *TaskConfigSchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TaskConfigSchemaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskConfigSchemaRequest proto.InternalMessageInfo + +type TaskConfigSchemaResponse struct { + // Spec is the configuration schema for the job driver config block + Spec *hclspec.Spec `protobuf:"bytes,1,opt,name=spec,proto3" json:"spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *TaskConfigSchemaResponse) Reset() { *m = TaskConfigSchemaResponse{} } +func (m *TaskConfigSchemaResponse) String() string { return proto.CompactTextString(m) } +func (*TaskConfigSchemaResponse) ProtoMessage() {} +func (*TaskConfigSchemaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{1} +} + +func (m *TaskConfigSchemaResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskConfigSchemaResponse.Unmarshal(m, b) +} +func (m *TaskConfigSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskConfigSchemaResponse.Marshal(b, m, deterministic) +} +func (m *TaskConfigSchemaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskConfigSchemaResponse.Merge(m, src) +} +func (m *TaskConfigSchemaResponse) XXX_Size() int { + return xxx_messageInfo_TaskConfigSchemaResponse.Size(m) +} +func (m *TaskConfigSchemaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TaskConfigSchemaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskConfigSchemaResponse proto.InternalMessageInfo + +func (m *TaskConfigSchemaResponse) GetSpec() *hclspec.Spec { + if m != nil { + return m.Spec + } + return nil +} + +type CapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CapabilitiesRequest) Reset() { *m = CapabilitiesRequest{} } +func (m *CapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*CapabilitiesRequest) ProtoMessage() {} +func (*CapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{2} +} + +func (m *CapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CapabilitiesRequest.Unmarshal(m, b) +} +func (m *CapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CapabilitiesRequest.Marshal(b, m, deterministic) +} 
+func (m *CapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapabilitiesRequest.Merge(m, src) +} +func (m *CapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_CapabilitiesRequest.Size(m) +} +func (m *CapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CapabilitiesRequest proto.InternalMessageInfo + +type CapabilitiesResponse struct { + // Capabilities provides a way for the driver to denote if it implements + // non-core RPCs. Some Driver service RPCs expose additional information + // or functionality outside of the core task management functions. These + // RPCs are only implemented if the driver sets the corresponding capability. + Capabilities *DriverCapabilities `protobuf:"bytes,1,opt,name=capabilities,proto3" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CapabilitiesResponse) Reset() { *m = CapabilitiesResponse{} } +func (m *CapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*CapabilitiesResponse) ProtoMessage() {} +func (*CapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{3} +} + +func (m *CapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CapabilitiesResponse.Unmarshal(m, b) +} +func (m *CapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CapabilitiesResponse.Marshal(b, m, deterministic) +} +func (m *CapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapabilitiesResponse.Merge(m, src) +} +func (m *CapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_CapabilitiesResponse.Size(m) +} +func (m *CapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CapabilitiesResponse 
proto.InternalMessageInfo + +func (m *CapabilitiesResponse) GetCapabilities() *DriverCapabilities { + if m != nil { + return m.Capabilities + } + return nil +} + +type FingerprintRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FingerprintRequest) Reset() { *m = FingerprintRequest{} } +func (m *FingerprintRequest) String() string { return proto.CompactTextString(m) } +func (*FingerprintRequest) ProtoMessage() {} +func (*FingerprintRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{4} +} + +func (m *FingerprintRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FingerprintRequest.Unmarshal(m, b) +} +func (m *FingerprintRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FingerprintRequest.Marshal(b, m, deterministic) +} +func (m *FingerprintRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FingerprintRequest.Merge(m, src) +} +func (m *FingerprintRequest) XXX_Size() int { + return xxx_messageInfo_FingerprintRequest.Size(m) +} +func (m *FingerprintRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FingerprintRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FingerprintRequest proto.InternalMessageInfo + +type FingerprintResponse struct { + // Attributes are key/value pairs that annotate the nomad client and can be + // used in scheduling constraints and affinities. + Attributes map[string]*proto1.Attribute `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Health is used to determine the state of the health the driver is in. 
+ // Health can be one of the following states: + // - UNDETECTED: driver dependencies are not met and the driver can not start + // - UNHEALTHY: driver dependencies are met but the driver is unable to + // perform operations due to some other problem + // - HEALTHY: driver is able to perform all operations + Health FingerprintResponse_HealthState `protobuf:"varint,2,opt,name=health,proto3,enum=hashicorp.nomad.plugins.drivers.proto.FingerprintResponse_HealthState" json:"health,omitempty"` + // HealthDescription is a human readable message describing the current + // state of driver health + HealthDescription string `protobuf:"bytes,3,opt,name=health_description,json=healthDescription,proto3" json:"health_description,omitempty"` + // Err is set if any driver error occurred while waiting for the fingerprint + Err string `protobuf:"bytes,4,opt,name=err,proto3" json:"err,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FingerprintResponse) Reset() { *m = FingerprintResponse{} } +func (m *FingerprintResponse) String() string { return proto.CompactTextString(m) } +func (*FingerprintResponse) ProtoMessage() {} +func (*FingerprintResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{5} +} + +func (m *FingerprintResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FingerprintResponse.Unmarshal(m, b) +} +func (m *FingerprintResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FingerprintResponse.Marshal(b, m, deterministic) +} +func (m *FingerprintResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FingerprintResponse.Merge(m, src) +} +func (m *FingerprintResponse) XXX_Size() int { + return xxx_messageInfo_FingerprintResponse.Size(m) +} +func (m *FingerprintResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FingerprintResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_FingerprintResponse 
proto.InternalMessageInfo + +func (m *FingerprintResponse) GetAttributes() map[string]*proto1.Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *FingerprintResponse) GetHealth() FingerprintResponse_HealthState { + if m != nil { + return m.Health + } + return FingerprintResponse_UNDETECTED +} + +func (m *FingerprintResponse) GetHealthDescription() string { + if m != nil { + return m.HealthDescription + } + return "" +} + +func (m *FingerprintResponse) GetErr() string { + if m != nil { + return m.Err + } + return "" +} + +type RecoverTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // Handle is the TaskHandle returned from StartTask + Handle *TaskHandle `protobuf:"bytes,2,opt,name=handle,proto3" json:"handle,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RecoverTaskRequest) Reset() { *m = RecoverTaskRequest{} } +func (m *RecoverTaskRequest) String() string { return proto.CompactTextString(m) } +func (*RecoverTaskRequest) ProtoMessage() {} +func (*RecoverTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{6} +} + +func (m *RecoverTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RecoverTaskRequest.Unmarshal(m, b) +} +func (m *RecoverTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RecoverTaskRequest.Marshal(b, m, deterministic) +} +func (m *RecoverTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecoverTaskRequest.Merge(m, src) +} +func (m *RecoverTaskRequest) XXX_Size() int { + return xxx_messageInfo_RecoverTaskRequest.Size(m) +} +func (m *RecoverTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RecoverTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RecoverTaskRequest proto.InternalMessageInfo + +func 
(m *RecoverTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *RecoverTaskRequest) GetHandle() *TaskHandle { + if m != nil { + return m.Handle + } + return nil +} + +type RecoverTaskResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RecoverTaskResponse) Reset() { *m = RecoverTaskResponse{} } +func (m *RecoverTaskResponse) String() string { return proto.CompactTextString(m) } +func (*RecoverTaskResponse) ProtoMessage() {} +func (*RecoverTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{7} +} + +func (m *RecoverTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RecoverTaskResponse.Unmarshal(m, b) +} +func (m *RecoverTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RecoverTaskResponse.Marshal(b, m, deterministic) +} +func (m *RecoverTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecoverTaskResponse.Merge(m, src) +} +func (m *RecoverTaskResponse) XXX_Size() int { + return xxx_messageInfo_RecoverTaskResponse.Size(m) +} +func (m *RecoverTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RecoverTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RecoverTaskResponse proto.InternalMessageInfo + +type StartTaskRequest struct { + // Task configuration to launch + Task *TaskConfig `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartTaskRequest) Reset() { *m = StartTaskRequest{} } +func (m *StartTaskRequest) String() string { return proto.CompactTextString(m) } +func (*StartTaskRequest) ProtoMessage() {} +func (*StartTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{8} +} + +func (m *StartTaskRequest) XXX_Unmarshal(b 
[]byte) error { + return xxx_messageInfo_StartTaskRequest.Unmarshal(m, b) +} +func (m *StartTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartTaskRequest.Marshal(b, m, deterministic) +} +func (m *StartTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartTaskRequest.Merge(m, src) +} +func (m *StartTaskRequest) XXX_Size() int { + return xxx_messageInfo_StartTaskRequest.Size(m) +} +func (m *StartTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartTaskRequest proto.InternalMessageInfo + +func (m *StartTaskRequest) GetTask() *TaskConfig { + if m != nil { + return m.Task + } + return nil +} + +type StartTaskResponse struct { + // Result is set depending on the type of error that occurred while starting + // a task: + // + // - SUCCESS: No error occurred, handle is set + // - RETRY: An error occurred, but is recoverable and the RPC should be retried + // - FATAL: A fatal error occurred and is not likely to succeed if retried + // + // If Result is not successful, the DriverErrorMsg will be set. + Result StartTaskResponse_Result `protobuf:"varint,1,opt,name=result,proto3,enum=hashicorp.nomad.plugins.drivers.proto.StartTaskResponse_Result" json:"result,omitempty"` + // DriverErrorMsg is set if an error occurred + DriverErrorMsg string `protobuf:"bytes,2,opt,name=driver_error_msg,json=driverErrorMsg,proto3" json:"driver_error_msg,omitempty"` + // Handle is opaque to the client, but must be stored in order to recover + // the task. + Handle *TaskHandle `protobuf:"bytes,3,opt,name=handle,proto3" json:"handle,omitempty"` + // NetworkOverride is set if the driver sets network settings and the service ip/port + // needs to be set differently. 
+ NetworkOverride *NetworkOverride `protobuf:"bytes,4,opt,name=network_override,json=networkOverride,proto3" json:"network_override,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartTaskResponse) Reset() { *m = StartTaskResponse{} } +func (m *StartTaskResponse) String() string { return proto.CompactTextString(m) } +func (*StartTaskResponse) ProtoMessage() {} +func (*StartTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{9} +} + +func (m *StartTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartTaskResponse.Unmarshal(m, b) +} +func (m *StartTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartTaskResponse.Marshal(b, m, deterministic) +} +func (m *StartTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartTaskResponse.Merge(m, src) +} +func (m *StartTaskResponse) XXX_Size() int { + return xxx_messageInfo_StartTaskResponse.Size(m) +} +func (m *StartTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartTaskResponse proto.InternalMessageInfo + +func (m *StartTaskResponse) GetResult() StartTaskResponse_Result { + if m != nil { + return m.Result + } + return StartTaskResponse_SUCCESS +} + +func (m *StartTaskResponse) GetDriverErrorMsg() string { + if m != nil { + return m.DriverErrorMsg + } + return "" +} + +func (m *StartTaskResponse) GetHandle() *TaskHandle { + if m != nil { + return m.Handle + } + return nil +} + +func (m *StartTaskResponse) GetNetworkOverride() *NetworkOverride { + if m != nil { + return m.NetworkOverride + } + return nil +} + +type WaitTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte 
`json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WaitTaskRequest) Reset() { *m = WaitTaskRequest{} } +func (m *WaitTaskRequest) String() string { return proto.CompactTextString(m) } +func (*WaitTaskRequest) ProtoMessage() {} +func (*WaitTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{10} +} + +func (m *WaitTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WaitTaskRequest.Unmarshal(m, b) +} +func (m *WaitTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WaitTaskRequest.Marshal(b, m, deterministic) +} +func (m *WaitTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WaitTaskRequest.Merge(m, src) +} +func (m *WaitTaskRequest) XXX_Size() int { + return xxx_messageInfo_WaitTaskRequest.Size(m) +} +func (m *WaitTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WaitTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WaitTaskRequest proto.InternalMessageInfo + +func (m *WaitTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +type WaitTaskResponse struct { + // Result is the exit status of the task + Result *ExitResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + // Err is set if any driver error occurred while waiting for the task + Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WaitTaskResponse) Reset() { *m = WaitTaskResponse{} } +func (m *WaitTaskResponse) String() string { return proto.CompactTextString(m) } +func (*WaitTaskResponse) ProtoMessage() {} +func (*WaitTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{11} +} + +func (m *WaitTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WaitTaskResponse.Unmarshal(m, b) +} +func (m *WaitTaskResponse) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WaitTaskResponse.Marshal(b, m, deterministic) +} +func (m *WaitTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WaitTaskResponse.Merge(m, src) +} +func (m *WaitTaskResponse) XXX_Size() int { + return xxx_messageInfo_WaitTaskResponse.Size(m) +} +func (m *WaitTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WaitTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WaitTaskResponse proto.InternalMessageInfo + +func (m *WaitTaskResponse) GetResult() *ExitResult { + if m != nil { + return m.Result + } + return nil +} + +func (m *WaitTaskResponse) GetErr() string { + if m != nil { + return m.Err + } + return "" +} + +type StopTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // Timeout defines the amount of time to wait before forcefully killing + // the task. For example, on Unix clients, this means sending a SIGKILL to + // the process. 
+ Timeout *duration.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` + // Signal can be set to override the Task's configured shutdown signal + Signal string `protobuf:"bytes,3,opt,name=signal,proto3" json:"signal,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopTaskRequest) Reset() { *m = StopTaskRequest{} } +func (m *StopTaskRequest) String() string { return proto.CompactTextString(m) } +func (*StopTaskRequest) ProtoMessage() {} +func (*StopTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{12} +} + +func (m *StopTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopTaskRequest.Unmarshal(m, b) +} +func (m *StopTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopTaskRequest.Marshal(b, m, deterministic) +} +func (m *StopTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopTaskRequest.Merge(m, src) +} +func (m *StopTaskRequest) XXX_Size() int { + return xxx_messageInfo_StopTaskRequest.Size(m) +} +func (m *StopTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopTaskRequest proto.InternalMessageInfo + +func (m *StopTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *StopTaskRequest) GetTimeout() *duration.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *StopTaskRequest) GetSignal() string { + if m != nil { + return m.Signal + } + return "" +} + +type StopTaskResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopTaskResponse) Reset() { *m = StopTaskResponse{} } +func (m *StopTaskResponse) String() string { return proto.CompactTextString(m) } +func (*StopTaskResponse) ProtoMessage() {} +func 
(*StopTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{13} +} + +func (m *StopTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopTaskResponse.Unmarshal(m, b) +} +func (m *StopTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopTaskResponse.Marshal(b, m, deterministic) +} +func (m *StopTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopTaskResponse.Merge(m, src) +} +func (m *StopTaskResponse) XXX_Size() int { + return xxx_messageInfo_StopTaskResponse.Size(m) +} +func (m *StopTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StopTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StopTaskResponse proto.InternalMessageInfo + +type DestroyTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // Force destroys the task even if it is still in a running state + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DestroyTaskRequest) Reset() { *m = DestroyTaskRequest{} } +func (m *DestroyTaskRequest) String() string { return proto.CompactTextString(m) } +func (*DestroyTaskRequest) ProtoMessage() {} +func (*DestroyTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{14} +} + +func (m *DestroyTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DestroyTaskRequest.Unmarshal(m, b) +} +func (m *DestroyTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DestroyTaskRequest.Marshal(b, m, deterministic) +} +func (m *DestroyTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DestroyTaskRequest.Merge(m, src) +} +func (m *DestroyTaskRequest) XXX_Size() int { + return 
xxx_messageInfo_DestroyTaskRequest.Size(m) +} +func (m *DestroyTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DestroyTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DestroyTaskRequest proto.InternalMessageInfo + +func (m *DestroyTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *DestroyTaskRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +type DestroyTaskResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DestroyTaskResponse) Reset() { *m = DestroyTaskResponse{} } +func (m *DestroyTaskResponse) String() string { return proto.CompactTextString(m) } +func (*DestroyTaskResponse) ProtoMessage() {} +func (*DestroyTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{15} +} + +func (m *DestroyTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DestroyTaskResponse.Unmarshal(m, b) +} +func (m *DestroyTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DestroyTaskResponse.Marshal(b, m, deterministic) +} +func (m *DestroyTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DestroyTaskResponse.Merge(m, src) +} +func (m *DestroyTaskResponse) XXX_Size() int { + return xxx_messageInfo_DestroyTaskResponse.Size(m) +} +func (m *DestroyTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DestroyTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DestroyTaskResponse proto.InternalMessageInfo + +type InspectTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InspectTaskRequest) Reset() { *m = InspectTaskRequest{} } +func (m *InspectTaskRequest) 
String() string { return proto.CompactTextString(m) } +func (*InspectTaskRequest) ProtoMessage() {} +func (*InspectTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{16} +} + +func (m *InspectTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InspectTaskRequest.Unmarshal(m, b) +} +func (m *InspectTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InspectTaskRequest.Marshal(b, m, deterministic) +} +func (m *InspectTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InspectTaskRequest.Merge(m, src) +} +func (m *InspectTaskRequest) XXX_Size() int { + return xxx_messageInfo_InspectTaskRequest.Size(m) +} +func (m *InspectTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InspectTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InspectTaskRequest proto.InternalMessageInfo + +func (m *InspectTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +type InspectTaskResponse struct { + // Task details + Task *TaskStatus `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` + // Driver details for task + Driver *TaskDriverStatus `protobuf:"bytes,2,opt,name=driver,proto3" json:"driver,omitempty"` + // NetworkOverride info if set + NetworkOverride *NetworkOverride `protobuf:"bytes,3,opt,name=network_override,json=networkOverride,proto3" json:"network_override,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InspectTaskResponse) Reset() { *m = InspectTaskResponse{} } +func (m *InspectTaskResponse) String() string { return proto.CompactTextString(m) } +func (*InspectTaskResponse) ProtoMessage() {} +func (*InspectTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{17} +} + +func (m *InspectTaskResponse) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_InspectTaskResponse.Unmarshal(m, b) +} +func (m *InspectTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InspectTaskResponse.Marshal(b, m, deterministic) +} +func (m *InspectTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InspectTaskResponse.Merge(m, src) +} +func (m *InspectTaskResponse) XXX_Size() int { + return xxx_messageInfo_InspectTaskResponse.Size(m) +} +func (m *InspectTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InspectTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InspectTaskResponse proto.InternalMessageInfo + +func (m *InspectTaskResponse) GetTask() *TaskStatus { + if m != nil { + return m.Task + } + return nil +} + +func (m *InspectTaskResponse) GetDriver() *TaskDriverStatus { + if m != nil { + return m.Driver + } + return nil +} + +func (m *InspectTaskResponse) GetNetworkOverride() *NetworkOverride { + if m != nil { + return m.NetworkOverride + } + return nil +} + +type TaskStatsRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // CollectionInterval is the interval at which to stream stats to the caller + CollectionInterval *duration.Duration `protobuf:"bytes,2,opt,name=collection_interval,json=collectionInterval,proto3" json:"collection_interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskStatsRequest) Reset() { *m = TaskStatsRequest{} } +func (m *TaskStatsRequest) String() string { return proto.CompactTextString(m) } +func (*TaskStatsRequest) ProtoMessage() {} +func (*TaskStatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{18} +} + +func (m *TaskStatsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskStatsRequest.Unmarshal(m, b) +} +func (m *TaskStatsRequest) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskStatsRequest.Marshal(b, m, deterministic) +} +func (m *TaskStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskStatsRequest.Merge(m, src) +} +func (m *TaskStatsRequest) XXX_Size() int { + return xxx_messageInfo_TaskStatsRequest.Size(m) +} +func (m *TaskStatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TaskStatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskStatsRequest proto.InternalMessageInfo + +func (m *TaskStatsRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *TaskStatsRequest) GetCollectionInterval() *duration.Duration { + if m != nil { + return m.CollectionInterval + } + return nil +} + +type TaskStatsResponse struct { + // Stats for the task + Stats *TaskStats `protobuf:"bytes,1,opt,name=stats,proto3" json:"stats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskStatsResponse) Reset() { *m = TaskStatsResponse{} } +func (m *TaskStatsResponse) String() string { return proto.CompactTextString(m) } +func (*TaskStatsResponse) ProtoMessage() {} +func (*TaskStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{19} +} + +func (m *TaskStatsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskStatsResponse.Unmarshal(m, b) +} +func (m *TaskStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskStatsResponse.Marshal(b, m, deterministic) +} +func (m *TaskStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskStatsResponse.Merge(m, src) +} +func (m *TaskStatsResponse) XXX_Size() int { + return xxx_messageInfo_TaskStatsResponse.Size(m) +} +func (m *TaskStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TaskStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskStatsResponse proto.InternalMessageInfo + +func (m 
*TaskStatsResponse) GetStats() *TaskStats { + if m != nil { + return m.Stats + } + return nil +} + +type TaskEventsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskEventsRequest) Reset() { *m = TaskEventsRequest{} } +func (m *TaskEventsRequest) String() string { return proto.CompactTextString(m) } +func (*TaskEventsRequest) ProtoMessage() {} +func (*TaskEventsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{20} +} + +func (m *TaskEventsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskEventsRequest.Unmarshal(m, b) +} +func (m *TaskEventsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskEventsRequest.Marshal(b, m, deterministic) +} +func (m *TaskEventsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskEventsRequest.Merge(m, src) +} +func (m *TaskEventsRequest) XXX_Size() int { + return xxx_messageInfo_TaskEventsRequest.Size(m) +} +func (m *TaskEventsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TaskEventsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskEventsRequest proto.InternalMessageInfo + +type SignalTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // Signal is the operating system signal to send to the task. 
Ex: SIGHUP + Signal string `protobuf:"bytes,2,opt,name=signal,proto3" json:"signal,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignalTaskRequest) Reset() { *m = SignalTaskRequest{} } +func (m *SignalTaskRequest) String() string { return proto.CompactTextString(m) } +func (*SignalTaskRequest) ProtoMessage() {} +func (*SignalTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{21} +} + +func (m *SignalTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignalTaskRequest.Unmarshal(m, b) +} +func (m *SignalTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignalTaskRequest.Marshal(b, m, deterministic) +} +func (m *SignalTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignalTaskRequest.Merge(m, src) +} +func (m *SignalTaskRequest) XXX_Size() int { + return xxx_messageInfo_SignalTaskRequest.Size(m) +} +func (m *SignalTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SignalTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SignalTaskRequest proto.InternalMessageInfo + +func (m *SignalTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *SignalTaskRequest) GetSignal() string { + if m != nil { + return m.Signal + } + return "" +} + +type SignalTaskResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignalTaskResponse) Reset() { *m = SignalTaskResponse{} } +func (m *SignalTaskResponse) String() string { return proto.CompactTextString(m) } +func (*SignalTaskResponse) ProtoMessage() {} +func (*SignalTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{22} +} + +func (m *SignalTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignalTaskResponse.Unmarshal(m, b) 
+} +func (m *SignalTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignalTaskResponse.Marshal(b, m, deterministic) +} +func (m *SignalTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignalTaskResponse.Merge(m, src) +} +func (m *SignalTaskResponse) XXX_Size() int { + return xxx_messageInfo_SignalTaskResponse.Size(m) +} +func (m *SignalTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SignalTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SignalTaskResponse proto.InternalMessageInfo + +type ExecTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // Command is the command to execute in the task environment + Command []string `protobuf:"bytes,2,rep,name=command,proto3" json:"command,omitempty"` + // Timeout is the amount of time to wait for the command to stop. + // Defaults to 0 (run forever) + Timeout *duration.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecTaskRequest) Reset() { *m = ExecTaskRequest{} } +func (m *ExecTaskRequest) String() string { return proto.CompactTextString(m) } +func (*ExecTaskRequest) ProtoMessage() {} +func (*ExecTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{23} +} + +func (m *ExecTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecTaskRequest.Unmarshal(m, b) +} +func (m *ExecTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecTaskRequest.Marshal(b, m, deterministic) +} +func (m *ExecTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecTaskRequest.Merge(m, src) +} +func (m *ExecTaskRequest) XXX_Size() int { + return xxx_messageInfo_ExecTaskRequest.Size(m) +} +func (m 
*ExecTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecTaskRequest proto.InternalMessageInfo + +func (m *ExecTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *ExecTaskRequest) GetCommand() []string { + if m != nil { + return m.Command + } + return nil +} + +func (m *ExecTaskRequest) GetTimeout() *duration.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +type ExecTaskResponse struct { + // Stdout from the exec + Stdout []byte `protobuf:"bytes,1,opt,name=stdout,proto3" json:"stdout,omitempty"` + // Stderr from the exec + Stderr []byte `protobuf:"bytes,2,opt,name=stderr,proto3" json:"stderr,omitempty"` + // Result from the exec + Result *ExitResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecTaskResponse) Reset() { *m = ExecTaskResponse{} } +func (m *ExecTaskResponse) String() string { return proto.CompactTextString(m) } +func (*ExecTaskResponse) ProtoMessage() {} +func (*ExecTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{24} +} + +func (m *ExecTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecTaskResponse.Unmarshal(m, b) +} +func (m *ExecTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecTaskResponse.Marshal(b, m, deterministic) +} +func (m *ExecTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecTaskResponse.Merge(m, src) +} +func (m *ExecTaskResponse) XXX_Size() int { + return xxx_messageInfo_ExecTaskResponse.Size(m) +} +func (m *ExecTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecTaskResponse proto.InternalMessageInfo + +func (m *ExecTaskResponse) GetStdout() []byte { + 
if m != nil { + return m.Stdout + } + return nil +} + +func (m *ExecTaskResponse) GetStderr() []byte { + if m != nil { + return m.Stderr + } + return nil +} + +func (m *ExecTaskResponse) GetResult() *ExitResult { + if m != nil { + return m.Result + } + return nil +} + +type ExecTaskStreamingIOOperation struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Close bool `protobuf:"varint,2,opt,name=close,proto3" json:"close,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecTaskStreamingIOOperation) Reset() { *m = ExecTaskStreamingIOOperation{} } +func (m *ExecTaskStreamingIOOperation) String() string { return proto.CompactTextString(m) } +func (*ExecTaskStreamingIOOperation) ProtoMessage() {} +func (*ExecTaskStreamingIOOperation) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{25} +} + +func (m *ExecTaskStreamingIOOperation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecTaskStreamingIOOperation.Unmarshal(m, b) +} +func (m *ExecTaskStreamingIOOperation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecTaskStreamingIOOperation.Marshal(b, m, deterministic) +} +func (m *ExecTaskStreamingIOOperation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecTaskStreamingIOOperation.Merge(m, src) +} +func (m *ExecTaskStreamingIOOperation) XXX_Size() int { + return xxx_messageInfo_ExecTaskStreamingIOOperation.Size(m) +} +func (m *ExecTaskStreamingIOOperation) XXX_DiscardUnknown() { + xxx_messageInfo_ExecTaskStreamingIOOperation.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecTaskStreamingIOOperation proto.InternalMessageInfo + +func (m *ExecTaskStreamingIOOperation) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ExecTaskStreamingIOOperation) GetClose() bool { + if m != nil { + return m.Close + } + return false +} + +type 
ExecTaskStreamingRequest struct { + Setup *ExecTaskStreamingRequest_Setup `protobuf:"bytes,1,opt,name=setup,proto3" json:"setup,omitempty"` + TtySize *ExecTaskStreamingRequest_TerminalSize `protobuf:"bytes,2,opt,name=tty_size,json=ttySize,proto3" json:"tty_size,omitempty"` + Stdin *ExecTaskStreamingIOOperation `protobuf:"bytes,3,opt,name=stdin,proto3" json:"stdin,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecTaskStreamingRequest) Reset() { *m = ExecTaskStreamingRequest{} } +func (m *ExecTaskStreamingRequest) String() string { return proto.CompactTextString(m) } +func (*ExecTaskStreamingRequest) ProtoMessage() {} +func (*ExecTaskStreamingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{26} +} + +func (m *ExecTaskStreamingRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecTaskStreamingRequest.Unmarshal(m, b) +} +func (m *ExecTaskStreamingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecTaskStreamingRequest.Marshal(b, m, deterministic) +} +func (m *ExecTaskStreamingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecTaskStreamingRequest.Merge(m, src) +} +func (m *ExecTaskStreamingRequest) XXX_Size() int { + return xxx_messageInfo_ExecTaskStreamingRequest.Size(m) +} +func (m *ExecTaskStreamingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecTaskStreamingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecTaskStreamingRequest proto.InternalMessageInfo + +func (m *ExecTaskStreamingRequest) GetSetup() *ExecTaskStreamingRequest_Setup { + if m != nil { + return m.Setup + } + return nil +} + +func (m *ExecTaskStreamingRequest) GetTtySize() *ExecTaskStreamingRequest_TerminalSize { + if m != nil { + return m.TtySize + } + return nil +} + +func (m *ExecTaskStreamingRequest) GetStdin() *ExecTaskStreamingIOOperation { + if m != nil { + return m.Stdin + } + 
return nil +} + +type ExecTaskStreamingRequest_Setup struct { + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + Command []string `protobuf:"bytes,2,rep,name=command,proto3" json:"command,omitempty"` + Tty bool `protobuf:"varint,3,opt,name=tty,proto3" json:"tty,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecTaskStreamingRequest_Setup) Reset() { *m = ExecTaskStreamingRequest_Setup{} } +func (m *ExecTaskStreamingRequest_Setup) String() string { return proto.CompactTextString(m) } +func (*ExecTaskStreamingRequest_Setup) ProtoMessage() {} +func (*ExecTaskStreamingRequest_Setup) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{26, 0} +} + +func (m *ExecTaskStreamingRequest_Setup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecTaskStreamingRequest_Setup.Unmarshal(m, b) +} +func (m *ExecTaskStreamingRequest_Setup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecTaskStreamingRequest_Setup.Marshal(b, m, deterministic) +} +func (m *ExecTaskStreamingRequest_Setup) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecTaskStreamingRequest_Setup.Merge(m, src) +} +func (m *ExecTaskStreamingRequest_Setup) XXX_Size() int { + return xxx_messageInfo_ExecTaskStreamingRequest_Setup.Size(m) +} +func (m *ExecTaskStreamingRequest_Setup) XXX_DiscardUnknown() { + xxx_messageInfo_ExecTaskStreamingRequest_Setup.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecTaskStreamingRequest_Setup proto.InternalMessageInfo + +func (m *ExecTaskStreamingRequest_Setup) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *ExecTaskStreamingRequest_Setup) GetCommand() []string { + if m != nil { + return m.Command + } + return nil +} + +func (m *ExecTaskStreamingRequest_Setup) GetTty() bool { + if m != nil { + return m.Tty + } + return false +} + +type 
ExecTaskStreamingRequest_TerminalSize struct { + Height int32 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Width int32 `protobuf:"varint,2,opt,name=width,proto3" json:"width,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecTaskStreamingRequest_TerminalSize) Reset() { *m = ExecTaskStreamingRequest_TerminalSize{} } +func (m *ExecTaskStreamingRequest_TerminalSize) String() string { return proto.CompactTextString(m) } +func (*ExecTaskStreamingRequest_TerminalSize) ProtoMessage() {} +func (*ExecTaskStreamingRequest_TerminalSize) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{26, 1} +} + +func (m *ExecTaskStreamingRequest_TerminalSize) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecTaskStreamingRequest_TerminalSize.Unmarshal(m, b) +} +func (m *ExecTaskStreamingRequest_TerminalSize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecTaskStreamingRequest_TerminalSize.Marshal(b, m, deterministic) +} +func (m *ExecTaskStreamingRequest_TerminalSize) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecTaskStreamingRequest_TerminalSize.Merge(m, src) +} +func (m *ExecTaskStreamingRequest_TerminalSize) XXX_Size() int { + return xxx_messageInfo_ExecTaskStreamingRequest_TerminalSize.Size(m) +} +func (m *ExecTaskStreamingRequest_TerminalSize) XXX_DiscardUnknown() { + xxx_messageInfo_ExecTaskStreamingRequest_TerminalSize.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecTaskStreamingRequest_TerminalSize proto.InternalMessageInfo + +func (m *ExecTaskStreamingRequest_TerminalSize) GetHeight() int32 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *ExecTaskStreamingRequest_TerminalSize) GetWidth() int32 { + if m != nil { + return m.Width + } + return 0 +} + +type ExecTaskStreamingResponse struct { + Stdout *ExecTaskStreamingIOOperation 
`protobuf:"bytes,1,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr *ExecTaskStreamingIOOperation `protobuf:"bytes,2,opt,name=stderr,proto3" json:"stderr,omitempty"` + Exited bool `protobuf:"varint,3,opt,name=exited,proto3" json:"exited,omitempty"` + Result *ExitResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecTaskStreamingResponse) Reset() { *m = ExecTaskStreamingResponse{} } +func (m *ExecTaskStreamingResponse) String() string { return proto.CompactTextString(m) } +func (*ExecTaskStreamingResponse) ProtoMessage() {} +func (*ExecTaskStreamingResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{27} +} + +func (m *ExecTaskStreamingResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecTaskStreamingResponse.Unmarshal(m, b) +} +func (m *ExecTaskStreamingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecTaskStreamingResponse.Marshal(b, m, deterministic) +} +func (m *ExecTaskStreamingResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecTaskStreamingResponse.Merge(m, src) +} +func (m *ExecTaskStreamingResponse) XXX_Size() int { + return xxx_messageInfo_ExecTaskStreamingResponse.Size(m) +} +func (m *ExecTaskStreamingResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecTaskStreamingResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecTaskStreamingResponse proto.InternalMessageInfo + +func (m *ExecTaskStreamingResponse) GetStdout() *ExecTaskStreamingIOOperation { + if m != nil { + return m.Stdout + } + return nil +} + +func (m *ExecTaskStreamingResponse) GetStderr() *ExecTaskStreamingIOOperation { + if m != nil { + return m.Stderr + } + return nil +} + +func (m *ExecTaskStreamingResponse) GetExited() bool { + if m != nil { + return m.Exited + } + return false +} + +func (m 
*ExecTaskStreamingResponse) GetResult() *ExitResult { + if m != nil { + return m.Result + } + return nil +} + +type CreateNetworkRequest struct { + // AllocID of the allocation the network is associated with + AllocId string `protobuf:"bytes,1,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"` + // Hostname of the network namespace + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateNetworkRequest) Reset() { *m = CreateNetworkRequest{} } +func (m *CreateNetworkRequest) String() string { return proto.CompactTextString(m) } +func (*CreateNetworkRequest) ProtoMessage() {} +func (*CreateNetworkRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{28} +} + +func (m *CreateNetworkRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateNetworkRequest.Unmarshal(m, b) +} +func (m *CreateNetworkRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateNetworkRequest.Marshal(b, m, deterministic) +} +func (m *CreateNetworkRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateNetworkRequest.Merge(m, src) +} +func (m *CreateNetworkRequest) XXX_Size() int { + return xxx_messageInfo_CreateNetworkRequest.Size(m) +} +func (m *CreateNetworkRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateNetworkRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateNetworkRequest proto.InternalMessageInfo + +func (m *CreateNetworkRequest) GetAllocId() string { + if m != nil { + return m.AllocId + } + return "" +} + +func (m *CreateNetworkRequest) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +type CreateNetworkResponse struct { + IsolationSpec *NetworkIsolationSpec `protobuf:"bytes,1,opt,name=isolation_spec,json=isolationSpec,proto3" json:"isolation_spec,omitempty"` + // 
created indicates that the network namespace is newly created + // as a result of this request. if false, the NetworkIsolationSpec + // value returned is an existing spec. + Created bool `protobuf:"varint,2,opt,name=created,proto3" json:"created,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateNetworkResponse) Reset() { *m = CreateNetworkResponse{} } +func (m *CreateNetworkResponse) String() string { return proto.CompactTextString(m) } +func (*CreateNetworkResponse) ProtoMessage() {} +func (*CreateNetworkResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{29} +} + +func (m *CreateNetworkResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateNetworkResponse.Unmarshal(m, b) +} +func (m *CreateNetworkResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateNetworkResponse.Marshal(b, m, deterministic) +} +func (m *CreateNetworkResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateNetworkResponse.Merge(m, src) +} +func (m *CreateNetworkResponse) XXX_Size() int { + return xxx_messageInfo_CreateNetworkResponse.Size(m) +} +func (m *CreateNetworkResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateNetworkResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateNetworkResponse proto.InternalMessageInfo + +func (m *CreateNetworkResponse) GetIsolationSpec() *NetworkIsolationSpec { + if m != nil { + return m.IsolationSpec + } + return nil +} + +func (m *CreateNetworkResponse) GetCreated() bool { + if m != nil { + return m.Created + } + return false +} + +type DestroyNetworkRequest struct { + // AllocID of the allocation the network is associated with + AllocId string `protobuf:"bytes,1,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"` + IsolationSpec *NetworkIsolationSpec `protobuf:"bytes,2,opt,name=isolation_spec,json=isolationSpec,proto3" 
json:"isolation_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DestroyNetworkRequest) Reset() { *m = DestroyNetworkRequest{} } +func (m *DestroyNetworkRequest) String() string { return proto.CompactTextString(m) } +func (*DestroyNetworkRequest) ProtoMessage() {} +func (*DestroyNetworkRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{30} +} + +func (m *DestroyNetworkRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DestroyNetworkRequest.Unmarshal(m, b) +} +func (m *DestroyNetworkRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DestroyNetworkRequest.Marshal(b, m, deterministic) +} +func (m *DestroyNetworkRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DestroyNetworkRequest.Merge(m, src) +} +func (m *DestroyNetworkRequest) XXX_Size() int { + return xxx_messageInfo_DestroyNetworkRequest.Size(m) +} +func (m *DestroyNetworkRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DestroyNetworkRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DestroyNetworkRequest proto.InternalMessageInfo + +func (m *DestroyNetworkRequest) GetAllocId() string { + if m != nil { + return m.AllocId + } + return "" +} + +func (m *DestroyNetworkRequest) GetIsolationSpec() *NetworkIsolationSpec { + if m != nil { + return m.IsolationSpec + } + return nil +} + +type DestroyNetworkResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DestroyNetworkResponse) Reset() { *m = DestroyNetworkResponse{} } +func (m *DestroyNetworkResponse) String() string { return proto.CompactTextString(m) } +func (*DestroyNetworkResponse) ProtoMessage() {} +func (*DestroyNetworkResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{31} +} + +func (m *DestroyNetworkResponse) XXX_Unmarshal(b []byte) 
error { + return xxx_messageInfo_DestroyNetworkResponse.Unmarshal(m, b) +} +func (m *DestroyNetworkResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DestroyNetworkResponse.Marshal(b, m, deterministic) +} +func (m *DestroyNetworkResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DestroyNetworkResponse.Merge(m, src) +} +func (m *DestroyNetworkResponse) XXX_Size() int { + return xxx_messageInfo_DestroyNetworkResponse.Size(m) +} +func (m *DestroyNetworkResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DestroyNetworkResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DestroyNetworkResponse proto.InternalMessageInfo + +type DriverCapabilities struct { + // SendSignals indicates that the driver can send process signals (ex. SIGUSR1) + // to the task. + SendSignals bool `protobuf:"varint,1,opt,name=send_signals,json=sendSignals,proto3" json:"send_signals,omitempty"` + // Exec indicates that the driver supports executing arbitrary commands + // in the task's execution environment. + Exec bool `protobuf:"varint,2,opt,name=exec,proto3" json:"exec,omitempty"` + // FsIsolation indicates what kind of filesystem isolation a driver supports. + FsIsolation DriverCapabilities_FSIsolation `protobuf:"varint,3,opt,name=fs_isolation,json=fsIsolation,proto3,enum=hashicorp.nomad.plugins.drivers.proto.DriverCapabilities_FSIsolation" json:"fs_isolation,omitempty"` + NetworkIsolationModes []NetworkIsolationSpec_NetworkIsolationMode `protobuf:"varint,4,rep,packed,name=network_isolation_modes,json=networkIsolationModes,proto3,enum=hashicorp.nomad.plugins.drivers.proto.NetworkIsolationSpec_NetworkIsolationMode" json:"network_isolation_modes,omitempty"` + MustCreateNetwork bool `protobuf:"varint,5,opt,name=must_create_network,json=mustCreateNetwork,proto3" json:"must_create_network,omitempty"` + // MountConfigs indicates whether the driver supports mount configurations. 
+ MountConfigs DriverCapabilities_MountConfigs `protobuf:"varint,6,opt,name=mount_configs,json=mountConfigs,proto3,enum=hashicorp.nomad.plugins.drivers.proto.DriverCapabilities_MountConfigs" json:"mount_configs,omitempty"` + // disable_log_collection indicates whether the driver has the capability of + // disabling log collection + DisableLogCollection bool `protobuf:"varint,8,opt,name=disable_log_collection,json=disableLogCollection,proto3" json:"disable_log_collection,omitempty"` + // dynamic_workload_users indicates the task is capable of using UID/GID + // assigned from the Nomad client as user credentials for the task. + DynamicWorkloadUsers bool `protobuf:"varint,9,opt,name=dynamic_workload_users,json=dynamicWorkloadUsers,proto3" json:"dynamic_workload_users,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DriverCapabilities) Reset() { *m = DriverCapabilities{} } +func (m *DriverCapabilities) String() string { return proto.CompactTextString(m) } +func (*DriverCapabilities) ProtoMessage() {} +func (*DriverCapabilities) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{32} +} + +func (m *DriverCapabilities) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DriverCapabilities.Unmarshal(m, b) +} +func (m *DriverCapabilities) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DriverCapabilities.Marshal(b, m, deterministic) +} +func (m *DriverCapabilities) XXX_Merge(src proto.Message) { + xxx_messageInfo_DriverCapabilities.Merge(m, src) +} +func (m *DriverCapabilities) XXX_Size() int { + return xxx_messageInfo_DriverCapabilities.Size(m) +} +func (m *DriverCapabilities) XXX_DiscardUnknown() { + xxx_messageInfo_DriverCapabilities.DiscardUnknown(m) +} + +var xxx_messageInfo_DriverCapabilities proto.InternalMessageInfo + +func (m *DriverCapabilities) GetSendSignals() bool { + if m != nil { + return 
m.SendSignals + } + return false +} + +func (m *DriverCapabilities) GetExec() bool { + if m != nil { + return m.Exec + } + return false +} + +func (m *DriverCapabilities) GetFsIsolation() DriverCapabilities_FSIsolation { + if m != nil { + return m.FsIsolation + } + return DriverCapabilities_NONE +} + +func (m *DriverCapabilities) GetNetworkIsolationModes() []NetworkIsolationSpec_NetworkIsolationMode { + if m != nil { + return m.NetworkIsolationModes + } + return nil +} + +func (m *DriverCapabilities) GetMustCreateNetwork() bool { + if m != nil { + return m.MustCreateNetwork + } + return false +} + +func (m *DriverCapabilities) GetMountConfigs() DriverCapabilities_MountConfigs { + if m != nil { + return m.MountConfigs + } + return DriverCapabilities_UNKNOWN_MOUNTS +} + +func (m *DriverCapabilities) GetDisableLogCollection() bool { + if m != nil { + return m.DisableLogCollection + } + return false +} + +func (m *DriverCapabilities) GetDynamicWorkloadUsers() bool { + if m != nil { + return m.DynamicWorkloadUsers + } + return false +} + +type NetworkIsolationSpec struct { + Mode NetworkIsolationSpec_NetworkIsolationMode `protobuf:"varint,1,opt,name=mode,proto3,enum=hashicorp.nomad.plugins.drivers.proto.NetworkIsolationSpec_NetworkIsolationMode" json:"mode,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + HostsConfig *HostsConfig `protobuf:"bytes,4,opt,name=hostsConfig,proto3" json:"hostsConfig,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkIsolationSpec) Reset() { *m = NetworkIsolationSpec{} } +func (m *NetworkIsolationSpec) String() string { return proto.CompactTextString(m) } +func (*NetworkIsolationSpec) ProtoMessage() {} +func 
(*NetworkIsolationSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{33} +} + +func (m *NetworkIsolationSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetworkIsolationSpec.Unmarshal(m, b) +} +func (m *NetworkIsolationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NetworkIsolationSpec.Marshal(b, m, deterministic) +} +func (m *NetworkIsolationSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkIsolationSpec.Merge(m, src) +} +func (m *NetworkIsolationSpec) XXX_Size() int { + return xxx_messageInfo_NetworkIsolationSpec.Size(m) +} +func (m *NetworkIsolationSpec) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkIsolationSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkIsolationSpec proto.InternalMessageInfo + +func (m *NetworkIsolationSpec) GetMode() NetworkIsolationSpec_NetworkIsolationMode { + if m != nil { + return m.Mode + } + return NetworkIsolationSpec_HOST +} + +func (m *NetworkIsolationSpec) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *NetworkIsolationSpec) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *NetworkIsolationSpec) GetHostsConfig() *HostsConfig { + if m != nil { + return m.HostsConfig + } + return nil +} + +type HostsConfig struct { + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HostsConfig) Reset() { *m = HostsConfig{} } +func (m *HostsConfig) String() string { return proto.CompactTextString(m) } +func (*HostsConfig) ProtoMessage() {} +func (*HostsConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{34} +} + +func (m *HostsConfig) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_HostsConfig.Unmarshal(m, b) +} +func (m *HostsConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HostsConfig.Marshal(b, m, deterministic) +} +func (m *HostsConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostsConfig.Merge(m, src) +} +func (m *HostsConfig) XXX_Size() int { + return xxx_messageInfo_HostsConfig.Size(m) +} +func (m *HostsConfig) XXX_DiscardUnknown() { + xxx_messageInfo_HostsConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_HostsConfig proto.InternalMessageInfo + +func (m *HostsConfig) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +func (m *HostsConfig) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +type DNSConfig struct { + Servers []string `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"` + Searches []string `protobuf:"bytes,2,rep,name=searches,proto3" json:"searches,omitempty"` + Options []string `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DNSConfig) Reset() { *m = DNSConfig{} } +func (m *DNSConfig) String() string { return proto.CompactTextString(m) } +func (*DNSConfig) ProtoMessage() {} +func (*DNSConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{35} +} + +func (m *DNSConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DNSConfig.Unmarshal(m, b) +} +func (m *DNSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DNSConfig.Marshal(b, m, deterministic) +} +func (m *DNSConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_DNSConfig.Merge(m, src) +} +func (m *DNSConfig) XXX_Size() int { + return xxx_messageInfo_DNSConfig.Size(m) +} +func (m *DNSConfig) XXX_DiscardUnknown() { + xxx_messageInfo_DNSConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_DNSConfig 
proto.InternalMessageInfo + +func (m *DNSConfig) GetServers() []string { + if m != nil { + return m.Servers + } + return nil +} + +func (m *DNSConfig) GetSearches() []string { + if m != nil { + return m.Searches + } + return nil +} + +func (m *DNSConfig) GetOptions() []string { + if m != nil { + return m.Options + } + return nil +} + +type TaskConfig struct { + // Id of the task, recommended to the globally unique, must be unique to the driver. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Name of the task + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // MsgpackDriverConfig is the encoded driver configuation of the task + MsgpackDriverConfig []byte `protobuf:"bytes,3,opt,name=msgpack_driver_config,json=msgpackDriverConfig,proto3" json:"msgpack_driver_config,omitempty"` + // Env is the a set of key/value pairs to be set as environment variables + Env map[string]string `protobuf:"bytes,4,rep,name=env,proto3" json:"env,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // DeviceEnv is the set of environment variables that are defined by device + // plugins. This allows the driver to differentiate environment variables + // set by the device plugins and those by the user. When populating the + // task's environment env should be used. + DeviceEnv map[string]string `protobuf:"bytes,5,rep,name=device_env,json=deviceEnv,proto3" json:"device_env,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Resources defines the resources to isolate + Resources *Resources `protobuf:"bytes,6,opt,name=resources,proto3" json:"resources,omitempty"` + // Mounts is a list of targets to bind mount into the task directory + Mounts []*Mount `protobuf:"bytes,7,rep,name=mounts,proto3" json:"mounts,omitempty"` + // Devices is a list of system devices to mount into the task's execution + // environment. 
+ Devices []*Device `protobuf:"bytes,8,rep,name=devices,proto3" json:"devices,omitempty"` + // User defines the operating system user the tasks should run as + User string `protobuf:"bytes,9,opt,name=user,proto3" json:"user,omitempty"` + // AllocDir is the directory on the host where the allocation directory + // exists. + AllocDir string `protobuf:"bytes,10,opt,name=alloc_dir,json=allocDir,proto3" json:"alloc_dir,omitempty"` + // StdoutPath is the path to the file to open and write task stdout to + StdoutPath string `protobuf:"bytes,11,opt,name=stdout_path,json=stdoutPath,proto3" json:"stdout_path,omitempty"` + // StderrPath is the path to the file to open and write task stderr to + StderrPath string `protobuf:"bytes,12,opt,name=stderr_path,json=stderrPath,proto3" json:"stderr_path,omitempty"` + // TaskGroupName is the name of the task group which this task is a member of + TaskGroupName string `protobuf:"bytes,13,opt,name=task_group_name,json=taskGroupName,proto3" json:"task_group_name,omitempty"` + // JobName is the name of the job of which this task is part of + JobName string `protobuf:"bytes,14,opt,name=job_name,json=jobName,proto3" json:"job_name,omitempty"` + // AllocId is the ID of the associated allocation + AllocId string `protobuf:"bytes,15,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"` + // NetworkIsolationSpec specifies the configuration for the network namespace + // to use for the task. 
*Only supported on Linux + NetworkIsolationSpec *NetworkIsolationSpec `protobuf:"bytes,16,opt,name=network_isolation_spec,json=networkIsolationSpec,proto3" json:"network_isolation_spec,omitempty"` + // DNSConfig is the configuration for task DNS resolvers and other options + Dns *DNSConfig `protobuf:"bytes,17,opt,name=dns,proto3" json:"dns,omitempty"` + // JobId is the ID of the job of which this task is part of + JobId string `protobuf:"bytes,18,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + // Namespace is the namespace of the job of which this task is part of + Namespace string `protobuf:"bytes,19,opt,name=namespace,proto3" json:"namespace,omitempty"` + // NodeName is the name of the node where the associated allocation is running + NodeName string `protobuf:"bytes,20,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + // NodeId is the ID of the node where the associated allocation is running + NodeId string `protobuf:"bytes,21,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // ParentJobID is the parent id for dispatch and periodic jobs + ParentJobId string `protobuf:"bytes,22,opt,name=parent_job_id,json=parentJobId,proto3" json:"parent_job_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskConfig) Reset() { *m = TaskConfig{} } +func (m *TaskConfig) String() string { return proto.CompactTextString(m) } +func (*TaskConfig) ProtoMessage() {} +func (*TaskConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{36} +} + +func (m *TaskConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskConfig.Unmarshal(m, b) +} +func (m *TaskConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskConfig.Marshal(b, m, deterministic) +} +func (m *TaskConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskConfig.Merge(m, src) +} +func (m *TaskConfig) 
XXX_Size() int { + return xxx_messageInfo_TaskConfig.Size(m) +} +func (m *TaskConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TaskConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskConfig proto.InternalMessageInfo + +func (m *TaskConfig) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *TaskConfig) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TaskConfig) GetMsgpackDriverConfig() []byte { + if m != nil { + return m.MsgpackDriverConfig + } + return nil +} + +func (m *TaskConfig) GetEnv() map[string]string { + if m != nil { + return m.Env + } + return nil +} + +func (m *TaskConfig) GetDeviceEnv() map[string]string { + if m != nil { + return m.DeviceEnv + } + return nil +} + +func (m *TaskConfig) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *TaskConfig) GetMounts() []*Mount { + if m != nil { + return m.Mounts + } + return nil +} + +func (m *TaskConfig) GetDevices() []*Device { + if m != nil { + return m.Devices + } + return nil +} + +func (m *TaskConfig) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *TaskConfig) GetAllocDir() string { + if m != nil { + return m.AllocDir + } + return "" +} + +func (m *TaskConfig) GetStdoutPath() string { + if m != nil { + return m.StdoutPath + } + return "" +} + +func (m *TaskConfig) GetStderrPath() string { + if m != nil { + return m.StderrPath + } + return "" +} + +func (m *TaskConfig) GetTaskGroupName() string { + if m != nil { + return m.TaskGroupName + } + return "" +} + +func (m *TaskConfig) GetJobName() string { + if m != nil { + return m.JobName + } + return "" +} + +func (m *TaskConfig) GetAllocId() string { + if m != nil { + return m.AllocId + } + return "" +} + +func (m *TaskConfig) GetNetworkIsolationSpec() *NetworkIsolationSpec { + if m != nil { + return m.NetworkIsolationSpec + } + return nil +} + +func (m *TaskConfig) GetDns() *DNSConfig { + if m != nil { + return 
m.Dns + } + return nil +} + +func (m *TaskConfig) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +func (m *TaskConfig) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *TaskConfig) GetNodeName() string { + if m != nil { + return m.NodeName + } + return "" +} + +func (m *TaskConfig) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *TaskConfig) GetParentJobId() string { + if m != nil { + return m.ParentJobId + } + return "" +} + +type Resources struct { + // AllocatedResources are the resources set for the task + AllocatedResources *AllocatedTaskResources `protobuf:"bytes,1,opt,name=allocated_resources,json=allocatedResources,proto3" json:"allocated_resources,omitempty"` + // LinuxResources are the computed values to set for specific Linux features + LinuxResources *LinuxResources `protobuf:"bytes,2,opt,name=linux_resources,json=linuxResources,proto3" json:"linux_resources,omitempty"` + // Ports are the allocated port mappings for the allocation. + // A task may use these to manually configure port mapping if shared network namespaces aren't being used. 
+ Ports []*PortMapping `protobuf:"bytes,3,rep,name=ports,proto3" json:"ports,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resources) Reset() { *m = Resources{} } +func (m *Resources) String() string { return proto.CompactTextString(m) } +func (*Resources) ProtoMessage() {} +func (*Resources) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{37} +} + +func (m *Resources) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resources.Unmarshal(m, b) +} +func (m *Resources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resources.Marshal(b, m, deterministic) +} +func (m *Resources) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resources.Merge(m, src) +} +func (m *Resources) XXX_Size() int { + return xxx_messageInfo_Resources.Size(m) +} +func (m *Resources) XXX_DiscardUnknown() { + xxx_messageInfo_Resources.DiscardUnknown(m) +} + +var xxx_messageInfo_Resources proto.InternalMessageInfo + +func (m *Resources) GetAllocatedResources() *AllocatedTaskResources { + if m != nil { + return m.AllocatedResources + } + return nil +} + +func (m *Resources) GetLinuxResources() *LinuxResources { + if m != nil { + return m.LinuxResources + } + return nil +} + +func (m *Resources) GetPorts() []*PortMapping { + if m != nil { + return m.Ports + } + return nil +} + +type AllocatedTaskResources struct { + Cpu *AllocatedCpuResources `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"` + Memory *AllocatedMemoryResources `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` + Networks []*NetworkResource `protobuf:"bytes,5,rep,name=networks,proto3" json:"networks,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocatedTaskResources) Reset() { *m = AllocatedTaskResources{} } +func (m *AllocatedTaskResources) 
String() string { return proto.CompactTextString(m) } +func (*AllocatedTaskResources) ProtoMessage() {} +func (*AllocatedTaskResources) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{38} +} + +func (m *AllocatedTaskResources) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AllocatedTaskResources.Unmarshal(m, b) +} +func (m *AllocatedTaskResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocatedTaskResources.Marshal(b, m, deterministic) +} +func (m *AllocatedTaskResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatedTaskResources.Merge(m, src) +} +func (m *AllocatedTaskResources) XXX_Size() int { + return xxx_messageInfo_AllocatedTaskResources.Size(m) +} +func (m *AllocatedTaskResources) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatedTaskResources.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatedTaskResources proto.InternalMessageInfo + +func (m *AllocatedTaskResources) GetCpu() *AllocatedCpuResources { + if m != nil { + return m.Cpu + } + return nil +} + +func (m *AllocatedTaskResources) GetMemory() *AllocatedMemoryResources { + if m != nil { + return m.Memory + } + return nil +} + +func (m *AllocatedTaskResources) GetNetworks() []*NetworkResource { + if m != nil { + return m.Networks + } + return nil +} + +type AllocatedCpuResources struct { + CpuShares int64 `protobuf:"varint,1,opt,name=cpu_shares,json=cpuShares,proto3" json:"cpu_shares,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocatedCpuResources) Reset() { *m = AllocatedCpuResources{} } +func (m *AllocatedCpuResources) String() string { return proto.CompactTextString(m) } +func (*AllocatedCpuResources) ProtoMessage() {} +func (*AllocatedCpuResources) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{39} +} + +func (m *AllocatedCpuResources) XXX_Unmarshal(b []byte) error { + 
return xxx_messageInfo_AllocatedCpuResources.Unmarshal(m, b) +} +func (m *AllocatedCpuResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocatedCpuResources.Marshal(b, m, deterministic) +} +func (m *AllocatedCpuResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatedCpuResources.Merge(m, src) +} +func (m *AllocatedCpuResources) XXX_Size() int { + return xxx_messageInfo_AllocatedCpuResources.Size(m) +} +func (m *AllocatedCpuResources) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatedCpuResources.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatedCpuResources proto.InternalMessageInfo + +func (m *AllocatedCpuResources) GetCpuShares() int64 { + if m != nil { + return m.CpuShares + } + return 0 +} + +type AllocatedMemoryResources struct { + MemoryMb int64 `protobuf:"varint,2,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb,omitempty"` + MemoryMaxMb int64 `protobuf:"varint,3,opt,name=memory_max_mb,json=memoryMaxMb,proto3" json:"memory_max_mb,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocatedMemoryResources) Reset() { *m = AllocatedMemoryResources{} } +func (m *AllocatedMemoryResources) String() string { return proto.CompactTextString(m) } +func (*AllocatedMemoryResources) ProtoMessage() {} +func (*AllocatedMemoryResources) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{40} +} + +func (m *AllocatedMemoryResources) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AllocatedMemoryResources.Unmarshal(m, b) +} +func (m *AllocatedMemoryResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocatedMemoryResources.Marshal(b, m, deterministic) +} +func (m *AllocatedMemoryResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatedMemoryResources.Merge(m, src) +} +func (m *AllocatedMemoryResources) XXX_Size() int { + return 
xxx_messageInfo_AllocatedMemoryResources.Size(m) +} +func (m *AllocatedMemoryResources) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatedMemoryResources.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatedMemoryResources proto.InternalMessageInfo + +func (m *AllocatedMemoryResources) GetMemoryMb() int64 { + if m != nil { + return m.MemoryMb + } + return 0 +} + +func (m *AllocatedMemoryResources) GetMemoryMaxMb() int64 { + if m != nil { + return m.MemoryMaxMb + } + return 0 +} + +type NetworkResource struct { + Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` + Cidr string `protobuf:"bytes,2,opt,name=cidr,proto3" json:"cidr,omitempty"` + Ip string `protobuf:"bytes,3,opt,name=ip,proto3" json:"ip,omitempty"` + Mbits int32 `protobuf:"varint,4,opt,name=mbits,proto3" json:"mbits,omitempty"` + ReservedPorts []*NetworkPort `protobuf:"bytes,5,rep,name=reserved_ports,json=reservedPorts,proto3" json:"reserved_ports,omitempty"` + DynamicPorts []*NetworkPort `protobuf:"bytes,6,rep,name=dynamic_ports,json=dynamicPorts,proto3" json:"dynamic_ports,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkResource) Reset() { *m = NetworkResource{} } +func (m *NetworkResource) String() string { return proto.CompactTextString(m) } +func (*NetworkResource) ProtoMessage() {} +func (*NetworkResource) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{41} +} + +func (m *NetworkResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetworkResource.Unmarshal(m, b) +} +func (m *NetworkResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NetworkResource.Marshal(b, m, deterministic) +} +func (m *NetworkResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkResource.Merge(m, src) +} +func (m *NetworkResource) XXX_Size() int { + return xxx_messageInfo_NetworkResource.Size(m) +} 
+func (m *NetworkResource) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkResource.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkResource proto.InternalMessageInfo + +func (m *NetworkResource) GetDevice() string { + if m != nil { + return m.Device + } + return "" +} + +func (m *NetworkResource) GetCidr() string { + if m != nil { + return m.Cidr + } + return "" +} + +func (m *NetworkResource) GetIp() string { + if m != nil { + return m.Ip + } + return "" +} + +func (m *NetworkResource) GetMbits() int32 { + if m != nil { + return m.Mbits + } + return 0 +} + +func (m *NetworkResource) GetReservedPorts() []*NetworkPort { + if m != nil { + return m.ReservedPorts + } + return nil +} + +func (m *NetworkResource) GetDynamicPorts() []*NetworkPort { + if m != nil { + return m.DynamicPorts + } + return nil +} + +type NetworkPort struct { + Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` + Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkPort) Reset() { *m = NetworkPort{} } +func (m *NetworkPort) String() string { return proto.CompactTextString(m) } +func (*NetworkPort) ProtoMessage() {} +func (*NetworkPort) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{42} +} + +func (m *NetworkPort) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetworkPort.Unmarshal(m, b) +} +func (m *NetworkPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NetworkPort.Marshal(b, m, deterministic) +} +func (m *NetworkPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPort.Merge(m, src) +} +func (m *NetworkPort) XXX_Size() int { + return xxx_messageInfo_NetworkPort.Size(m) +} +func (m *NetworkPort) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPort.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPort 
proto.InternalMessageInfo + +func (m *NetworkPort) GetLabel() string { + if m != nil { + return m.Label + } + return "" +} + +func (m *NetworkPort) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +type PortMapping struct { + Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` + Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + To int32 `protobuf:"varint,3,opt,name=to,proto3" json:"to,omitempty"` + HostIp string `protobuf:"bytes,4,opt,name=host_ip,json=hostIp,proto3" json:"host_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PortMapping) Reset() { *m = PortMapping{} } +func (m *PortMapping) String() string { return proto.CompactTextString(m) } +func (*PortMapping) ProtoMessage() {} +func (*PortMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{43} +} + +func (m *PortMapping) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PortMapping.Unmarshal(m, b) +} +func (m *PortMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PortMapping.Marshal(b, m, deterministic) +} +func (m *PortMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortMapping.Merge(m, src) +} +func (m *PortMapping) XXX_Size() int { + return xxx_messageInfo_PortMapping.Size(m) +} +func (m *PortMapping) XXX_DiscardUnknown() { + xxx_messageInfo_PortMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_PortMapping proto.InternalMessageInfo + +func (m *PortMapping) GetLabel() string { + if m != nil { + return m.Label + } + return "" +} + +func (m *PortMapping) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *PortMapping) GetTo() int32 { + if m != nil { + return m.To + } + return 0 +} + +func (m *PortMapping) GetHostIp() string { + if m != nil { + return m.HostIp + } + return "" +} + +type LinuxResources struct { + 
// CPU CFS (Completely Fair Scheduler) period. Default: 0 (not specified) + CpuPeriod int64 `protobuf:"varint,1,opt,name=cpu_period,json=cpuPeriod,proto3" json:"cpu_period,omitempty"` + // CPU CFS (Completely Fair Scheduler) quota. Default: 0 (not specified) + CpuQuota int64 `protobuf:"varint,2,opt,name=cpu_quota,json=cpuQuota,proto3" json:"cpu_quota,omitempty"` + // CPU shares (relative weight vs. other containers). Default: 0 (not specified) + CpuShares int64 `protobuf:"varint,3,opt,name=cpu_shares,json=cpuShares,proto3" json:"cpu_shares,omitempty"` + // Memory limit in bytes. Default: 0 (not specified) + MemoryLimitBytes int64 `protobuf:"varint,4,opt,name=memory_limit_bytes,json=memoryLimitBytes,proto3" json:"memory_limit_bytes,omitempty"` + // OOMScoreAdj adjusts the oom-killer score. Default: 0 (not specified) + OomScoreAdj int64 `protobuf:"varint,5,opt,name=oom_score_adj,json=oomScoreAdj,proto3" json:"oom_score_adj,omitempty"` + // CpusetCpus constrains the allowed set of logical CPUs. Default: "" (not specified) + // This field exists to support drivers which can't set a cgroup path. 
+ CpusetCpus string `protobuf:"bytes,6,opt,name=cpuset_cpus,json=cpusetCpus,proto3" json:"cpuset_cpus,omitempty"` + // CpusetCgroup is the path to the cpuset cgroup managed by the client + CpusetCgroup string `protobuf:"bytes,9,opt,name=cpuset_cgroup,json=cpusetCgroup,proto3" json:"cpuset_cgroup,omitempty"` + // PercentTicks is a compatibility option for docker and should not be used + // buf:lint:ignore FIELD_LOWER_SNAKE_CASE + PercentTicks float64 `protobuf:"fixed64,8,opt,name=PercentTicks,proto3" json:"PercentTicks,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LinuxResources) Reset() { *m = LinuxResources{} } +func (m *LinuxResources) String() string { return proto.CompactTextString(m) } +func (*LinuxResources) ProtoMessage() {} +func (*LinuxResources) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{44} +} + +func (m *LinuxResources) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LinuxResources.Unmarshal(m, b) +} +func (m *LinuxResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LinuxResources.Marshal(b, m, deterministic) +} +func (m *LinuxResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_LinuxResources.Merge(m, src) +} +func (m *LinuxResources) XXX_Size() int { + return xxx_messageInfo_LinuxResources.Size(m) +} +func (m *LinuxResources) XXX_DiscardUnknown() { + xxx_messageInfo_LinuxResources.DiscardUnknown(m) +} + +var xxx_messageInfo_LinuxResources proto.InternalMessageInfo + +func (m *LinuxResources) GetCpuPeriod() int64 { + if m != nil { + return m.CpuPeriod + } + return 0 +} + +func (m *LinuxResources) GetCpuQuota() int64 { + if m != nil { + return m.CpuQuota + } + return 0 +} + +func (m *LinuxResources) GetCpuShares() int64 { + if m != nil { + return m.CpuShares + } + return 0 +} + +func (m *LinuxResources) GetMemoryLimitBytes() int64 { + if m != nil { + return 
m.MemoryLimitBytes + } + return 0 +} + +func (m *LinuxResources) GetOomScoreAdj() int64 { + if m != nil { + return m.OomScoreAdj + } + return 0 +} + +func (m *LinuxResources) GetCpusetCpus() string { + if m != nil { + return m.CpusetCpus + } + return "" +} + +func (m *LinuxResources) GetCpusetCgroup() string { + if m != nil { + return m.CpusetCgroup + } + return "" +} + +func (m *LinuxResources) GetPercentTicks() float64 { + if m != nil { + return m.PercentTicks + } + return 0 +} + +type Mount struct { + // TaskPath is the file path within the task directory to mount to + TaskPath string `protobuf:"bytes,1,opt,name=task_path,json=taskPath,proto3" json:"task_path,omitempty"` + // HostPath is the file path on the host to mount from + HostPath string `protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"` + // Readonly if set true, mounts the path in readonly mode + Readonly bool `protobuf:"varint,3,opt,name=readonly,proto3" json:"readonly,omitempty"` + // Propagation mode for the mount. Not exactly the same as the unix mount + // propagation flags. See callsite usage for details. 
+ PropagationMode string `protobuf:"bytes,4,opt,name=propagation_mode,json=propagationMode,proto3" json:"propagation_mode,omitempty"` + SelinuxLabel string `protobuf:"bytes,5,opt,name=selinux_label,json=selinuxLabel,proto3" json:"selinux_label,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (m *Mount) String() string { return proto.CompactTextString(m) } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{45} +} + +func (m *Mount) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Mount.Unmarshal(m, b) +} +func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Mount.Marshal(b, m, deterministic) +} +func (m *Mount) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mount.Merge(m, src) +} +func (m *Mount) XXX_Size() int { + return xxx_messageInfo_Mount.Size(m) +} +func (m *Mount) XXX_DiscardUnknown() { + xxx_messageInfo_Mount.DiscardUnknown(m) +} + +var xxx_messageInfo_Mount proto.InternalMessageInfo + +func (m *Mount) GetTaskPath() string { + if m != nil { + return m.TaskPath + } + return "" +} + +func (m *Mount) GetHostPath() string { + if m != nil { + return m.HostPath + } + return "" +} + +func (m *Mount) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *Mount) GetPropagationMode() string { + if m != nil { + return m.PropagationMode + } + return "" +} + +func (m *Mount) GetSelinuxLabel() string { + if m != nil { + return m.SelinuxLabel + } + return "" +} + +type Device struct { + // TaskPath is the file path within the task to mount the device to + TaskPath string `protobuf:"bytes,1,opt,name=task_path,json=taskPath,proto3" json:"task_path,omitempty"` + // HostPath is the path on the host to the source device + HostPath string 
`protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"` + // CgroupPermissions defines the Cgroup permissions of the device. + // One or more of the following options can be set: + // - r - allows the task to read from the specified device. + // - w - allows the task to write to the specified device. + // - m - allows the task to create device files that do not yet exist. + // + // Example: "rw" + CgroupPermissions string `protobuf:"bytes,3,opt,name=cgroup_permissions,json=cgroupPermissions,proto3" json:"cgroup_permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Device) Reset() { *m = Device{} } +func (m *Device) String() string { return proto.CompactTextString(m) } +func (*Device) ProtoMessage() {} +func (*Device) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{46} +} + +func (m *Device) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Device.Unmarshal(m, b) +} +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Device.Marshal(b, m, deterministic) +} +func (m *Device) XXX_Merge(src proto.Message) { + xxx_messageInfo_Device.Merge(m, src) +} +func (m *Device) XXX_Size() int { + return xxx_messageInfo_Device.Size(m) +} +func (m *Device) XXX_DiscardUnknown() { + xxx_messageInfo_Device.DiscardUnknown(m) +} + +var xxx_messageInfo_Device proto.InternalMessageInfo + +func (m *Device) GetTaskPath() string { + if m != nil { + return m.TaskPath + } + return "" +} + +func (m *Device) GetHostPath() string { + if m != nil { + return m.HostPath + } + return "" +} + +func (m *Device) GetCgroupPermissions() string { + if m != nil { + return m.CgroupPermissions + } + return "" +} + +// TaskHandle is created when starting a task and is used to recover task +type TaskHandle struct { + // Version is used by the driver to version the DriverState schema. 
+ // Version 0 is reserved by Nomad and should not be used. + Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Config is the TaskConfig for the task + Config *TaskConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + // State is the state of the task's execution + State TaskState `protobuf:"varint,3,opt,name=state,proto3,enum=hashicorp.nomad.plugins.drivers.proto.TaskState" json:"state,omitempty"` + // DriverState is the encoded state for the specific driver + DriverState []byte `protobuf:"bytes,4,opt,name=driver_state,json=driverState,proto3" json:"driver_state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskHandle) Reset() { *m = TaskHandle{} } +func (m *TaskHandle) String() string { return proto.CompactTextString(m) } +func (*TaskHandle) ProtoMessage() {} +func (*TaskHandle) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{47} +} + +func (m *TaskHandle) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskHandle.Unmarshal(m, b) +} +func (m *TaskHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskHandle.Marshal(b, m, deterministic) +} +func (m *TaskHandle) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskHandle.Merge(m, src) +} +func (m *TaskHandle) XXX_Size() int { + return xxx_messageInfo_TaskHandle.Size(m) +} +func (m *TaskHandle) XXX_DiscardUnknown() { + xxx_messageInfo_TaskHandle.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskHandle proto.InternalMessageInfo + +func (m *TaskHandle) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *TaskHandle) GetConfig() *TaskConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *TaskHandle) GetState() TaskState { + if m != nil { + return m.State + } + return TaskState_UNKNOWN +} + +func (m *TaskHandle) GetDriverState() []byte 
{ + if m != nil { + return m.DriverState + } + return nil +} + +// NetworkOverride contains network settings which the driver may override +// for the task, such as when the driver is setting up the task's network. +type NetworkOverride struct { + // PortMap can be set to replace ports with driver-specific mappings + PortMap map[string]int32 `protobuf:"bytes,1,rep,name=port_map,json=portMap,proto3" json:"port_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + // Addr is the IP address for the task created by the driver + Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` + // AutoAdvertise indicates whether the driver thinks services that choose + // to auto_advertise_addresses should use this IP instead of the host's. + AutoAdvertise bool `protobuf:"varint,3,opt,name=auto_advertise,json=autoAdvertise,proto3" json:"auto_advertise,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkOverride) Reset() { *m = NetworkOverride{} } +func (m *NetworkOverride) String() string { return proto.CompactTextString(m) } +func (*NetworkOverride) ProtoMessage() {} +func (*NetworkOverride) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{48} +} + +func (m *NetworkOverride) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetworkOverride.Unmarshal(m, b) +} +func (m *NetworkOverride) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NetworkOverride.Marshal(b, m, deterministic) +} +func (m *NetworkOverride) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkOverride.Merge(m, src) +} +func (m *NetworkOverride) XXX_Size() int { + return xxx_messageInfo_NetworkOverride.Size(m) +} +func (m *NetworkOverride) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkOverride.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkOverride 
proto.InternalMessageInfo + +func (m *NetworkOverride) GetPortMap() map[string]int32 { + if m != nil { + return m.PortMap + } + return nil +} + +func (m *NetworkOverride) GetAddr() string { + if m != nil { + return m.Addr + } + return "" +} + +func (m *NetworkOverride) GetAutoAdvertise() bool { + if m != nil { + return m.AutoAdvertise + } + return false +} + +// ExitResult contains information about the exit status of a task +type ExitResult struct { + // ExitCode returned from the task on exit + ExitCode int32 `protobuf:"varint,1,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` + // Signal is set if a signal was sent to the task + Signal int32 `protobuf:"varint,2,opt,name=signal,proto3" json:"signal,omitempty"` + // OomKilled is true if the task exited as a result of the OOM Killer + OomKilled bool `protobuf:"varint,3,opt,name=oom_killed,json=oomKilled,proto3" json:"oom_killed,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExitResult) Reset() { *m = ExitResult{} } +func (m *ExitResult) String() string { return proto.CompactTextString(m) } +func (*ExitResult) ProtoMessage() {} +func (*ExitResult) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{49} +} + +func (m *ExitResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExitResult.Unmarshal(m, b) +} +func (m *ExitResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExitResult.Marshal(b, m, deterministic) +} +func (m *ExitResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExitResult.Merge(m, src) +} +func (m *ExitResult) XXX_Size() int { + return xxx_messageInfo_ExitResult.Size(m) +} +func (m *ExitResult) XXX_DiscardUnknown() { + xxx_messageInfo_ExitResult.DiscardUnknown(m) +} + +var xxx_messageInfo_ExitResult proto.InternalMessageInfo + +func (m *ExitResult) GetExitCode() int32 { + if m != nil { + return m.ExitCode + } 
+ return 0 +} + +func (m *ExitResult) GetSignal() int32 { + if m != nil { + return m.Signal + } + return 0 +} + +func (m *ExitResult) GetOomKilled() bool { + if m != nil { + return m.OomKilled + } + return false +} + +// TaskStatus includes information of a specific task +type TaskStatus struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // State is the state of the task's execution + State TaskState `protobuf:"varint,3,opt,name=state,proto3,enum=hashicorp.nomad.plugins.drivers.proto.TaskState" json:"state,omitempty"` + // StartedAt is the timestamp when the task was started + StartedAt *timestamp.Timestamp `protobuf:"bytes,4,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + // CompletedAt is the timestamp when the task exited. + // If the task is still running, CompletedAt will not be set + CompletedAt *timestamp.Timestamp `protobuf:"bytes,5,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"` + // Result is set when CompletedAt is set. 
+ Result *ExitResult `protobuf:"bytes,6,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskStatus) Reset() { *m = TaskStatus{} } +func (m *TaskStatus) String() string { return proto.CompactTextString(m) } +func (*TaskStatus) ProtoMessage() {} +func (*TaskStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{50} +} + +func (m *TaskStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskStatus.Unmarshal(m, b) +} +func (m *TaskStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskStatus.Marshal(b, m, deterministic) +} +func (m *TaskStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskStatus.Merge(m, src) +} +func (m *TaskStatus) XXX_Size() int { + return xxx_messageInfo_TaskStatus.Size(m) +} +func (m *TaskStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TaskStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskStatus proto.InternalMessageInfo + +func (m *TaskStatus) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *TaskStatus) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TaskStatus) GetState() TaskState { + if m != nil { + return m.State + } + return TaskState_UNKNOWN +} + +func (m *TaskStatus) GetStartedAt() *timestamp.Timestamp { + if m != nil { + return m.StartedAt + } + return nil +} + +func (m *TaskStatus) GetCompletedAt() *timestamp.Timestamp { + if m != nil { + return m.CompletedAt + } + return nil +} + +func (m *TaskStatus) GetResult() *ExitResult { + if m != nil { + return m.Result + } + return nil +} + +type TaskDriverStatus struct { + // Attributes is a set of string/string key value pairs specific to the + // implementing driver + Attributes map[string]string `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty" 
protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskDriverStatus) Reset() { *m = TaskDriverStatus{} } +func (m *TaskDriverStatus) String() string { return proto.CompactTextString(m) } +func (*TaskDriverStatus) ProtoMessage() {} +func (*TaskDriverStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{51} +} + +func (m *TaskDriverStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskDriverStatus.Unmarshal(m, b) +} +func (m *TaskDriverStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskDriverStatus.Marshal(b, m, deterministic) +} +func (m *TaskDriverStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskDriverStatus.Merge(m, src) +} +func (m *TaskDriverStatus) XXX_Size() int { + return xxx_messageInfo_TaskDriverStatus.Size(m) +} +func (m *TaskDriverStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TaskDriverStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskDriverStatus proto.InternalMessageInfo + +func (m *TaskDriverStatus) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +type TaskStats struct { + // Id of the task + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Timestamp for which the stats were collected + Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // AggResourceUsage is the aggreate usage of all processes + AggResourceUsage *TaskResourceUsage `protobuf:"bytes,3,opt,name=agg_resource_usage,json=aggResourceUsage,proto3" json:"agg_resource_usage,omitempty"` + // ResourceUsageByPid breaks the usage stats by process + ResourceUsageByPid map[string]*TaskResourceUsage `protobuf:"bytes,4,rep,name=resource_usage_by_pid,json=resourceUsageByPid,proto3" 
json:"resource_usage_by_pid,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskStats) Reset() { *m = TaskStats{} } +func (m *TaskStats) String() string { return proto.CompactTextString(m) } +func (*TaskStats) ProtoMessage() {} +func (*TaskStats) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{52} +} + +func (m *TaskStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskStats.Unmarshal(m, b) +} +func (m *TaskStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskStats.Marshal(b, m, deterministic) +} +func (m *TaskStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskStats.Merge(m, src) +} +func (m *TaskStats) XXX_Size() int { + return xxx_messageInfo_TaskStats.Size(m) +} +func (m *TaskStats) XXX_DiscardUnknown() { + xxx_messageInfo_TaskStats.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskStats proto.InternalMessageInfo + +func (m *TaskStats) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *TaskStats) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *TaskStats) GetAggResourceUsage() *TaskResourceUsage { + if m != nil { + return m.AggResourceUsage + } + return nil +} + +func (m *TaskStats) GetResourceUsageByPid() map[string]*TaskResourceUsage { + if m != nil { + return m.ResourceUsageByPid + } + return nil +} + +type TaskResourceUsage struct { + // CPU usage stats + Cpu *CPUUsage `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"` + // Memory usage stats + Memory *MemoryUsage `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskResourceUsage) Reset() { *m = 
TaskResourceUsage{} } +func (m *TaskResourceUsage) String() string { return proto.CompactTextString(m) } +func (*TaskResourceUsage) ProtoMessage() {} +func (*TaskResourceUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{53} +} + +func (m *TaskResourceUsage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskResourceUsage.Unmarshal(m, b) +} +func (m *TaskResourceUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskResourceUsage.Marshal(b, m, deterministic) +} +func (m *TaskResourceUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskResourceUsage.Merge(m, src) +} +func (m *TaskResourceUsage) XXX_Size() int { + return xxx_messageInfo_TaskResourceUsage.Size(m) +} +func (m *TaskResourceUsage) XXX_DiscardUnknown() { + xxx_messageInfo_TaskResourceUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskResourceUsage proto.InternalMessageInfo + +func (m *TaskResourceUsage) GetCpu() *CPUUsage { + if m != nil { + return m.Cpu + } + return nil +} + +func (m *TaskResourceUsage) GetMemory() *MemoryUsage { + if m != nil { + return m.Memory + } + return nil +} + +type CPUUsage struct { + SystemMode float64 `protobuf:"fixed64,1,opt,name=system_mode,json=systemMode,proto3" json:"system_mode,omitempty"` + UserMode float64 `protobuf:"fixed64,2,opt,name=user_mode,json=userMode,proto3" json:"user_mode,omitempty"` + TotalTicks float64 `protobuf:"fixed64,3,opt,name=total_ticks,json=totalTicks,proto3" json:"total_ticks,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,4,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,5,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"` + Percent float64 `protobuf:"fixed64,6,opt,name=percent,proto3" json:"percent,omitempty"` + // MeasuredFields indicates which fields were actually sampled + MeasuredFields []CPUUsage_Fields 
`protobuf:"varint,7,rep,packed,name=measured_fields,json=measuredFields,proto3,enum=hashicorp.nomad.plugins.drivers.proto.CPUUsage_Fields" json:"measured_fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CPUUsage) Reset() { *m = CPUUsage{} } +func (m *CPUUsage) String() string { return proto.CompactTextString(m) } +func (*CPUUsage) ProtoMessage() {} +func (*CPUUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{54} +} + +func (m *CPUUsage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CPUUsage.Unmarshal(m, b) +} +func (m *CPUUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CPUUsage.Marshal(b, m, deterministic) +} +func (m *CPUUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_CPUUsage.Merge(m, src) +} +func (m *CPUUsage) XXX_Size() int { + return xxx_messageInfo_CPUUsage.Size(m) +} +func (m *CPUUsage) XXX_DiscardUnknown() { + xxx_messageInfo_CPUUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_CPUUsage proto.InternalMessageInfo + +func (m *CPUUsage) GetSystemMode() float64 { + if m != nil { + return m.SystemMode + } + return 0 +} + +func (m *CPUUsage) GetUserMode() float64 { + if m != nil { + return m.UserMode + } + return 0 +} + +func (m *CPUUsage) GetTotalTicks() float64 { + if m != nil { + return m.TotalTicks + } + return 0 +} + +func (m *CPUUsage) GetThrottledPeriods() uint64 { + if m != nil { + return m.ThrottledPeriods + } + return 0 +} + +func (m *CPUUsage) GetThrottledTime() uint64 { + if m != nil { + return m.ThrottledTime + } + return 0 +} + +func (m *CPUUsage) GetPercent() float64 { + if m != nil { + return m.Percent + } + return 0 +} + +func (m *CPUUsage) GetMeasuredFields() []CPUUsage_Fields { + if m != nil { + return m.MeasuredFields + } + return nil +} + +type MemoryUsage struct { + Rss uint64 `protobuf:"varint,1,opt,name=rss,proto3" json:"rss,omitempty"` + Cache 
uint64 `protobuf:"varint,2,opt,name=cache,proto3" json:"cache,omitempty"` + MaxUsage uint64 `protobuf:"varint,3,opt,name=max_usage,json=maxUsage,proto3" json:"max_usage,omitempty"` + KernelUsage uint64 `protobuf:"varint,4,opt,name=kernel_usage,json=kernelUsage,proto3" json:"kernel_usage,omitempty"` + KernelMaxUsage uint64 `protobuf:"varint,5,opt,name=kernel_max_usage,json=kernelMaxUsage,proto3" json:"kernel_max_usage,omitempty"` + Usage uint64 `protobuf:"varint,7,opt,name=usage,proto3" json:"usage,omitempty"` + Swap uint64 `protobuf:"varint,8,opt,name=swap,proto3" json:"swap,omitempty"` + // MeasuredFields indicates which fields were actually sampled + MeasuredFields []MemoryUsage_Fields `protobuf:"varint,6,rep,packed,name=measured_fields,json=measuredFields,proto3,enum=hashicorp.nomad.plugins.drivers.proto.MemoryUsage_Fields" json:"measured_fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryUsage) Reset() { *m = MemoryUsage{} } +func (m *MemoryUsage) String() string { return proto.CompactTextString(m) } +func (*MemoryUsage) ProtoMessage() {} +func (*MemoryUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{55} +} + +func (m *MemoryUsage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MemoryUsage.Unmarshal(m, b) +} +func (m *MemoryUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MemoryUsage.Marshal(b, m, deterministic) +} +func (m *MemoryUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryUsage.Merge(m, src) +} +func (m *MemoryUsage) XXX_Size() int { + return xxx_messageInfo_MemoryUsage.Size(m) +} +func (m *MemoryUsage) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryUsage proto.InternalMessageInfo + +func (m *MemoryUsage) GetRss() uint64 { + if m != nil { + return m.Rss + } + return 0 +} + +func (m *MemoryUsage) 
GetCache() uint64 { + if m != nil { + return m.Cache + } + return 0 +} + +func (m *MemoryUsage) GetMaxUsage() uint64 { + if m != nil { + return m.MaxUsage + } + return 0 +} + +func (m *MemoryUsage) GetKernelUsage() uint64 { + if m != nil { + return m.KernelUsage + } + return 0 +} + +func (m *MemoryUsage) GetKernelMaxUsage() uint64 { + if m != nil { + return m.KernelMaxUsage + } + return 0 +} + +func (m *MemoryUsage) GetUsage() uint64 { + if m != nil { + return m.Usage + } + return 0 +} + +func (m *MemoryUsage) GetSwap() uint64 { + if m != nil { + return m.Swap + } + return 0 +} + +func (m *MemoryUsage) GetMeasuredFields() []MemoryUsage_Fields { + if m != nil { + return m.MeasuredFields + } + return nil +} + +type DriverTaskEvent struct { + // TaskId is the id of the task for the event + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // AllocId of the task for the event + AllocId string `protobuf:"bytes,2,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"` + // TaskName is the name of the task for the event + TaskName string `protobuf:"bytes,3,opt,name=task_name,json=taskName,proto3" json:"task_name,omitempty"` + // Timestamp when the event occurred + Timestamp *timestamp.Timestamp `protobuf:"bytes,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Message is the body of the event + Message string `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` + // Annotations allows for additional key/value data to be sent along with the event + Annotations map[string]string `protobuf:"bytes,6,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DriverTaskEvent) Reset() { *m = DriverTaskEvent{} } +func (m *DriverTaskEvent) String() string { return 
proto.CompactTextString(m) } +func (*DriverTaskEvent) ProtoMessage() {} +func (*DriverTaskEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_4a8f45747846a74d, []int{56} +} + +func (m *DriverTaskEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DriverTaskEvent.Unmarshal(m, b) +} +func (m *DriverTaskEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DriverTaskEvent.Marshal(b, m, deterministic) +} +func (m *DriverTaskEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_DriverTaskEvent.Merge(m, src) +} +func (m *DriverTaskEvent) XXX_Size() int { + return xxx_messageInfo_DriverTaskEvent.Size(m) +} +func (m *DriverTaskEvent) XXX_DiscardUnknown() { + xxx_messageInfo_DriverTaskEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_DriverTaskEvent proto.InternalMessageInfo + +func (m *DriverTaskEvent) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *DriverTaskEvent) GetAllocId() string { + if m != nil { + return m.AllocId + } + return "" +} + +func (m *DriverTaskEvent) GetTaskName() string { + if m != nil { + return m.TaskName + } + return "" +} + +func (m *DriverTaskEvent) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *DriverTaskEvent) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *DriverTaskEvent) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func init() { + proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.TaskState", TaskState_name, TaskState_value) + proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.FingerprintResponse_HealthState", FingerprintResponse_HealthState_name, FingerprintResponse_HealthState_value) + proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.StartTaskResponse_Result", StartTaskResponse_Result_name, StartTaskResponse_Result_value) + 
proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.DriverCapabilities_FSIsolation", DriverCapabilities_FSIsolation_name, DriverCapabilities_FSIsolation_value) + proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.DriverCapabilities_MountConfigs", DriverCapabilities_MountConfigs_name, DriverCapabilities_MountConfigs_value) + proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.NetworkIsolationSpec_NetworkIsolationMode", NetworkIsolationSpec_NetworkIsolationMode_name, NetworkIsolationSpec_NetworkIsolationMode_value) + proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.CPUUsage_Fields", CPUUsage_Fields_name, CPUUsage_Fields_value) + proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.MemoryUsage_Fields", MemoryUsage_Fields_name, MemoryUsage_Fields_value) + proto.RegisterType((*TaskConfigSchemaRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskConfigSchemaRequest") + proto.RegisterType((*TaskConfigSchemaResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskConfigSchemaResponse") + proto.RegisterType((*CapabilitiesRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.CapabilitiesRequest") + proto.RegisterType((*CapabilitiesResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.CapabilitiesResponse") + proto.RegisterType((*FingerprintRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.FingerprintRequest") + proto.RegisterType((*FingerprintResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.FingerprintResponse") + proto.RegisterMapType((map[string]*proto1.Attribute)(nil), "hashicorp.nomad.plugins.drivers.proto.FingerprintResponse.AttributesEntry") + proto.RegisterType((*RecoverTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.RecoverTaskRequest") + proto.RegisterType((*RecoverTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.RecoverTaskResponse") + proto.RegisterType((*StartTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.StartTaskRequest") + proto.RegisterType((*StartTaskResponse)(nil), 
"hashicorp.nomad.plugins.drivers.proto.StartTaskResponse") + proto.RegisterType((*WaitTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.WaitTaskRequest") + proto.RegisterType((*WaitTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.WaitTaskResponse") + proto.RegisterType((*StopTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.StopTaskRequest") + proto.RegisterType((*StopTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.StopTaskResponse") + proto.RegisterType((*DestroyTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.DestroyTaskRequest") + proto.RegisterType((*DestroyTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.DestroyTaskResponse") + proto.RegisterType((*InspectTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.InspectTaskRequest") + proto.RegisterType((*InspectTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.InspectTaskResponse") + proto.RegisterType((*TaskStatsRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskStatsRequest") + proto.RegisterType((*TaskStatsResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskStatsResponse") + proto.RegisterType((*TaskEventsRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskEventsRequest") + proto.RegisterType((*SignalTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.SignalTaskRequest") + proto.RegisterType((*SignalTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.SignalTaskResponse") + proto.RegisterType((*ExecTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.ExecTaskRequest") + proto.RegisterType((*ExecTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.ExecTaskResponse") + proto.RegisterType((*ExecTaskStreamingIOOperation)(nil), "hashicorp.nomad.plugins.drivers.proto.ExecTaskStreamingIOOperation") + proto.RegisterType((*ExecTaskStreamingRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.ExecTaskStreamingRequest") + proto.RegisterType((*ExecTaskStreamingRequest_Setup)(nil), 
"hashicorp.nomad.plugins.drivers.proto.ExecTaskStreamingRequest.Setup") + proto.RegisterType((*ExecTaskStreamingRequest_TerminalSize)(nil), "hashicorp.nomad.plugins.drivers.proto.ExecTaskStreamingRequest.TerminalSize") + proto.RegisterType((*ExecTaskStreamingResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.ExecTaskStreamingResponse") + proto.RegisterType((*CreateNetworkRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.CreateNetworkRequest") + proto.RegisterType((*CreateNetworkResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.CreateNetworkResponse") + proto.RegisterType((*DestroyNetworkRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.DestroyNetworkRequest") + proto.RegisterType((*DestroyNetworkResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.DestroyNetworkResponse") + proto.RegisterType((*DriverCapabilities)(nil), "hashicorp.nomad.plugins.drivers.proto.DriverCapabilities") + proto.RegisterType((*NetworkIsolationSpec)(nil), "hashicorp.nomad.plugins.drivers.proto.NetworkIsolationSpec") + proto.RegisterMapType((map[string]string)(nil), "hashicorp.nomad.plugins.drivers.proto.NetworkIsolationSpec.LabelsEntry") + proto.RegisterType((*HostsConfig)(nil), "hashicorp.nomad.plugins.drivers.proto.HostsConfig") + proto.RegisterType((*DNSConfig)(nil), "hashicorp.nomad.plugins.drivers.proto.DNSConfig") + proto.RegisterType((*TaskConfig)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskConfig") + proto.RegisterMapType((map[string]string)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskConfig.DeviceEnvEntry") + proto.RegisterMapType((map[string]string)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskConfig.EnvEntry") + proto.RegisterType((*Resources)(nil), "hashicorp.nomad.plugins.drivers.proto.Resources") + proto.RegisterType((*AllocatedTaskResources)(nil), "hashicorp.nomad.plugins.drivers.proto.AllocatedTaskResources") + proto.RegisterType((*AllocatedCpuResources)(nil), "hashicorp.nomad.plugins.drivers.proto.AllocatedCpuResources") + 
proto.RegisterType((*AllocatedMemoryResources)(nil), "hashicorp.nomad.plugins.drivers.proto.AllocatedMemoryResources") + proto.RegisterType((*NetworkResource)(nil), "hashicorp.nomad.plugins.drivers.proto.NetworkResource") + proto.RegisterType((*NetworkPort)(nil), "hashicorp.nomad.plugins.drivers.proto.NetworkPort") + proto.RegisterType((*PortMapping)(nil), "hashicorp.nomad.plugins.drivers.proto.PortMapping") + proto.RegisterType((*LinuxResources)(nil), "hashicorp.nomad.plugins.drivers.proto.LinuxResources") + proto.RegisterType((*Mount)(nil), "hashicorp.nomad.plugins.drivers.proto.Mount") + proto.RegisterType((*Device)(nil), "hashicorp.nomad.plugins.drivers.proto.Device") + proto.RegisterType((*TaskHandle)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskHandle") + proto.RegisterType((*NetworkOverride)(nil), "hashicorp.nomad.plugins.drivers.proto.NetworkOverride") + proto.RegisterMapType((map[string]int32)(nil), "hashicorp.nomad.plugins.drivers.proto.NetworkOverride.PortMapEntry") + proto.RegisterType((*ExitResult)(nil), "hashicorp.nomad.plugins.drivers.proto.ExitResult") + proto.RegisterType((*TaskStatus)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskStatus") + proto.RegisterType((*TaskDriverStatus)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskDriverStatus") + proto.RegisterMapType((map[string]string)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskDriverStatus.AttributesEntry") + proto.RegisterType((*TaskStats)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskStats") + proto.RegisterMapType((map[string]*TaskResourceUsage)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskStats.ResourceUsageByPidEntry") + proto.RegisterType((*TaskResourceUsage)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskResourceUsage") + proto.RegisterType((*CPUUsage)(nil), "hashicorp.nomad.plugins.drivers.proto.CPUUsage") + proto.RegisterType((*MemoryUsage)(nil), "hashicorp.nomad.plugins.drivers.proto.MemoryUsage") + proto.RegisterType((*DriverTaskEvent)(nil), 
"hashicorp.nomad.plugins.drivers.proto.DriverTaskEvent") + proto.RegisterMapType((map[string]string)(nil), "hashicorp.nomad.plugins.drivers.proto.DriverTaskEvent.AnnotationsEntry") +} + +func init() { + proto.RegisterFile("plugins/drivers/proto/driver.proto", fileDescriptor_4a8f45747846a74d) +} + +var fileDescriptor_4a8f45747846a74d = []byte{ + // 3938 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0x4f, 0x73, 0xdb, 0x48, + 0x76, 0x17, 0xf8, 0x4f, 0xe4, 0x23, 0x45, 0x41, 0x2d, 0xc9, 0xa6, 0x39, 0x9b, 0x8c, 0x07, 0x5b, + 0x93, 0x52, 0x76, 0x67, 0xe8, 0x59, 0x6d, 0x32, 0x1e, 0x7b, 0x3d, 0xeb, 0xe1, 0x50, 0xb4, 0x45, + 0x5b, 0xa2, 0x94, 0x26, 0x15, 0xaf, 0xe3, 0x64, 0x10, 0x08, 0x68, 0x53, 0xb0, 0x48, 0x00, 0x83, + 0x06, 0x65, 0x69, 0x53, 0xa9, 0xa4, 0x36, 0x55, 0xa9, 0x4d, 0x55, 0x52, 0xc9, 0x65, 0xb2, 0x97, + 0x9c, 0xb6, 0x2a, 0xa7, 0x54, 0xee, 0xa9, 0x4d, 0xed, 0x29, 0x87, 0x7c, 0x89, 0x1c, 0x92, 0x5b, + 0xae, 0xf9, 0x04, 0xd9, 0xea, 0x3f, 0x00, 0x01, 0x92, 0x1e, 0x83, 0x94, 0x4f, 0xe4, 0x7b, 0xdd, + 0xfd, 0xeb, 0x87, 0xf7, 0x5e, 0xbf, 0x7e, 0xdd, 0xfd, 0x40, 0xf3, 0x86, 0xe3, 0x81, 0xed, 0xd0, + 0x3b, 0x96, 0x6f, 0x5f, 0x10, 0x9f, 0xde, 0xf1, 0x7c, 0x37, 0x70, 0x25, 0xd5, 0xe0, 0x04, 0xfa, + 0xf0, 0xcc, 0xa0, 0x67, 0xb6, 0xe9, 0xfa, 0x5e, 0xc3, 0x71, 0x47, 0x86, 0xd5, 0x90, 0x63, 0x1a, + 0x72, 0x8c, 0xe8, 0x56, 0xff, 0xed, 0x81, 0xeb, 0x0e, 0x86, 0x44, 0x20, 0x9c, 0x8e, 0x5f, 0xde, + 0xb1, 0xc6, 0xbe, 0x11, 0xd8, 0xae, 0x23, 0xdb, 0xdf, 0x9f, 0x6e, 0x0f, 0xec, 0x11, 0xa1, 0x81, + 0x31, 0xf2, 0x64, 0x87, 0x0f, 0x43, 0x59, 0xe8, 0x99, 0xe1, 0x13, 0xeb, 0xce, 0x99, 0x39, 0xa4, + 0x1e, 0x31, 0xd9, 0xaf, 0xce, 0xfe, 0xc8, 0x6e, 0x1f, 0x4d, 0x75, 0xa3, 0x81, 0x3f, 0x36, 0x83, + 0x50, 0x72, 0x23, 0x08, 0x7c, 0xfb, 0x74, 0x1c, 0x10, 0xd1, 0x5b, 0xbb, 0x05, 0x37, 0xfb, 0x06, + 0x3d, 0x6f, 0xb9, 0xce, 0x4b, 0x7b, 0xd0, 0x33, 0xcf, 0xc8, 0xc8, 0xc0, 0xe4, 0xeb, 0x31, 0xa1, + 0x81, 0xf6, 0xc7, 0x50, 0x9b, 0x6d, 
0xa2, 0x9e, 0xeb, 0x50, 0x82, 0xbe, 0x80, 0x1c, 0x9b, 0xb2, + 0xa6, 0xdc, 0x56, 0x76, 0xca, 0xbb, 0x1f, 0x35, 0xde, 0xa4, 0x02, 0x21, 0x43, 0x43, 0x8a, 0xda, + 0xe8, 0x79, 0xc4, 0xc4, 0x7c, 0xa4, 0xb6, 0x0d, 0x9b, 0x2d, 0xc3, 0x33, 0x4e, 0xed, 0xa1, 0x1d, + 0xd8, 0x84, 0x86, 0x93, 0x8e, 0x61, 0x2b, 0xc9, 0x96, 0x13, 0xfe, 0x09, 0x54, 0xcc, 0x18, 0x5f, + 0x4e, 0x7c, 0xaf, 0x91, 0x4a, 0xf7, 0x8d, 0x3d, 0x4e, 0x25, 0x80, 0x13, 0x70, 0xda, 0x16, 0xa0, + 0x47, 0xb6, 0x33, 0x20, 0xbe, 0xe7, 0xdb, 0x4e, 0x10, 0x0a, 0xf3, 0xeb, 0x2c, 0x6c, 0x26, 0xd8, + 0x52, 0x98, 0x57, 0x00, 0x91, 0x1e, 0x99, 0x28, 0xd9, 0x9d, 0xf2, 0xee, 0x93, 0x94, 0xa2, 0xcc, + 0xc1, 0x6b, 0x34, 0x23, 0xb0, 0xb6, 0x13, 0xf8, 0x57, 0x38, 0x86, 0x8e, 0xbe, 0x82, 0xc2, 0x19, + 0x31, 0x86, 0xc1, 0x59, 0x2d, 0x73, 0x5b, 0xd9, 0xa9, 0xee, 0x3e, 0xba, 0xc6, 0x3c, 0xfb, 0x1c, + 0xa8, 0x17, 0x18, 0x01, 0xc1, 0x12, 0x15, 0x7d, 0x0c, 0x48, 0xfc, 0xd3, 0x2d, 0x42, 0x4d, 0xdf, + 0xf6, 0x98, 0x4b, 0xd6, 0xb2, 0xb7, 0x95, 0x9d, 0x12, 0xde, 0x10, 0x2d, 0x7b, 0x93, 0x86, 0xba, + 0x07, 0xeb, 0x53, 0xd2, 0x22, 0x15, 0xb2, 0xe7, 0xe4, 0x8a, 0x5b, 0xa4, 0x84, 0xd9, 0x5f, 0xf4, + 0x18, 0xf2, 0x17, 0xc6, 0x70, 0x4c, 0xb8, 0xc8, 0xe5, 0xdd, 0x1f, 0xbc, 0xcd, 0x3d, 0xa4, 0x8b, + 0x4e, 0xf4, 0x80, 0xc5, 0xf8, 0xfb, 0x99, 0xcf, 0x14, 0xed, 0x1e, 0x94, 0x63, 0x72, 0xa3, 0x2a, + 0xc0, 0x49, 0x77, 0xaf, 0xdd, 0x6f, 0xb7, 0xfa, 0xed, 0x3d, 0x75, 0x05, 0xad, 0x41, 0xe9, 0xa4, + 0xbb, 0xdf, 0x6e, 0x1e, 0xf4, 0xf7, 0x9f, 0xab, 0x0a, 0x2a, 0xc3, 0x6a, 0x48, 0x64, 0xb4, 0x4b, + 0x40, 0x98, 0x98, 0xee, 0x05, 0xf1, 0x99, 0x23, 0x4b, 0xab, 0xa2, 0x9b, 0xb0, 0x1a, 0x18, 0xf4, + 0x5c, 0xb7, 0x2d, 0x29, 0x73, 0x81, 0x91, 0x1d, 0x0b, 0x75, 0xa0, 0x70, 0x66, 0x38, 0xd6, 0xf0, + 0xed, 0x72, 0x27, 0x55, 0xcd, 0xc0, 0xf7, 0xf9, 0x40, 0x2c, 0x01, 0x98, 0x77, 0x27, 0x66, 0x16, + 0x06, 0xd0, 0x9e, 0x83, 0xda, 0x0b, 0x0c, 0x3f, 0x88, 0x8b, 0xd3, 0x86, 0x1c, 0x9b, 0x5f, 0x7a, + 0xf4, 0x22, 0x73, 0x8a, 0x95, 0x89, 0xf9, 0x70, 0xed, 0xff, 0x32, 0xb0, 
0x11, 0xc3, 0x96, 0x9e, + 0xfa, 0x0c, 0x0a, 0x3e, 0xa1, 0xe3, 0x61, 0xc0, 0xe1, 0xab, 0xbb, 0x0f, 0x53, 0xc2, 0xcf, 0x20, + 0x35, 0x30, 0x87, 0xc1, 0x12, 0x0e, 0xed, 0x80, 0x2a, 0x46, 0xe8, 0xc4, 0xf7, 0x5d, 0x5f, 0x1f, + 0xd1, 0x01, 0xd7, 0x5a, 0x09, 0x57, 0x05, 0xbf, 0xcd, 0xd8, 0x87, 0x74, 0x10, 0xd3, 0x6a, 0xf6, + 0x9a, 0x5a, 0x45, 0x06, 0xa8, 0x0e, 0x09, 0x5e, 0xbb, 0xfe, 0xb9, 0xce, 0x54, 0xeb, 0xdb, 0x16, + 0xa9, 0xe5, 0x38, 0xe8, 0xa7, 0x29, 0x41, 0xbb, 0x62, 0xf8, 0x91, 0x1c, 0x8d, 0xd7, 0x9d, 0x24, + 0x43, 0xfb, 0x3e, 0x14, 0xc4, 0x97, 0x32, 0x4f, 0xea, 0x9d, 0xb4, 0x5a, 0xed, 0x5e, 0x4f, 0x5d, + 0x41, 0x25, 0xc8, 0xe3, 0x76, 0x1f, 0x33, 0x0f, 0x2b, 0x41, 0xfe, 0x51, 0xb3, 0xdf, 0x3c, 0x50, + 0x33, 0xda, 0xf7, 0x60, 0xfd, 0x99, 0x61, 0x07, 0x69, 0x9c, 0x4b, 0x73, 0x41, 0x9d, 0xf4, 0x95, + 0xd6, 0xe9, 0x24, 0xac, 0x93, 0x5e, 0x35, 0xed, 0x4b, 0x3b, 0x98, 0xb2, 0x87, 0x0a, 0x59, 0xe2, + 0xfb, 0xd2, 0x04, 0xec, 0xaf, 0xf6, 0x1a, 0xd6, 0x7b, 0x81, 0xeb, 0xa5, 0xf2, 0xfc, 0x1f, 0xc2, + 0x2a, 0xdb, 0x6d, 0xdc, 0x71, 0x20, 0x5d, 0xff, 0x56, 0x43, 0xec, 0x46, 0x8d, 0x70, 0x37, 0x6a, + 0xec, 0xc9, 0xdd, 0x0a, 0x87, 0x3d, 0xd1, 0x0d, 0x28, 0x50, 0x7b, 0xe0, 0x18, 0x43, 0x19, 0x2d, + 0x24, 0xa5, 0x21, 0xe6, 0xe4, 0xe1, 0xc4, 0xd2, 0xf1, 0x5b, 0x80, 0xf6, 0x08, 0x0d, 0x7c, 0xf7, + 0x2a, 0x95, 0x3c, 0x5b, 0x90, 0x7f, 0xe9, 0xfa, 0xa6, 0x58, 0x88, 0x45, 0x2c, 0x08, 0xb6, 0xa8, + 0x12, 0x20, 0x12, 0xfb, 0x63, 0x40, 0x1d, 0x87, 0xed, 0x29, 0xe9, 0x0c, 0xf1, 0x0f, 0x19, 0xd8, + 0x4c, 0xf4, 0x97, 0xc6, 0x58, 0x7e, 0x1d, 0xb2, 0xc0, 0x34, 0xa6, 0x62, 0x1d, 0xa2, 0x23, 0x28, + 0x88, 0x1e, 0x52, 0x93, 0x77, 0x17, 0x00, 0x12, 0xdb, 0x94, 0x84, 0x93, 0x30, 0x73, 0x9d, 0x3e, + 0xfb, 0x6e, 0x9d, 0xfe, 0x35, 0xa8, 0xe1, 0x77, 0xd0, 0xb7, 0xda, 0xe6, 0x09, 0x6c, 0x9a, 0xee, + 0x70, 0x48, 0x4c, 0xe6, 0x0d, 0xba, 0xed, 0x04, 0xc4, 0xbf, 0x30, 0x86, 0x6f, 0xf7, 0x1b, 0x34, + 0x19, 0xd5, 0x91, 0x83, 0xb4, 0x17, 0xb0, 0x11, 0x9b, 0x58, 0x1a, 0xe2, 0x11, 0xe4, 0x29, 0x63, + 0x48, 0x4b, 
0x7c, 0xb2, 0xa0, 0x25, 0x28, 0x16, 0xc3, 0xb5, 0x4d, 0x01, 0xde, 0xbe, 0x20, 0x4e, + 0xf4, 0x59, 0xda, 0x1e, 0x6c, 0xf4, 0xb8, 0x9b, 0xa6, 0xf2, 0xc3, 0x89, 0x8b, 0x67, 0x12, 0x2e, + 0xbe, 0x05, 0x28, 0x8e, 0x22, 0x1d, 0xf1, 0x0a, 0xd6, 0xdb, 0x97, 0xc4, 0x4c, 0x85, 0x5c, 0x83, + 0x55, 0xd3, 0x1d, 0x8d, 0x0c, 0xc7, 0xaa, 0x65, 0x6e, 0x67, 0x77, 0x4a, 0x38, 0x24, 0xe3, 0x6b, + 0x31, 0x9b, 0x76, 0x2d, 0x6a, 0x7f, 0xa7, 0x80, 0x3a, 0x99, 0x5b, 0x2a, 0x92, 0x49, 0x1f, 0x58, + 0x0c, 0x88, 0xcd, 0x5d, 0xc1, 0x92, 0x92, 0xfc, 0x30, 0x5c, 0x08, 0x3e, 0xf1, 0xfd, 0x58, 0x38, + 0xca, 0x5e, 0x33, 0x1c, 0x69, 0xfb, 0xf0, 0x9d, 0x50, 0x9c, 0x5e, 0xe0, 0x13, 0x63, 0x64, 0x3b, + 0x83, 0xce, 0xd1, 0x91, 0x47, 0x84, 0xe0, 0x08, 0x41, 0xce, 0x32, 0x02, 0x43, 0x0a, 0xc6, 0xff, + 0xb3, 0x45, 0x6f, 0x0e, 0x5d, 0x1a, 0x2d, 0x7a, 0x4e, 0x68, 0xff, 0x99, 0x85, 0xda, 0x0c, 0x54, + 0xa8, 0xde, 0x17, 0x90, 0xa7, 0x24, 0x18, 0x7b, 0xd2, 0x55, 0xda, 0xa9, 0x05, 0x9e, 0x8f, 0xd7, + 0xe8, 0x31, 0x30, 0x2c, 0x30, 0xd1, 0x00, 0x8a, 0x41, 0x70, 0xa5, 0x53, 0xfb, 0xa7, 0x61, 0x42, + 0x70, 0x70, 0x5d, 0xfc, 0x3e, 0xf1, 0x47, 0xb6, 0x63, 0x0c, 0x7b, 0xf6, 0x4f, 0x09, 0x5e, 0x0d, + 0x82, 0x2b, 0xf6, 0x07, 0x3d, 0x67, 0x0e, 0x6f, 0xd9, 0x8e, 0x54, 0x7b, 0x6b, 0xd9, 0x59, 0x62, + 0x0a, 0xc6, 0x02, 0xb1, 0x7e, 0x00, 0x79, 0xfe, 0x4d, 0xcb, 0x38, 0xa2, 0x0a, 0xd9, 0x20, 0xb8, + 0xe2, 0x42, 0x15, 0x31, 0xfb, 0x5b, 0x7f, 0x00, 0x95, 0xf8, 0x17, 0x30, 0x47, 0x3a, 0x23, 0xf6, + 0xe0, 0x4c, 0x38, 0x58, 0x1e, 0x4b, 0x8a, 0x59, 0xf2, 0xb5, 0x6d, 0xc9, 0x94, 0x35, 0x8f, 0x05, + 0xa1, 0xfd, 0x5b, 0x06, 0x6e, 0xcd, 0xd1, 0x8c, 0x74, 0xd6, 0x17, 0x09, 0x67, 0x7d, 0x47, 0x5a, + 0x08, 0x3d, 0xfe, 0x45, 0xc2, 0xe3, 0xdf, 0x21, 0x38, 0x5b, 0x36, 0x37, 0xa0, 0x40, 0x2e, 0xed, + 0x80, 0x58, 0x52, 0x55, 0x92, 0x8a, 0x2d, 0xa7, 0xdc, 0x75, 0x97, 0xd3, 0x21, 0x6c, 0xb5, 0x7c, + 0x62, 0x04, 0x44, 0x86, 0xf2, 0xd0, 0xff, 0x6f, 0x41, 0xd1, 0x18, 0x0e, 0x5d, 0x73, 0x62, 0xd6, + 0x55, 0x4e, 0x77, 0x2c, 0x54, 0x87, 0xe2, 0x99, 
0x4b, 0x03, 0xc7, 0x18, 0x11, 0x19, 0xbc, 0x22, + 0x5a, 0xfb, 0x46, 0x81, 0xed, 0x29, 0x3c, 0x69, 0x85, 0x53, 0xa8, 0xda, 0xd4, 0x1d, 0xf2, 0x0f, + 0xd4, 0x63, 0x27, 0xbc, 0x1f, 0x2d, 0xb6, 0xd5, 0x74, 0x42, 0x0c, 0x7e, 0xe0, 0x5b, 0xb3, 0xe3, + 0x24, 0xf7, 0x38, 0x3e, 0xb9, 0x25, 0x57, 0x7a, 0x48, 0x6a, 0xff, 0xa8, 0xc0, 0xb6, 0xdc, 0xe1, + 0xd3, 0x7f, 0xe8, 0xac, 0xc8, 0x99, 0x77, 0x2d, 0xb2, 0x56, 0x83, 0x1b, 0xd3, 0x72, 0xc9, 0x98, + 0xff, 0xdf, 0x79, 0x40, 0xb3, 0xa7, 0x4b, 0xf4, 0x01, 0x54, 0x28, 0x71, 0x2c, 0x5d, 0xec, 0x17, + 0x62, 0x2b, 0x2b, 0xe2, 0x32, 0xe3, 0x89, 0x8d, 0x83, 0xb2, 0x10, 0x48, 0x2e, 0xa5, 0xb4, 0x45, + 0xcc, 0xff, 0xa3, 0x33, 0xa8, 0xbc, 0xa4, 0x7a, 0x34, 0x37, 0x77, 0xa8, 0x6a, 0xea, 0xb0, 0x36, + 0x2b, 0x47, 0xe3, 0x51, 0x2f, 0xfa, 0x2e, 0x5c, 0x7e, 0x49, 0x23, 0x02, 0xfd, 0x5c, 0x81, 0x9b, + 0x61, 0x5a, 0x31, 0x51, 0xdf, 0xc8, 0xb5, 0x08, 0xad, 0xe5, 0x6e, 0x67, 0x77, 0xaa, 0xbb, 0xc7, + 0xd7, 0xd0, 0xdf, 0x0c, 0xf3, 0xd0, 0xb5, 0x08, 0xde, 0x76, 0xe6, 0x70, 0x29, 0x6a, 0xc0, 0xe6, + 0x68, 0x4c, 0x03, 0x5d, 0x78, 0x81, 0x2e, 0x3b, 0xd5, 0xf2, 0x5c, 0x2f, 0x1b, 0xac, 0x29, 0xe1, + 0xab, 0xe8, 0x1c, 0xd6, 0x46, 0xee, 0xd8, 0x09, 0x74, 0x93, 0x9f, 0x7f, 0x68, 0xad, 0xb0, 0xd0, + 0xc1, 0x78, 0x8e, 0x96, 0x0e, 0x19, 0x9c, 0x38, 0x4d, 0x51, 0x5c, 0x19, 0xc5, 0x28, 0xf4, 0x7b, + 0x70, 0xc3, 0xb2, 0xa9, 0x71, 0x3a, 0x24, 0xfa, 0xd0, 0x1d, 0xe8, 0x93, 0x1c, 0xa6, 0x56, 0xe4, + 0xf2, 0x6d, 0xc9, 0xd6, 0x03, 0x77, 0xd0, 0x8a, 0xda, 0xf8, 0xa8, 0x2b, 0xc7, 0x18, 0xd9, 0xa6, + 0xce, 0x44, 0x1e, 0xba, 0x86, 0xa5, 0x8f, 0x29, 0xf1, 0x69, 0xad, 0x24, 0x47, 0x89, 0xd6, 0x67, + 0xb2, 0xf1, 0x84, 0xb5, 0x69, 0xf7, 0xa1, 0x1c, 0xb3, 0x17, 0x2a, 0x42, 0xae, 0x7b, 0xd4, 0x6d, + 0xab, 0x2b, 0x08, 0xa0, 0xd0, 0xda, 0xc7, 0x47, 0x47, 0x7d, 0x71, 0xfc, 0xe8, 0x1c, 0x36, 0x1f, + 0xb7, 0xd5, 0x0c, 0x63, 0x9f, 0x74, 0xff, 0xb0, 0xdd, 0x39, 0x50, 0xb3, 0x5a, 0x1b, 0x2a, 0xf1, + 0xaf, 0x40, 0x08, 0xaa, 0x27, 0xdd, 0xa7, 0xdd, 0xa3, 0x67, 0x5d, 0xfd, 0xf0, 0xe8, 
0xa4, 0xdb, + 0x67, 0x87, 0x98, 0x2a, 0x40, 0xb3, 0xfb, 0x7c, 0x42, 0xaf, 0x41, 0xa9, 0x7b, 0x14, 0x92, 0x4a, + 0x3d, 0xa3, 0x2a, 0x4f, 0x72, 0xc5, 0x55, 0xb5, 0x88, 0x2b, 0x3e, 0x19, 0xb9, 0x01, 0xd1, 0xd9, + 0x16, 0x41, 0xb5, 0xff, 0xc8, 0xc2, 0xd6, 0x3c, 0x23, 0x23, 0x0b, 0x72, 0xcc, 0x61, 0xe4, 0xd1, + 0xf2, 0xdd, 0xfb, 0x0b, 0x47, 0x67, 0xeb, 0xc4, 0x33, 0xe4, 0x5e, 0x52, 0xc2, 0xfc, 0x3f, 0xd2, + 0xa1, 0x30, 0x34, 0x4e, 0xc9, 0x90, 0xd6, 0xb2, 0xfc, 0xf2, 0xe5, 0xf1, 0x75, 0xe6, 0x3e, 0xe0, + 0x48, 0xe2, 0xe6, 0x45, 0xc2, 0xa2, 0x3e, 0x94, 0x59, 0xb4, 0xa4, 0x42, 0x9d, 0x32, 0x80, 0xef, + 0xa6, 0x9c, 0x65, 0x7f, 0x32, 0x12, 0xc7, 0x61, 0xea, 0xf7, 0xa0, 0x1c, 0x9b, 0x6c, 0xce, 0xc5, + 0xc9, 0x56, 0xfc, 0xe2, 0xa4, 0x14, 0xbf, 0x05, 0x79, 0x38, 0x6b, 0x03, 0xa6, 0x23, 0xe6, 0x24, + 0xfb, 0x47, 0xbd, 0xbe, 0x38, 0xa2, 0x3e, 0xc6, 0x47, 0x27, 0xc7, 0xaa, 0xc2, 0x98, 0xfd, 0x66, + 0xef, 0xa9, 0x9a, 0x89, 0x7c, 0x28, 0xab, 0xb5, 0xa0, 0x1c, 0x93, 0x2b, 0xb1, 0x3d, 0x28, 0xc9, + 0xed, 0x81, 0x05, 0x68, 0xc3, 0xb2, 0x7c, 0x42, 0xa9, 0x94, 0x23, 0x24, 0xb5, 0x17, 0x50, 0xda, + 0xeb, 0xf6, 0x24, 0x44, 0x0d, 0x56, 0x29, 0xf1, 0xd9, 0x77, 0xf3, 0x2b, 0xb0, 0x12, 0x0e, 0x49, + 0x06, 0x4e, 0x89, 0xe1, 0x9b, 0x67, 0x84, 0xca, 0xa4, 0x22, 0xa2, 0xd9, 0x28, 0x97, 0x5f, 0x25, + 0x09, 0xdb, 0x95, 0x70, 0x48, 0x6a, 0xff, 0x5f, 0x04, 0x98, 0x5c, 0x6b, 0xa0, 0x2a, 0x64, 0xa2, + 0x60, 0x9f, 0xb1, 0x2d, 0xe6, 0x07, 0xb1, 0xcd, 0x8c, 0xff, 0x47, 0xbb, 0xb0, 0x3d, 0xa2, 0x03, + 0xcf, 0x30, 0xcf, 0x75, 0x79, 0x1b, 0x21, 0x62, 0x02, 0x0f, 0x9c, 0x15, 0xbc, 0x29, 0x1b, 0xe5, + 0x92, 0x17, 0xb8, 0x07, 0x90, 0x25, 0xce, 0x05, 0x0f, 0x72, 0xe5, 0xdd, 0xfb, 0x0b, 0x5f, 0xb7, + 0x34, 0xda, 0xce, 0x85, 0xf0, 0x15, 0x06, 0x83, 0x74, 0x00, 0x8b, 0x5c, 0xd8, 0x26, 0xd1, 0x19, + 0x68, 0x9e, 0x83, 0x7e, 0xb1, 0x38, 0xe8, 0x1e, 0xc7, 0x88, 0xa0, 0x4b, 0x56, 0x48, 0xa3, 0x2e, + 0x94, 0x7c, 0x42, 0xdd, 0xb1, 0x6f, 0x12, 0x11, 0xe9, 0xd2, 0x9f, 0x88, 0x70, 0x38, 0x0e, 0x4f, + 0x20, 0xd0, 0x1e, 0x14, 
0x78, 0x80, 0xa3, 0xb5, 0x55, 0x2e, 0xec, 0x47, 0x29, 0xc1, 0x78, 0x74, + 0xc1, 0x72, 0x2c, 0x7a, 0x0c, 0xab, 0x42, 0x44, 0x5a, 0x2b, 0x72, 0x98, 0x8f, 0xd3, 0x46, 0x5f, + 0x3e, 0x0a, 0x87, 0xa3, 0x99, 0x55, 0x59, 0x60, 0xe4, 0x71, 0xb1, 0x84, 0xf9, 0x7f, 0xf4, 0x1e, + 0x94, 0xc4, 0x66, 0x6f, 0xd9, 0x7e, 0x0d, 0x84, 0x73, 0x72, 0xc6, 0x9e, 0xed, 0xa3, 0xf7, 0xa1, + 0x2c, 0x92, 0x3a, 0x9d, 0x47, 0x85, 0x32, 0x6f, 0x06, 0xc1, 0x3a, 0x66, 0xb1, 0x41, 0x74, 0x20, + 0xbe, 0x2f, 0x3a, 0x54, 0xa2, 0x0e, 0xc4, 0xf7, 0x79, 0x87, 0xdf, 0x81, 0x75, 0x9e, 0x0a, 0x0f, + 0x7c, 0x77, 0xec, 0xe9, 0xdc, 0xa7, 0xd6, 0x78, 0xa7, 0x35, 0xc6, 0x7e, 0xcc, 0xb8, 0x5d, 0xe6, + 0x5c, 0xb7, 0xa0, 0xf8, 0xca, 0x3d, 0x15, 0x1d, 0xaa, 0x62, 0x1d, 0xbc, 0x72, 0x4f, 0xc3, 0xa6, + 0x28, 0x1d, 0x59, 0x4f, 0xa6, 0x23, 0x5f, 0xc3, 0x8d, 0xd9, 0x7d, 0x95, 0xa7, 0x25, 0xea, 0xf5, + 0xd3, 0x92, 0x2d, 0x67, 0x5e, 0x1c, 0xfe, 0x12, 0xb2, 0x96, 0x43, 0x6b, 0x1b, 0x0b, 0x39, 0x47, + 0xb4, 0x8e, 0x31, 0x1b, 0x8c, 0xb6, 0xa1, 0xc0, 0x3e, 0xd6, 0xb6, 0x6a, 0x48, 0x84, 0x9e, 0x57, + 0xee, 0x69, 0xc7, 0x42, 0xdf, 0x81, 0x12, 0xfb, 0x7e, 0xea, 0x19, 0x26, 0xa9, 0x6d, 0xf2, 0x96, + 0x09, 0x83, 0x19, 0xca, 0x71, 0x2d, 0x22, 0x54, 0xb4, 0x25, 0x0c, 0xc5, 0x18, 0x5c, 0x47, 0x37, + 0x61, 0x95, 0x37, 0xda, 0x56, 0x6d, 0x5b, 0x9c, 0x38, 0x18, 0xd9, 0xb1, 0x90, 0x06, 0x6b, 0x9e, + 0xe1, 0x13, 0x27, 0xd0, 0xe5, 0x8c, 0x37, 0x78, 0x73, 0x59, 0x30, 0x9f, 0xb0, 0x79, 0xeb, 0x9f, + 0x42, 0x31, 0x5c, 0x0c, 0x8b, 0x84, 0xc9, 0xfa, 0x03, 0xa8, 0x26, 0x97, 0xd2, 0x42, 0x41, 0xf6, + 0x9f, 0x33, 0x50, 0x8a, 0x16, 0x0d, 0x72, 0x60, 0x93, 0x1b, 0x95, 0xa5, 0xa6, 0xfa, 0x64, 0x0d, + 0x8a, 0x84, 0xf8, 0xf3, 0x94, 0x6a, 0x6e, 0x86, 0x08, 0xf2, 0x64, 0x2e, 0x17, 0x24, 0x8a, 0x90, + 0x27, 0xf3, 0x7d, 0x05, 0xeb, 0x43, 0xdb, 0x19, 0x5f, 0xc6, 0xe6, 0x12, 0x99, 0xec, 0xef, 0xa7, + 0x9c, 0xeb, 0x80, 0x8d, 0x9e, 0xcc, 0x51, 0x1d, 0x26, 0x68, 0xb4, 0x0f, 0x79, 0xcf, 0xf5, 0x83, + 0x70, 0xcf, 0x4c, 0xbb, 0x9b, 0x1d, 0xbb, 0x7e, 0x70, 0x68, 
0x78, 0x1e, 0x3b, 0xac, 0x09, 0x00, + 0xed, 0x9b, 0x0c, 0xdc, 0x98, 0xff, 0x61, 0xa8, 0x0b, 0x59, 0xd3, 0x1b, 0x4b, 0x25, 0x3d, 0x58, + 0x54, 0x49, 0x2d, 0x6f, 0x3c, 0x91, 0x9f, 0x01, 0xa1, 0x67, 0x50, 0x18, 0x91, 0x91, 0xeb, 0x5f, + 0x49, 0x5d, 0x3c, 0x5c, 0x14, 0xf2, 0x90, 0x8f, 0x9e, 0xa0, 0x4a, 0x38, 0x84, 0xa1, 0x28, 0x17, + 0x13, 0x95, 0x61, 0x7b, 0xc1, 0xeb, 0xb4, 0x10, 0x12, 0x47, 0x38, 0xda, 0xa7, 0xb0, 0x3d, 0xf7, + 0x53, 0xd0, 0x6f, 0x01, 0x98, 0xde, 0x58, 0xe7, 0xcf, 0x1d, 0xc2, 0x83, 0xb2, 0xb8, 0x64, 0x7a, + 0xe3, 0x1e, 0x67, 0x68, 0x2f, 0xa0, 0xf6, 0x26, 0x79, 0xd9, 0x1a, 0x13, 0x12, 0xeb, 0xa3, 0x53, + 0xae, 0x83, 0x2c, 0x2e, 0x0a, 0xc6, 0xe1, 0x29, 0x5b, 0x4a, 0x61, 0xa3, 0x71, 0xc9, 0x3a, 0x64, + 0x79, 0x87, 0xb2, 0xec, 0x60, 0x5c, 0x1e, 0x9e, 0x6a, 0xbf, 0xc8, 0xc0, 0xfa, 0x94, 0xc8, 0xec, + 0xc8, 0x2a, 0x02, 0x70, 0x78, 0x19, 0x20, 0x28, 0x16, 0x8d, 0x4d, 0xdb, 0x0a, 0xaf, 0x91, 0xf9, + 0x7f, 0xbe, 0x0f, 0x7b, 0xf2, 0x8a, 0x37, 0x63, 0x7b, 0x6c, 0xf9, 0x8c, 0x4e, 0xed, 0x80, 0xf2, + 0xa4, 0x28, 0x8f, 0x05, 0x81, 0x9e, 0x43, 0xd5, 0x27, 0x7c, 0xff, 0xb7, 0x74, 0xe1, 0x65, 0xf9, + 0x85, 0xbc, 0x4c, 0x4a, 0xc8, 0x9c, 0x0d, 0xaf, 0x85, 0x48, 0x8c, 0xa2, 0xe8, 0x19, 0xac, 0x85, + 0xc9, 0xb4, 0x40, 0x2e, 0x2c, 0x8d, 0x5c, 0x91, 0x40, 0x1c, 0x58, 0xbb, 0x07, 0xe5, 0x58, 0x23, + 0xfb, 0x30, 0x9e, 0xfd, 0x49, 0x9d, 0x08, 0x22, 0x19, 0x2d, 0xf2, 0x32, 0x5a, 0x68, 0xa7, 0x50, + 0x8e, 0xad, 0x8b, 0x45, 0x86, 0x32, 0x7d, 0x06, 0x2e, 0xd7, 0x67, 0x1e, 0x67, 0x02, 0x97, 0xc5, + 0x49, 0x96, 0x79, 0xe9, 0xb6, 0xc7, 0x35, 0x5a, 0xc2, 0x05, 0x46, 0x76, 0x3c, 0xed, 0x57, 0x19, + 0xa8, 0x26, 0x97, 0x74, 0xe8, 0x47, 0x1e, 0xf1, 0x6d, 0xd7, 0x8a, 0xf9, 0xd1, 0x31, 0x67, 0x30, + 0x5f, 0x61, 0xcd, 0x5f, 0x8f, 0xdd, 0xc0, 0x08, 0x7d, 0xc5, 0xf4, 0xc6, 0x7f, 0xc0, 0xe8, 0x29, + 0x1f, 0xcc, 0x4e, 0xf9, 0x20, 0xfa, 0x08, 0x90, 0x74, 0xa5, 0xa1, 0x3d, 0xb2, 0x03, 0xfd, 0xf4, + 0x2a, 0x20, 0xc2, 0xc6, 0x59, 0xac, 0x8a, 0x96, 0x03, 0xd6, 0xf0, 0x25, 0xe3, 0x33, 0xc7, 0x73, + 
0xdd, 0x91, 0x4e, 0x4d, 0xd7, 0x27, 0xba, 0x61, 0xbd, 0xe2, 0xa7, 0xb5, 0x2c, 0x2e, 0xbb, 0xee, + 0xa8, 0xc7, 0x78, 0x4d, 0xeb, 0x15, 0xdb, 0x88, 0x4d, 0x6f, 0x4c, 0x49, 0xa0, 0xb3, 0x1f, 0x9e, + 0xbb, 0x94, 0x30, 0x08, 0x56, 0xcb, 0x1b, 0x53, 0xf4, 0x5d, 0x58, 0x0b, 0x3b, 0xf0, 0xbd, 0x58, + 0x26, 0x01, 0x15, 0xd9, 0x85, 0xf3, 0x90, 0x06, 0x95, 0x63, 0xe2, 0x9b, 0xc4, 0x09, 0xfa, 0xb6, + 0x79, 0x4e, 0xf9, 0xb1, 0x4b, 0xc1, 0x09, 0x9e, 0x3c, 0xb5, 0x84, 0xb3, 0x8d, 0xc8, 0x88, 0x6a, + 0xff, 0xaa, 0x40, 0x9e, 0xa7, 0x2c, 0x4c, 0x29, 0x7c, 0xbb, 0xe7, 0xd9, 0x80, 0x4c, 0x75, 0x19, + 0x83, 0xe7, 0x02, 0xef, 0x41, 0x89, 0x2b, 0x3f, 0x76, 0xc2, 0xe0, 0x79, 0x30, 0x6f, 0xac, 0x43, + 0xd1, 0x27, 0x86, 0xe5, 0x3a, 0xc3, 0xf0, 0x16, 0x2c, 0xa2, 0xd1, 0xef, 0x82, 0xea, 0xf9, 0xae, + 0x67, 0x0c, 0x26, 0x07, 0x67, 0x69, 0xbe, 0xf5, 0x18, 0x9f, 0xa7, 0xe8, 0xdf, 0x85, 0x35, 0x4a, + 0x44, 0x64, 0x17, 0x4e, 0x92, 0x17, 0x9f, 0x29, 0x99, 0xfc, 0x44, 0xa0, 0x7d, 0x0d, 0x05, 0xb1, + 0x71, 0x5d, 0x43, 0xde, 0x8f, 0x01, 0x09, 0x45, 0x32, 0x07, 0x19, 0xd9, 0x94, 0xca, 0x2c, 0x9b, + 0x3f, 0xe5, 0x8a, 0x96, 0xe3, 0x49, 0x83, 0xf6, 0x5f, 0x8a, 0xc8, 0xb7, 0xc5, 0x23, 0x1b, 0x4b, + 0xcc, 0xd9, 0xaa, 0x61, 0x47, 0x5b, 0x71, 0x9b, 0x17, 0x92, 0xa8, 0x03, 0x05, 0x99, 0x56, 0x67, + 0x96, 0x7d, 0xa3, 0x94, 0x00, 0xe1, 0xdd, 0x3e, 0x91, 0x37, 0x1b, 0x8b, 0xde, 0xed, 0x13, 0x71, + 0xb7, 0x4f, 0xd0, 0x07, 0x50, 0x91, 0x09, 0xbf, 0x80, 0xcb, 0xf1, 0x7c, 0xbf, 0x6c, 0x45, 0x0f, + 0x28, 0x44, 0xfb, 0x5f, 0x25, 0x8a, 0x7b, 0xe1, 0x43, 0x07, 0xfa, 0x0a, 0x8a, 0x2c, 0x84, 0xe8, + 0x23, 0xc3, 0x93, 0xcf, 0xf6, 0xad, 0xe5, 0xde, 0x50, 0xc2, 0x5d, 0x51, 0xa4, 0xeb, 0xab, 0x9e, + 0xa0, 0x58, 0xfc, 0x64, 0x47, 0xa5, 0x30, 0x7e, 0xb2, 0xff, 0xe8, 0x43, 0xa8, 0x1a, 0xe3, 0xc0, + 0xd5, 0x0d, 0xeb, 0x82, 0xf8, 0x81, 0x4d, 0x89, 0xf4, 0xa5, 0x35, 0xc6, 0x6d, 0x86, 0xcc, 0xfa, + 0x7d, 0xa8, 0xc4, 0x31, 0xdf, 0x96, 0xb7, 0xe4, 0xe3, 0x79, 0xcb, 0x9f, 0x02, 0x4c, 0x2e, 0x0d, + 0x99, 0x8f, 0x90, 0x4b, 0x3b, 0xd0, 
0xcd, 0xf0, 0x6c, 0x9e, 0xc7, 0x45, 0xc6, 0x68, 0x31, 0x67, + 0x4c, 0xbe, 0x68, 0xe4, 0xc3, 0x17, 0x0d, 0x16, 0x1d, 0xd8, 0x82, 0x3e, 0xb7, 0x87, 0xc3, 0xe8, + 0x22, 0xb3, 0xe4, 0xba, 0xa3, 0xa7, 0x9c, 0xa1, 0xfd, 0x3a, 0x23, 0x7c, 0x45, 0xbc, 0x4d, 0xa5, + 0x3a, 0x9b, 0xbd, 0x2b, 0x53, 0xdf, 0x03, 0xa0, 0x81, 0xe1, 0xb3, 0x24, 0xcc, 0x08, 0xaf, 0x52, + 0xeb, 0x33, 0x4f, 0x22, 0xfd, 0xb0, 0x58, 0x06, 0x97, 0x64, 0xef, 0x66, 0x80, 0x3e, 0x87, 0x8a, + 0xe9, 0x8e, 0xbc, 0x21, 0x91, 0x83, 0xf3, 0x6f, 0x1d, 0x5c, 0x8e, 0xfa, 0x37, 0x83, 0xd8, 0x05, + 0x6e, 0xe1, 0xba, 0x17, 0xb8, 0xbf, 0x52, 0xc4, 0x13, 0x5b, 0xfc, 0x85, 0x0f, 0x0d, 0xe6, 0x94, + 0x91, 0x3c, 0x5e, 0xf2, 0xb9, 0xf0, 0xdb, 0x6a, 0x48, 0xea, 0x9f, 0xa7, 0x29, 0xda, 0x78, 0x73, + 0x5a, 0xfc, 0xef, 0x59, 0x28, 0x45, 0xaf, 0x6b, 0x33, 0xb6, 0xff, 0x0c, 0x4a, 0x51, 0xa5, 0x92, + 0x0c, 0x10, 0xdf, 0x6a, 0x9e, 0xa8, 0x33, 0x7a, 0x09, 0xc8, 0x18, 0x0c, 0xa2, 0x74, 0x57, 0x1f, + 0x53, 0x63, 0x10, 0xbe, 0x6d, 0x7e, 0xb6, 0x80, 0x1e, 0xc2, 0xfd, 0xf1, 0x84, 0x8d, 0xc7, 0xaa, + 0x31, 0x18, 0x24, 0x38, 0xe8, 0xcf, 0x60, 0x3b, 0x39, 0x87, 0x7e, 0x7a, 0xa5, 0x7b, 0xb6, 0x25, + 0xef, 0x00, 0xf6, 0x17, 0x7d, 0x60, 0x6c, 0x24, 0xe0, 0xbf, 0xbc, 0x3a, 0xb6, 0x2d, 0xa1, 0x73, + 0xe4, 0xcf, 0x34, 0xd4, 0xff, 0x02, 0x6e, 0xbe, 0xa1, 0xfb, 0x1c, 0x1b, 0x74, 0x93, 0x85, 0x33, + 0xcb, 0x2b, 0x21, 0x66, 0xbd, 0x5f, 0x2a, 0xe2, 0x1d, 0x34, 0xa9, 0x93, 0x66, 0x3c, 0x4f, 0xbf, + 0x93, 0x72, 0x9e, 0xd6, 0xf1, 0x89, 0x80, 0xe7, 0xa9, 0xf9, 0x93, 0xa9, 0xd4, 0x3c, 0x6d, 0x42, + 0x26, 0x32, 0x5c, 0x01, 0x24, 0x11, 0xb4, 0x7f, 0xc9, 0x42, 0x31, 0x44, 0xe7, 0x27, 0xf8, 0x2b, + 0x1a, 0x90, 0x91, 0x1e, 0x5d, 0x2f, 0x2a, 0x18, 0x04, 0x8b, 0xef, 0xa8, 0xef, 0x41, 0x69, 0x4c, + 0x89, 0x2f, 0x9a, 0x33, 0xbc, 0xb9, 0xc8, 0x18, 0xbc, 0xf1, 0x7d, 0x28, 0x07, 0x6e, 0x60, 0x0c, + 0xf5, 0x80, 0xe7, 0x0b, 0x59, 0x31, 0x9a, 0xb3, 0x78, 0xb6, 0x80, 0xbe, 0x0f, 0x1b, 0xc1, 0x99, + 0xef, 0x06, 0xc1, 0x90, 0xe5, 0xaa, 0x3c, 0x73, 0x12, 0x89, 0x4e, 0x0e, 
0xab, 0x51, 0x83, 0xc8, + 0xa8, 0x28, 0x8b, 0xde, 0x93, 0xce, 0xcc, 0x75, 0x79, 0x10, 0xc9, 0xe1, 0xb5, 0x88, 0xcb, 0x5c, + 0x9b, 0x6d, 0x9e, 0x9e, 0xc8, 0x48, 0x78, 0xac, 0x50, 0x70, 0x48, 0x22, 0x1d, 0xd6, 0x47, 0xc4, + 0xa0, 0x63, 0x9f, 0x58, 0xfa, 0x4b, 0x9b, 0x0c, 0x2d, 0x71, 0xf1, 0x52, 0x4d, 0x7d, 0xdc, 0x08, + 0xd5, 0xd2, 0x78, 0xc4, 0x47, 0xe3, 0x6a, 0x08, 0x27, 0x68, 0x96, 0x39, 0x88, 0x7f, 0x68, 0x1d, + 0xca, 0xbd, 0xe7, 0xbd, 0x7e, 0xfb, 0x50, 0x3f, 0x3c, 0xda, 0x6b, 0xcb, 0xda, 0xa8, 0x5e, 0x1b, + 0x0b, 0x52, 0x61, 0xed, 0xfd, 0xa3, 0x7e, 0xf3, 0x40, 0xef, 0x77, 0x5a, 0x4f, 0x7b, 0x6a, 0x06, + 0x6d, 0xc3, 0x46, 0x7f, 0x1f, 0x1f, 0xf5, 0xfb, 0x07, 0xed, 0x3d, 0xfd, 0xb8, 0x8d, 0x3b, 0x47, + 0x7b, 0x3d, 0x35, 0x8b, 0x10, 0x54, 0x27, 0xec, 0x7e, 0xe7, 0xb0, 0xad, 0xe6, 0x50, 0x19, 0x56, + 0x8f, 0xdb, 0xb8, 0xd5, 0xee, 0xf6, 0xd5, 0xbc, 0xf6, 0x8b, 0x2c, 0x94, 0x63, 0x56, 0x64, 0x8e, + 0xec, 0x53, 0x71, 0xae, 0xc9, 0x61, 0xf6, 0x97, 0xbf, 0xe5, 0x1a, 0xe6, 0x99, 0xb0, 0x4e, 0x0e, + 0x0b, 0x82, 0x9f, 0x65, 0x8c, 0xcb, 0xd8, 0x3a, 0xcf, 0xe1, 0xe2, 0xc8, 0xb8, 0x14, 0x20, 0x1f, + 0x40, 0xe5, 0x9c, 0xf8, 0x0e, 0x19, 0xca, 0x76, 0x61, 0x91, 0xb2, 0xe0, 0x89, 0x2e, 0x3b, 0xa0, + 0xca, 0x2e, 0x13, 0x18, 0x61, 0x8e, 0xaa, 0xe0, 0x1f, 0x86, 0x60, 0x5b, 0x90, 0x17, 0xcd, 0xab, + 0x62, 0x7e, 0x4e, 0xb0, 0x6d, 0x8a, 0xbe, 0x36, 0x3c, 0x9e, 0x43, 0xe6, 0x30, 0xff, 0x8f, 0x4e, + 0x67, 0xed, 0x53, 0xe0, 0xf6, 0xb9, 0xb7, 0xb8, 0x3b, 0xbf, 0xc9, 0x44, 0x67, 0x91, 0x89, 0x56, + 0x21, 0x8b, 0xc3, 0x82, 0xa2, 0x56, 0xb3, 0xb5, 0xcf, 0xcc, 0xb2, 0x06, 0xa5, 0xc3, 0xe6, 0x4f, + 0xf4, 0x93, 0x9e, 0xb8, 0xd5, 0x57, 0xa1, 0xf2, 0xb4, 0x8d, 0xbb, 0xed, 0x03, 0xc9, 0xc9, 0xa2, + 0x2d, 0x50, 0x25, 0x67, 0xd2, 0x2f, 0xc7, 0x10, 0xc4, 0xdf, 0x3c, 0x2a, 0x42, 0xae, 0xf7, 0xac, + 0x79, 0xac, 0x16, 0xb4, 0xff, 0xc9, 0xc0, 0xba, 0xd8, 0x16, 0xa2, 0xd2, 0x87, 0x37, 0x3f, 0xfd, + 0xc6, 0x6f, 0xb1, 0x32, 0xc9, 0x5b, 0xac, 0x30, 0x09, 0xe5, 0xbb, 0x7a, 0x76, 0x92, 0x84, 0xf2, + 0x9b, 0x9d, 
0x44, 0xc4, 0xcf, 0x2d, 0x12, 0xf1, 0x6b, 0xb0, 0x3a, 0x22, 0x34, 0xb2, 0x5b, 0x09, + 0x87, 0x24, 0xb2, 0xa1, 0x6c, 0x38, 0x8e, 0x1b, 0x18, 0xe2, 0x6a, 0xb8, 0xb0, 0xd0, 0x66, 0x38, + 0xf5, 0xc5, 0x8d, 0xe6, 0x04, 0x49, 0x04, 0xe6, 0x38, 0x76, 0xfd, 0xc7, 0xa0, 0x4e, 0x77, 0x58, + 0x64, 0x3b, 0xfc, 0xde, 0x0f, 0x26, 0xbb, 0x21, 0x61, 0xeb, 0x42, 0xbe, 0xb3, 0xa8, 0x2b, 0x8c, + 0xc0, 0x27, 0xdd, 0x6e, 0xa7, 0xfb, 0x58, 0x55, 0x10, 0x40, 0xa1, 0xfd, 0x93, 0x4e, 0xbf, 0xbd, + 0xa7, 0x66, 0x76, 0x7f, 0xb9, 0x01, 0x05, 0x21, 0x24, 0xfa, 0x46, 0x66, 0x02, 0xf1, 0xb2, 0x5a, + 0xf4, 0xe3, 0x85, 0x33, 0xea, 0x44, 0xa9, 0x6e, 0xfd, 0xe1, 0xd2, 0xe3, 0xe5, 0x33, 0xe6, 0x0a, + 0xfa, 0x1b, 0x05, 0x2a, 0x89, 0x27, 0xcc, 0xb4, 0x57, 0xe3, 0x73, 0xaa, 0x78, 0xeb, 0x3f, 0x5a, + 0x6a, 0x6c, 0x24, 0xcb, 0xcf, 0x15, 0x28, 0xc7, 0xea, 0x57, 0xd1, 0xbd, 0x65, 0x6a, 0x5e, 0x85, + 0x24, 0xf7, 0x97, 0x2f, 0x97, 0xd5, 0x56, 0x3e, 0x51, 0xd0, 0x5f, 0x2b, 0x50, 0x8e, 0x55, 0x72, + 0xa6, 0x16, 0x65, 0xb6, 0xee, 0x34, 0xb5, 0x28, 0xf3, 0x0a, 0x47, 0x57, 0xd0, 0x5f, 0x2a, 0x50, + 0x8a, 0xaa, 0x32, 0xd1, 0xdd, 0xc5, 0xeb, 0x38, 0x85, 0x10, 0x9f, 0x2d, 0x5b, 0x00, 0xaa, 0xad, + 0xa0, 0x3f, 0x87, 0x62, 0x58, 0xc2, 0x88, 0xd2, 0xee, 0x5e, 0x53, 0xf5, 0x91, 0xf5, 0xbb, 0x0b, + 0x8f, 0x8b, 0x4f, 0x1f, 0xd6, 0x15, 0xa6, 0x9e, 0x7e, 0xaa, 0x02, 0xb2, 0x7e, 0x77, 0xe1, 0x71, + 0xd1, 0xf4, 0xcc, 0x13, 0x62, 0xe5, 0x87, 0xa9, 0x3d, 0x61, 0xb6, 0xee, 0x31, 0xb5, 0x27, 0xcc, + 0xab, 0x76, 0x14, 0x82, 0xc4, 0x0a, 0x18, 0x53, 0x0b, 0x32, 0x5b, 0x24, 0x99, 0x5a, 0x90, 0x39, + 0xf5, 0x92, 0xda, 0x0a, 0xfa, 0x99, 0x12, 0x3f, 0x17, 0xdc, 0x5d, 0xb8, 0x4e, 0x6f, 0x41, 0x97, + 0x9c, 0xa9, 0x14, 0xe4, 0x0b, 0xf4, 0x67, 0xf2, 0x16, 0x43, 0x94, 0xf9, 0xa1, 0x45, 0xc0, 0x12, + 0x95, 0x81, 0xf5, 0x4f, 0x97, 0xdb, 0x6c, 0xb8, 0x10, 0x7f, 0xa5, 0x00, 0x4c, 0x0a, 0x02, 0x53, + 0x0b, 0x31, 0x53, 0x89, 0x58, 0xbf, 0xb7, 0xc4, 0xc8, 0xf8, 0x02, 0x09, 0x0b, 0x96, 0x52, 0x2f, + 0x90, 0xa9, 0x82, 0xc5, 0xd4, 0x0b, 0x64, 0xba, 
0xd8, 0x50, 0x5b, 0x41, 0xff, 0xa4, 0xc0, 0xc6, + 0x4c, 0xc1, 0x14, 0x7a, 0x78, 0xcd, 0x9a, 0xb9, 0xfa, 0x17, 0xcb, 0x03, 0x84, 0xa2, 0xed, 0x28, + 0x9f, 0x28, 0xe8, 0x6f, 0x15, 0x58, 0x4b, 0x16, 0x92, 0xa4, 0xde, 0xa5, 0xe6, 0x94, 0x5e, 0xd5, + 0x1f, 0x2c, 0x37, 0x38, 0xd2, 0xd6, 0xdf, 0x2b, 0x50, 0x4d, 0xd6, 0x14, 0xa1, 0x07, 0x8b, 0x85, + 0x85, 0x29, 0x81, 0x3e, 0x5f, 0x72, 0x74, 0x28, 0xd1, 0x97, 0xab, 0x7f, 0x94, 0x17, 0xd9, 0x5b, + 0x81, 0xff, 0xfc, 0xf0, 0x37, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4f, 0xda, 0xef, 0xe8, 0xfd, 0x34, + 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// DriverClient is the client API for Driver service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type DriverClient interface { + // TaskConfigSchema returns the schema for parsing the driver + // configuration of a task. + TaskConfigSchema(ctx context.Context, in *TaskConfigSchemaRequest, opts ...grpc.CallOption) (*TaskConfigSchemaResponse, error) + // Capabilities returns a set of features which the driver implements. Some + // RPCs are not possible to implement on some runtimes, this allows the + // driver to indicate if it doesn't support these RPCs and features. + Capabilities(ctx context.Context, in *CapabilitiesRequest, opts ...grpc.CallOption) (*CapabilitiesResponse, error) + // Fingerprint starts a stream which emits information about the driver + // including whether the driver healthy and able to function in the + // existing environment. 
+ // + // The driver should immediately stream a FingerprintResponse when the RPC + // is initially called, then send any additional responses if there is a + // change in the driver's state. + Fingerprint(ctx context.Context, in *FingerprintRequest, opts ...grpc.CallOption) (Driver_FingerprintClient, error) + // RecoverTask is used when a task has been started but the driver may not + // know about it. Such is the case if the driver restarts or is upgraded. + RecoverTask(ctx context.Context, in *RecoverTaskRequest, opts ...grpc.CallOption) (*RecoverTaskResponse, error) + // StartTask starts and tracks the task on the implemented runtime + StartTask(ctx context.Context, in *StartTaskRequest, opts ...grpc.CallOption) (*StartTaskResponse, error) + // WaitTask blocks until the given task exits, returning the result of the + // task. It may be called after the task has exited, but before the task is + // destroyed. + WaitTask(ctx context.Context, in *WaitTaskRequest, opts ...grpc.CallOption) (*WaitTaskResponse, error) + // StopTask stops a given task by sending the desired signal to the process. + // If the task does not exit on its own within the given timeout, it will be + // forcefully killed. + StopTask(ctx context.Context, in *StopTaskRequest, opts ...grpc.CallOption) (*StopTaskResponse, error) + // DestroyTask removes the task from the driver's internal state and cleans + // up any additional resources created by the driver. It cannot be called + // on a running task, unless force is set to true. 
+ DestroyTask(ctx context.Context, in *DestroyTaskRequest, opts ...grpc.CallOption) (*DestroyTaskResponse, error) + // InspectTask returns detailed information for the given task + InspectTask(ctx context.Context, in *InspectTaskRequest, opts ...grpc.CallOption) (*InspectTaskResponse, error) + // TaskStats collects and returns runtime metrics for the given task + TaskStats(ctx context.Context, in *TaskStatsRequest, opts ...grpc.CallOption) (Driver_TaskStatsClient, error) + // TaskEvents starts a streaming RPC where all task events emitted by the + // driver are streamed to the caller. + TaskEvents(ctx context.Context, in *TaskEventsRequest, opts ...grpc.CallOption) (Driver_TaskEventsClient, error) + // SignalTask sends a signal to the task + SignalTask(ctx context.Context, in *SignalTaskRequest, opts ...grpc.CallOption) (*SignalTaskResponse, error) + // ExecTask executes a command inside the tasks execution context + ExecTask(ctx context.Context, in *ExecTaskRequest, opts ...grpc.CallOption) (*ExecTaskResponse, error) + // ExecTaskStreaming executes a command inside the tasks execution context + // and streams back results + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + ExecTaskStreaming(ctx context.Context, opts ...grpc.CallOption) (Driver_ExecTaskStreamingClient, error) + // CreateNetwork is implemented when the driver needs to create the network + // namespace instead of allowing the Nomad client to do. + CreateNetwork(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*CreateNetworkResponse, error) + // DestroyNetwork destroys a previously created network. This rpc is only + // implemented if the driver needs to manage network namespace creation. 
+ DestroyNetwork(ctx context.Context, in *DestroyNetworkRequest, opts ...grpc.CallOption) (*DestroyNetworkResponse, error) +} + +type driverClient struct { + cc grpc.ClientConnInterface +} + +func NewDriverClient(cc grpc.ClientConnInterface) DriverClient { + return &driverClient{cc} +} + +func (c *driverClient) TaskConfigSchema(ctx context.Context, in *TaskConfigSchemaRequest, opts ...grpc.CallOption) (*TaskConfigSchemaResponse, error) { + out := new(TaskConfigSchemaResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/TaskConfigSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) Capabilities(ctx context.Context, in *CapabilitiesRequest, opts ...grpc.CallOption) (*CapabilitiesResponse, error) { + out := new(CapabilitiesResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/Capabilities", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) Fingerprint(ctx context.Context, in *FingerprintRequest, opts ...grpc.CallOption) (Driver_FingerprintClient, error) { + stream, err := c.cc.NewStream(ctx, &_Driver_serviceDesc.Streams[0], "/hashicorp.nomad.plugins.drivers.proto.Driver/Fingerprint", opts...) 
+ if err != nil { + return nil, err + } + x := &driverFingerprintClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Driver_FingerprintClient interface { + Recv() (*FingerprintResponse, error) + grpc.ClientStream +} + +type driverFingerprintClient struct { + grpc.ClientStream +} + +func (x *driverFingerprintClient) Recv() (*FingerprintResponse, error) { + m := new(FingerprintResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *driverClient) RecoverTask(ctx context.Context, in *RecoverTaskRequest, opts ...grpc.CallOption) (*RecoverTaskResponse, error) { + out := new(RecoverTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/RecoverTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) StartTask(ctx context.Context, in *StartTaskRequest, opts ...grpc.CallOption) (*StartTaskResponse, error) { + out := new(StartTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/StartTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) WaitTask(ctx context.Context, in *WaitTaskRequest, opts ...grpc.CallOption) (*WaitTaskResponse, error) { + out := new(WaitTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/WaitTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) StopTask(ctx context.Context, in *StopTaskRequest, opts ...grpc.CallOption) (*StopTaskResponse, error) { + out := new(StopTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/StopTask", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) DestroyTask(ctx context.Context, in *DestroyTaskRequest, opts ...grpc.CallOption) (*DestroyTaskResponse, error) { + out := new(DestroyTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/DestroyTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) InspectTask(ctx context.Context, in *InspectTaskRequest, opts ...grpc.CallOption) (*InspectTaskResponse, error) { + out := new(InspectTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/InspectTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) TaskStats(ctx context.Context, in *TaskStatsRequest, opts ...grpc.CallOption) (Driver_TaskStatsClient, error) { + stream, err := c.cc.NewStream(ctx, &_Driver_serviceDesc.Streams[1], "/hashicorp.nomad.plugins.drivers.proto.Driver/TaskStats", opts...) + if err != nil { + return nil, err + } + x := &driverTaskStatsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Driver_TaskStatsClient interface { + Recv() (*TaskStatsResponse, error) + grpc.ClientStream +} + +type driverTaskStatsClient struct { + grpc.ClientStream +} + +func (x *driverTaskStatsClient) Recv() (*TaskStatsResponse, error) { + m := new(TaskStatsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *driverClient) TaskEvents(ctx context.Context, in *TaskEventsRequest, opts ...grpc.CallOption) (Driver_TaskEventsClient, error) { + stream, err := c.cc.NewStream(ctx, &_Driver_serviceDesc.Streams[2], "/hashicorp.nomad.plugins.drivers.proto.Driver/TaskEvents", opts...) 
+ if err != nil { + return nil, err + } + x := &driverTaskEventsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Driver_TaskEventsClient interface { + Recv() (*DriverTaskEvent, error) + grpc.ClientStream +} + +type driverTaskEventsClient struct { + grpc.ClientStream +} + +func (x *driverTaskEventsClient) Recv() (*DriverTaskEvent, error) { + m := new(DriverTaskEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *driverClient) SignalTask(ctx context.Context, in *SignalTaskRequest, opts ...grpc.CallOption) (*SignalTaskResponse, error) { + out := new(SignalTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/SignalTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) ExecTask(ctx context.Context, in *ExecTaskRequest, opts ...grpc.CallOption) (*ExecTaskResponse, error) { + out := new(ExecTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/ExecTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) ExecTaskStreaming(ctx context.Context, opts ...grpc.CallOption) (Driver_ExecTaskStreamingClient, error) { + stream, err := c.cc.NewStream(ctx, &_Driver_serviceDesc.Streams[3], "/hashicorp.nomad.plugins.drivers.proto.Driver/ExecTaskStreaming", opts...) 
+ if err != nil { + return nil, err + } + x := &driverExecTaskStreamingClient{stream} + return x, nil +} + +type Driver_ExecTaskStreamingClient interface { + Send(*ExecTaskStreamingRequest) error + Recv() (*ExecTaskStreamingResponse, error) + grpc.ClientStream +} + +type driverExecTaskStreamingClient struct { + grpc.ClientStream +} + +func (x *driverExecTaskStreamingClient) Send(m *ExecTaskStreamingRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *driverExecTaskStreamingClient) Recv() (*ExecTaskStreamingResponse, error) { + m := new(ExecTaskStreamingResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *driverClient) CreateNetwork(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*CreateNetworkResponse, error) { + out := new(CreateNetworkResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/CreateNetwork", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) DestroyNetwork(ctx context.Context, in *DestroyNetworkRequest, opts ...grpc.CallOption) (*DestroyNetworkResponse, error) { + out := new(DestroyNetworkResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/DestroyNetwork", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DriverServer is the server API for Driver service. +type DriverServer interface { + // TaskConfigSchema returns the schema for parsing the driver + // configuration of a task. + TaskConfigSchema(context.Context, *TaskConfigSchemaRequest) (*TaskConfigSchemaResponse, error) + // Capabilities returns a set of features which the driver implements. Some + // RPCs are not possible to implement on some runtimes, this allows the + // driver to indicate if it doesn't support these RPCs and features. 
+ Capabilities(context.Context, *CapabilitiesRequest) (*CapabilitiesResponse, error)
+ // Fingerprint starts a stream which emits information about the driver
+ // including whether the driver is healthy and able to function in the
+ // existing environment.
+ //
+ // The driver should immediately stream a FingerprintResponse when the RPC
+ // is initially called, then send any additional responses if there is a
+ // change in the driver's state.
+ Fingerprint(*FingerprintRequest, Driver_FingerprintServer) error
+ // RecoverTask is used when a task has been started but the driver may not
+ // know about it. Such is the case if the driver restarts or is upgraded.
+ RecoverTask(context.Context, *RecoverTaskRequest) (*RecoverTaskResponse, error)
+ // StartTask starts and tracks the task on the implemented runtime
+ StartTask(context.Context, *StartTaskRequest) (*StartTaskResponse, error)
+ // WaitTask blocks until the given task exits, returning the result of the
+ // task. It may be called after the task has exited, but before the task is
+ // destroyed.
+ WaitTask(context.Context, *WaitTaskRequest) (*WaitTaskResponse, error)
+ // StopTask stops a given task by sending the desired signal to the process.
+ // If the task does not exit on its own within the given timeout, it will be
+ // forcefully killed.
+ StopTask(context.Context, *StopTaskRequest) (*StopTaskResponse, error)
+ // DestroyTask removes the task from the driver's internal state and cleans
+ // up any additional resources created by the driver. It cannot be called
+ // on a running task, unless force is set to true.
+ DestroyTask(context.Context, *DestroyTaskRequest) (*DestroyTaskResponse, error) + // InspectTask returns detailed information for the given task + InspectTask(context.Context, *InspectTaskRequest) (*InspectTaskResponse, error) + // TaskStats collects and returns runtime metrics for the given task + TaskStats(*TaskStatsRequest, Driver_TaskStatsServer) error + // TaskEvents starts a streaming RPC where all task events emitted by the + // driver are streamed to the caller. + TaskEvents(*TaskEventsRequest, Driver_TaskEventsServer) error + // SignalTask sends a signal to the task + SignalTask(context.Context, *SignalTaskRequest) (*SignalTaskResponse, error) + // ExecTask executes a command inside the tasks execution context + ExecTask(context.Context, *ExecTaskRequest) (*ExecTaskResponse, error) + // ExecTaskStreaming executes a command inside the tasks execution context + // and streams back results + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + ExecTaskStreaming(Driver_ExecTaskStreamingServer) error + // CreateNetwork is implemented when the driver needs to create the network + // namespace instead of allowing the Nomad client to do. + CreateNetwork(context.Context, *CreateNetworkRequest) (*CreateNetworkResponse, error) + // DestroyNetwork destroys a previously created network. This rpc is only + // implemented if the driver needs to manage network namespace creation. + DestroyNetwork(context.Context, *DestroyNetworkRequest) (*DestroyNetworkResponse, error) +} + +// UnimplementedDriverServer can be embedded to have forward compatible implementations. 
+type UnimplementedDriverServer struct { +} + +func (*UnimplementedDriverServer) TaskConfigSchema(ctx context.Context, req *TaskConfigSchemaRequest) (*TaskConfigSchemaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TaskConfigSchema not implemented") +} +func (*UnimplementedDriverServer) Capabilities(ctx context.Context, req *CapabilitiesRequest) (*CapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Capabilities not implemented") +} +func (*UnimplementedDriverServer) Fingerprint(req *FingerprintRequest, srv Driver_FingerprintServer) error { + return status.Errorf(codes.Unimplemented, "method Fingerprint not implemented") +} +func (*UnimplementedDriverServer) RecoverTask(ctx context.Context, req *RecoverTaskRequest) (*RecoverTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RecoverTask not implemented") +} +func (*UnimplementedDriverServer) StartTask(ctx context.Context, req *StartTaskRequest) (*StartTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartTask not implemented") +} +func (*UnimplementedDriverServer) WaitTask(ctx context.Context, req *WaitTaskRequest) (*WaitTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WaitTask not implemented") +} +func (*UnimplementedDriverServer) StopTask(ctx context.Context, req *StopTaskRequest) (*StopTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopTask not implemented") +} +func (*UnimplementedDriverServer) DestroyTask(ctx context.Context, req *DestroyTaskRequest) (*DestroyTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DestroyTask not implemented") +} +func (*UnimplementedDriverServer) InspectTask(ctx context.Context, req *InspectTaskRequest) (*InspectTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method InspectTask not implemented") +} +func (*UnimplementedDriverServer) 
TaskStats(req *TaskStatsRequest, srv Driver_TaskStatsServer) error { + return status.Errorf(codes.Unimplemented, "method TaskStats not implemented") +} +func (*UnimplementedDriverServer) TaskEvents(req *TaskEventsRequest, srv Driver_TaskEventsServer) error { + return status.Errorf(codes.Unimplemented, "method TaskEvents not implemented") +} +func (*UnimplementedDriverServer) SignalTask(ctx context.Context, req *SignalTaskRequest) (*SignalTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SignalTask not implemented") +} +func (*UnimplementedDriverServer) ExecTask(ctx context.Context, req *ExecTaskRequest) (*ExecTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExecTask not implemented") +} +func (*UnimplementedDriverServer) ExecTaskStreaming(srv Driver_ExecTaskStreamingServer) error { + return status.Errorf(codes.Unimplemented, "method ExecTaskStreaming not implemented") +} +func (*UnimplementedDriverServer) CreateNetwork(ctx context.Context, req *CreateNetworkRequest) (*CreateNetworkResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateNetwork not implemented") +} +func (*UnimplementedDriverServer) DestroyNetwork(ctx context.Context, req *DestroyNetworkRequest) (*DestroyNetworkResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DestroyNetwork not implemented") +} + +func RegisterDriverServer(s *grpc.Server, srv DriverServer) { + s.RegisterService(&_Driver_serviceDesc, srv) +} + +func _Driver_TaskConfigSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TaskConfigSchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).TaskConfigSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/TaskConfigSchema", + } + 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).TaskConfigSchema(ctx, req.(*TaskConfigSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_Capabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).Capabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/Capabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).Capabilities(ctx, req.(*CapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_Fingerprint_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(FingerprintRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DriverServer).Fingerprint(m, &driverFingerprintServer{stream}) +} + +type Driver_FingerprintServer interface { + Send(*FingerprintResponse) error + grpc.ServerStream +} + +type driverFingerprintServer struct { + grpc.ServerStream +} + +func (x *driverFingerprintServer) Send(m *FingerprintResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Driver_RecoverTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RecoverTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).RecoverTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/RecoverTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(DriverServer).RecoverTask(ctx, req.(*RecoverTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_StartTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).StartTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/StartTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).StartTask(ctx, req.(*StartTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_WaitTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WaitTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).WaitTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/WaitTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).WaitTask(ctx, req.(*WaitTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_StopTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).StopTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/StopTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).StopTask(ctx, 
req.(*StopTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_DestroyTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DestroyTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).DestroyTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/DestroyTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).DestroyTask(ctx, req.(*DestroyTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_InspectTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InspectTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).InspectTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/InspectTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).InspectTask(ctx, req.(*InspectTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_TaskStats_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(TaskStatsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DriverServer).TaskStats(m, &driverTaskStatsServer{stream}) +} + +type Driver_TaskStatsServer interface { + Send(*TaskStatsResponse) error + grpc.ServerStream +} + +type driverTaskStatsServer struct { + grpc.ServerStream +} + +func (x *driverTaskStatsServer) Send(m *TaskStatsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Driver_TaskEvents_Handler(srv interface{}, stream 
grpc.ServerStream) error { + m := new(TaskEventsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DriverServer).TaskEvents(m, &driverTaskEventsServer{stream}) +} + +type Driver_TaskEventsServer interface { + Send(*DriverTaskEvent) error + grpc.ServerStream +} + +type driverTaskEventsServer struct { + grpc.ServerStream +} + +func (x *driverTaskEventsServer) Send(m *DriverTaskEvent) error { + return x.ServerStream.SendMsg(m) +} + +func _Driver_SignalTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignalTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).SignalTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/SignalTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).SignalTask(ctx, req.(*SignalTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_ExecTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExecTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).ExecTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/ExecTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).ExecTask(ctx, req.(*ExecTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_ExecTaskStreaming_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(DriverServer).ExecTaskStreaming(&driverExecTaskStreamingServer{stream}) +} + +type Driver_ExecTaskStreamingServer interface 
{ + Send(*ExecTaskStreamingResponse) error + Recv() (*ExecTaskStreamingRequest, error) + grpc.ServerStream +} + +type driverExecTaskStreamingServer struct { + grpc.ServerStream +} + +func (x *driverExecTaskStreamingServer) Send(m *ExecTaskStreamingResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *driverExecTaskStreamingServer) Recv() (*ExecTaskStreamingRequest, error) { + m := new(ExecTaskStreamingRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Driver_CreateNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).CreateNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/CreateNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).CreateNetwork(ctx, req.(*CreateNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_DestroyNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DestroyNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).DestroyNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/DestroyNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).DestroyNetwork(ctx, req.(*DestroyNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Driver_serviceDesc = grpc.ServiceDesc{ + ServiceName: "hashicorp.nomad.plugins.drivers.proto.Driver", + 
HandlerType: (*DriverServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "TaskConfigSchema", + Handler: _Driver_TaskConfigSchema_Handler, + }, + { + MethodName: "Capabilities", + Handler: _Driver_Capabilities_Handler, + }, + { + MethodName: "RecoverTask", + Handler: _Driver_RecoverTask_Handler, + }, + { + MethodName: "StartTask", + Handler: _Driver_StartTask_Handler, + }, + { + MethodName: "WaitTask", + Handler: _Driver_WaitTask_Handler, + }, + { + MethodName: "StopTask", + Handler: _Driver_StopTask_Handler, + }, + { + MethodName: "DestroyTask", + Handler: _Driver_DestroyTask_Handler, + }, + { + MethodName: "InspectTask", + Handler: _Driver_InspectTask_Handler, + }, + { + MethodName: "SignalTask", + Handler: _Driver_SignalTask_Handler, + }, + { + MethodName: "ExecTask", + Handler: _Driver_ExecTask_Handler, + }, + { + MethodName: "CreateNetwork", + Handler: _Driver_CreateNetwork_Handler, + }, + { + MethodName: "DestroyNetwork", + Handler: _Driver_DestroyNetwork_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Fingerprint", + Handler: _Driver_Fingerprint_Handler, + ServerStreams: true, + }, + { + StreamName: "TaskStats", + Handler: _Driver_TaskStats_Handler, + ServerStreams: true, + }, + { + StreamName: "TaskEvents", + Handler: _Driver_TaskEvents_Handler, + ServerStreams: true, + }, + { + StreamName: "ExecTaskStreaming", + Handler: _Driver_ExecTaskStreaming_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "plugins/drivers/proto/driver.proto", +} diff --git a/plugin_interface/drivers/proto/driver.proto b/plugin_interface/drivers/proto/driver.proto new file mode 100644 index 00000000000..eacde9da5c2 --- /dev/null +++ b/plugin_interface/drivers/proto/driver.proto @@ -0,0 +1,787 @@ +// Copyright IBM Corp. 
2015, 2025
+// SPDX-License-Identifier: MPL-2.0
+
+syntax = "proto3";
+package hashicorp.nomad.plugins.drivers.proto;
+option go_package = "proto";
+
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+import "plugins/shared/hclspec/hcl_spec.proto";
+import "plugins/shared/structs/proto/attribute.proto";
+
+// Driver service defines RPCs used to communicate with a nomad runtime driver.
+// Some rpcs may not be implemented by the driver based on its capabilities.
+service Driver {
+
+ // TaskConfigSchema returns the schema for parsing the driver
+ // configuration of a task.
+ rpc TaskConfigSchema(TaskConfigSchemaRequest) returns (TaskConfigSchemaResponse) {}
+
+ // Capabilities returns a set of features which the driver implements. Some
+ // RPCs are not possible to implement on some runtimes, this allows the
+ // driver to indicate if it doesn't support these RPCs and features.
+ rpc Capabilities(CapabilitiesRequest) returns (CapabilitiesResponse) {}
+
+ // Fingerprint starts a stream which emits information about the driver
+ // including whether the driver is healthy and able to function in the
+ // existing environment.
+ //
+ // The driver should immediately stream a FingerprintResponse when the RPC
+ // is initially called, then send any additional responses if there is a
+ // change in the driver's state.
+ rpc Fingerprint(FingerprintRequest) returns (stream FingerprintResponse) {}
+
+ // RecoverTask is used when a task has been started but the driver may not
+ // know about it. Such is the case if the driver restarts or is upgraded.
+ rpc RecoverTask(RecoverTaskRequest) returns (RecoverTaskResponse) {}
+
+ // StartTask starts and tracks the task on the implemented runtime
+ rpc StartTask(StartTaskRequest) returns (StartTaskResponse) {}
+
+ // WaitTask blocks until the given task exits, returning the result of the
+ // task. It may be called after the task has exited, but before the task is
+ // destroyed.
+ rpc WaitTask(WaitTaskRequest) returns (WaitTaskResponse) {} + + // StopTask stops a given task by sending the desired signal to the process. + // If the task does not exit on its own within the given timeout, it will be + // forcefully killed. + rpc StopTask(StopTaskRequest) returns (StopTaskResponse) {} + + // DestroyTask removes the task from the driver's internal state and cleans + // up any additional resources created by the driver. It cannot be called + // on a running task, unless force is set to true. + rpc DestroyTask(DestroyTaskRequest) returns (DestroyTaskResponse) {} + + // InspectTask returns detailed information for the given task + rpc InspectTask(InspectTaskRequest) returns (InspectTaskResponse) {} + + // TaskStats collects and returns runtime metrics for the given task + rpc TaskStats(TaskStatsRequest) returns (stream TaskStatsResponse) {} + + // TaskEvents starts a streaming RPC where all task events emitted by the + // driver are streamed to the caller. + rpc TaskEvents(TaskEventsRequest) returns (stream + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + DriverTaskEvent) {} + + // The following RPCs are only implemented if the driver sets the + // corresponding capability. + + // SignalTask sends a signal to the task + rpc SignalTask(SignalTaskRequest) returns (SignalTaskResponse) {} + + // ExecTask executes a command inside the tasks execution context + rpc ExecTask(ExecTaskRequest) returns (ExecTaskResponse) {} + + // ExecTaskStreaming executes a command inside the tasks execution context + // and streams back results + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + rpc ExecTaskStreaming(stream ExecTaskStreamingRequest) returns (stream ExecTaskStreamingResponse) {} + + // CreateNetwork is implemented when the driver needs to create the network + // namespace instead of allowing the Nomad client to do. + rpc CreateNetwork(CreateNetworkRequest) returns (CreateNetworkResponse) {} + + // DestroyNetwork destroys a previously created network. 
This rpc is only
+ // implemented if the driver needs to manage network namespace creation.
+ rpc DestroyNetwork(DestroyNetworkRequest) returns (DestroyNetworkResponse) {}
+}
+
+message TaskConfigSchemaRequest {}
+
+message TaskConfigSchemaResponse {
+
+ // Spec is the configuration schema for the job driver config block
+ hashicorp.nomad.plugins.shared.hclspec.Spec spec = 1;
+}
+
+message CapabilitiesRequest {}
+
+message CapabilitiesResponse {
+
+ // Capabilities provides a way for the driver to denote if it implements
+ // non-core RPCs. Some Driver service RPCs expose additional information
+ // or functionality outside of the core task management functions. These
+ // RPCs are only implemented if the driver sets the corresponding capability.
+ DriverCapabilities capabilities = 1;
+}
+
+message FingerprintRequest {}
+
+message FingerprintResponse {
+
+
+ // Attributes are key/value pairs that annotate the nomad client and can be
+ // used in scheduling constraints and affinities.
+ map<string, hashicorp.nomad.plugins.shared.structs.Attribute> attributes = 1;
+
+ enum HealthState {
+ UNDETECTED = 0;
+ UNHEALTHY = 1;
+ HEALTHY = 2;
+
+ }
+
+ // Health is used to determine the state of the health the driver is in.
+ // Health can be one of the following states: + // * UNDETECTED: driver dependencies are not met and the driver can not start + // * UNHEALTHY: driver dependencies are met but the driver is unable to + // perform operations due to some other problem + // * HEALTHY: driver is able to perform all operations + HealthState health = 2; + + // HealthDescription is a human readable message describing the current + // state of driver health + string health_description = 3; + + // Err is set if any driver error occurred while waiting for the fingerprint + string err = 4; +} + +message RecoverTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; + + // Handle is the TaskHandle returned from StartTask + TaskHandle handle = 2; +} + +message RecoverTaskResponse {} + +message StartTaskRequest { + + // Task configuration to launch + TaskConfig task = 1; + +} + +message StartTaskResponse { + + enum Result { + SUCCESS = 0; + RETRY = 1; + FATAL = 2; + } + + // Result is set depending on the type of error that occurred while starting + // a task: + // + // * SUCCESS: No error occurred, handle is set + // * RETRY: An error occurred, but is recoverable and the RPC should be retried + // * FATAL: A fatal error occurred and is not likely to succeed if retried + // + // If Result is not successful, the DriverErrorMsg will be set. + Result result = 1; + + // DriverErrorMsg is set if an error occurred + string driver_error_msg = 2; + + // Handle is opaque to the client, but must be stored in order to recover + // the task. + TaskHandle handle = 3; + + // NetworkOverride is set if the driver sets network settings and the service ip/port + // needs to be set differently. 
+ NetworkOverride network_override = 4; +} + +message WaitTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; +} + +message WaitTaskResponse { + + // Result is the exit status of the task + ExitResult result = 1; + // Err is set if any driver error occurred while waiting for the task + string err = 2; +} + +message StopTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; + + // Timeout defines the amount of time to wait before forcefully killing + // the task. For example, on Unix clients, this means sending a SIGKILL to + // the process. + google.protobuf.Duration timeout = 2; + + // Signal can be set to override the Task's configured shutdown signal + string signal = 3; +} + +message StopTaskResponse {} + +message DestroyTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; + + // Force destroys the task even if it is still in a running state + bool force = 2; +} + +message DestroyTaskResponse {} + +message InspectTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; +} + +message InspectTaskResponse { + + // Task details + TaskStatus task = 1; + + // Driver details for task + TaskDriverStatus driver = 2; + + // NetworkOverride info if set + NetworkOverride network_override = 3; +} + +message TaskStatsRequest { + + // TaskId is the ID of the target task + string task_id = 1; + + // CollectionInterval is the interval at which to stream stats to the caller + google.protobuf.Duration collection_interval = 2; +} + +message TaskStatsResponse { + + // Stats for the task + TaskStats stats = 1; +} + +message TaskEventsRequest {} + +message SignalTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; + + // Signal is the operating system signal to send to the task. 
Ex: SIGHUP + string signal = 2; +} + +message SignalTaskResponse {} + +message ExecTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; + + // Command is the command to execute in the task environment + repeated string command = 2; + + // Timeout is the amount of time to wait for the command to stop. + // Defaults to 0 (run forever) + google.protobuf.Duration timeout = 3; +} + +message ExecTaskResponse { + + // Stdout from the exec + bytes stdout = 1; + + // Stderr from the exec + bytes stderr = 2; + + // Result from the exec + ExitResult result = 3; +} + +message ExecTaskStreamingIOOperation { + bytes data = 1; + bool close = 2; +} + +message ExecTaskStreamingRequest { + message Setup { + string task_id = 1; + repeated string command = 2; + bool tty = 3; + } + + message TerminalSize { + int32 height = 1; + int32 width = 2; + } + + Setup setup = 1; + TerminalSize tty_size = 2; + ExecTaskStreamingIOOperation stdin = 3; +} + +message ExecTaskStreamingResponse { + ExecTaskStreamingIOOperation stdout = 1; + ExecTaskStreamingIOOperation stderr = 2; + + bool exited = 3; + ExitResult result = 4; +} + +message CreateNetworkRequest { + + // AllocID of the allocation the network is associated with + string alloc_id = 1; + + // Hostname of the network namespace + string hostname = 2; +} + +message CreateNetworkResponse { + + NetworkIsolationSpec isolation_spec = 1; + + // created indicates that the network namespace is newly created + // as a result of this request. if false, the NetworkIsolationSpec + // value returned is an existing spec. + bool created = 2; +} + +message DestroyNetworkRequest { + + // AllocID of the allocation the network is associated with + string alloc_id = 1; + + NetworkIsolationSpec isolation_spec = 2; +} + +message DestroyNetworkResponse {} + +message DriverCapabilities { + + // SendSignals indicates that the driver can send process signals (ex. SIGUSR1) + // to the task. 
bool send_signals = 1;

    // Exec indicates that the driver supports executing arbitrary commands
    // in the task's execution environment.
    bool exec = 2;

    enum FSIsolation {
        NONE = 0;
        CHROOT = 1;
        IMAGE = 2;
        UNVEIL = 3;
    }
    // FsIsolation indicates what kind of filesystem isolation a driver supports.
    FSIsolation fs_isolation = 3;

    repeated NetworkIsolationSpec.NetworkIsolationMode network_isolation_modes = 4;

    bool must_create_network = 5;

    enum MountConfigs {
        // buf:lint:ignore ENUM_NO_ALLOW_ALIAS
        option allow_alias = true;
        UNKNOWN_MOUNTS = 0; // treated as ANY_MOUNTS for backwards compatibility
        ANY_MOUNTS = 0;
        NO_MOUNTS = 1;
    }
    // MountConfigs indicates whether the driver supports mount configurations.
    MountConfigs mount_configs = 6;

    // previous remote_tasks field no longer used by nomad clients
    reserved 7;
    reserved "remote_tasks";

    // disable_log_collection indicates whether the driver has the capability of
    // disabling log collection
    bool disable_log_collection = 8;

    // dynamic_workload_users indicates the task is capable of using UID/GID
    // assigned from the Nomad client as user credentials for the task.
    bool dynamic_workload_users = 9;
}

// NetworkIsolationSpec describes the network namespace a task runs in.
message NetworkIsolationSpec {
    enum NetworkIsolationMode {
        HOST = 0;
        GROUP = 1;
        TASK = 2;
        NONE = 3;
    }
    NetworkIsolationMode mode = 1;

    string path = 2;

    // Labels carries arbitrary string metadata for the namespace.
    // (Restored map key/value types lost in transit: proto map fields
    // require explicit type parameters.)
    map<string, string> labels = 3;

    HostsConfig hostsConfig = 4;
}

message HostsConfig {
    string hostname = 1;
    string address = 2;
}

// DNSConfig carries task DNS resolver settings.
message DNSConfig {
    repeated string servers = 1;
    repeated string searches = 2;
    repeated string options = 3;
}

message TaskConfig {

    // Id of the task, recommended to be globally unique, must be unique to the driver.
string id = 1;

    // Name of the task
    string name = 2;

    // MsgpackDriverConfig is the encoded driver configuration of the task
    bytes msgpack_driver_config = 3;

    // Env is a set of key/value pairs to be set as environment variables.
    // (Restored map type parameters lost in transit.)
    map<string, string> env = 4;

    // DeviceEnv is the set of environment variables that are defined by device
    // plugins. This allows the driver to differentiate environment variables
    // set by the device plugins and those by the user. When populating the
    // task's environment env should be used.
    map<string, string> device_env = 5;

    // Resources defines the resources to isolate
    Resources resources = 6;

    // Mounts is a list of targets to bind mount into the task directory
    repeated Mount mounts = 7;

    // Devices is a list of system devices to mount into the task's execution
    // environment.
    repeated Device devices = 8;

    // User defines the operating system user the tasks should run as
    string user = 9;

    // AllocDir is the directory on the host where the allocation directory
    // exists.
    string alloc_dir = 10;

    // StdoutPath is the path to the file to open and write task stdout to
    string stdout_path = 11;

    // StderrPath is the path to the file to open and write task stderr to
    string stderr_path = 12;

    // TaskGroupName is the name of the task group which this task is a member of
    string task_group_name = 13;

    // JobName is the name of the job of which this task is part of
    string job_name = 14;

    // AllocId is the ID of the associated allocation
    string alloc_id = 15;

    // NetworkIsolationSpec specifies the configuration for the network namespace
    // to use for the task.
*Only supported on Linux + NetworkIsolationSpec network_isolation_spec = 16; + + // DNSConfig is the configuration for task DNS resolvers and other options + DNSConfig dns = 17; + + // JobId is the ID of the job of which this task is part of + string job_id = 18; + + // Namespace is the namespace of the job of which this task is part of + string namespace = 19; + + // NodeName is the name of the node where the associated allocation is running + string node_name = 20; + + // NodeId is the ID of the node where the associated allocation is running + string node_id = 21; + + // ParentJobID is the parent id for dispatch and periodic jobs + string parent_job_id = 22; +} + +message Resources { + + // AllocatedResources are the resources set for the task + AllocatedTaskResources allocated_resources = 1; + + // LinuxResources are the computed values to set for specific Linux features + LinuxResources linux_resources = 2; + + // Ports are the allocated port mappings for the allocation. + // A task may use these to manually configure port mapping if shared network namespaces aren't being used. + repeated PortMapping ports = 3; +} + +message AllocatedTaskResources { + AllocatedCpuResources cpu = 1; + AllocatedMemoryResources memory = 2; + repeated NetworkResource networks = 5; +} + +message AllocatedCpuResources { + int64 cpu_shares = 1; +} + +message AllocatedMemoryResources { + int64 memory_mb = 2; + int64 memory_max_mb = 3; +} + +message NetworkResource { + string device = 1; + string cidr = 2; + string ip = 3; + int32 mbits = 4; + repeated NetworkPort reserved_ports = 5; + repeated NetworkPort dynamic_ports = 6; +} + +message NetworkPort { + string label = 1; + int32 value = 2; +} + +message PortMapping { + string label = 1; + int32 value = 2; + int32 to = 3; + string host_ip = 4; +} + +message LinuxResources { + + // CPU CFS (Completely Fair Scheduler) period. Default: 0 (not specified) + int64 cpu_period = 1; + // CPU CFS (Completely Fair Scheduler) quota. 
Default: 0 (not specified) + int64 cpu_quota = 2; + // CPU shares (relative weight vs. other containers). Default: 0 (not specified) + int64 cpu_shares = 3; + // Memory limit in bytes. Default: 0 (not specified) + int64 memory_limit_bytes = 4; + // OOMScoreAdj adjusts the oom-killer score. Default: 0 (not specified) + int64 oom_score_adj = 5; + + // CpusetCpus constrains the allowed set of logical CPUs. Default: "" (not specified) + // This field exists to support drivers which can't set a cgroup path. + string cpuset_cpus = 6; + // Previously cpuset_mems fields never set by the Nomad client + reserved 7; + reserved "cpuset_mems"; + // CpusetCgroup is the path to the cpuset cgroup managed by the client + string cpuset_cgroup = 9; + + // PercentTicks is a compatibility option for docker and should not be used + // buf:lint:ignore FIELD_LOWER_SNAKE_CASE + double PercentTicks = 8; +} + +message Mount { + + // TaskPath is the file path within the task directory to mount to + string task_path = 1; + + // HostPath is the file path on the host to mount from + string host_path = 2; + + // Readonly if set true, mounts the path in readonly mode + bool readonly = 3; + + // Propagation mode for the mount. Not exactly the same as the unix mount + // propagation flags. See callsite usage for details. + string propagation_mode = 4; + + string selinux_label = 5; +} + +message Device { + + // TaskPath is the file path within the task to mount the device to + string task_path = 1; + + // HostPath is the path on the host to the source device + string host_path = 2; + + // CgroupPermissions defines the Cgroup permissions of the device. + // One or more of the following options can be set: + // * r - allows the task to read from the specified device. + // * w - allows the task to write to the specified device. + // * m - allows the task to create device files that do not yet exist. 
//
    // Example: "rw"
    string cgroup_permissions = 3;
}

enum TaskState {
    UNKNOWN = 0;
    RUNNING = 1;
    EXITED = 2;
}

// TaskHandle is created when starting a task and is used to recover task
message TaskHandle {

    // Version is used by the driver to version the DriverState schema.
    // Version 0 is reserved by Nomad and should not be used.
    int32 version = 1;

    // Config is the TaskConfig for the task
    TaskConfig config = 2;

    // State is the state of the task's execution
    TaskState state = 3;

    // DriverState is the encoded state for the specific driver
    bytes driver_state = 4;
}

// NetworkOverride contains network settings which the driver may override
// for the task, such as when the driver is setting up the task's network.
message NetworkOverride {

    // PortMap can be set to replace ports with driver-specific mappings.
    // (Restored map type parameters lost in transit; the value type matches
    // the map[string]int32 used by the Go server/client translation code.)
    map<string, int32> port_map = 1;

    // Addr is the IP address for the task created by the driver
    string addr = 2;

    // AutoAdvertise indicates whether the driver thinks services that choose
    // to auto_advertise_addresses should use this IP instead of the host's.
    bool auto_advertise = 3;
}

// ExitResult contains information about the exit status of a task
message ExitResult {

    // ExitCode returned from the task on exit
    int32 exit_code = 1;

    // Signal is set if a signal was sent to the task
    int32 signal = 2;

    // OomKilled is true if the task exited as a result of the OOM Killer
    bool oom_killed = 3;

}

// TaskStatus includes information of a specific task
message TaskStatus {
    string id = 1;
    string name = 2;

    // State is the state of the task's execution
    TaskState state = 3;

    // StartedAt is the timestamp when the task was started
    google.protobuf.Timestamp started_at = 4;

    // CompletedAt is the timestamp when the task exited.
    // If the task is still running, CompletedAt will not be set
    google.protobuf.Timestamp completed_at = 5;

    // Result is set when CompletedAt is set.
ExitResult result = 6;
}

// TaskDriverStatus reports driver-specific details for a task.
message TaskDriverStatus {

    // Attributes is a set of string/string key value pairs specific to the
    // implementing driver. (Restored map type parameters lost in transit.)
    map<string, string> attributes = 1;
}

message TaskStats {

    // Id of the task
    string id = 1;

    // Timestamp for which the stats were collected
    google.protobuf.Timestamp timestamp = 2;

    // AggResourceUsage is the aggregate usage of all processes
    TaskResourceUsage agg_resource_usage = 3;

    // ResourceUsageByPid breaks the usage stats by process.
    // (Restored map type parameters lost in transit.)
    map<string, TaskResourceUsage> resource_usage_by_pid = 4;
}

message TaskResourceUsage {

    // CPU usage stats
    CPUUsage cpu = 1;

    // Memory usage stats
    MemoryUsage memory = 2;
}

message CPUUsage {

    double system_mode = 1;
    double user_mode = 2;
    double total_ticks = 3;
    uint64 throttled_periods = 4;
    uint64 throttled_time = 5;
    double percent = 6;

    enum Fields {
        SYSTEM_MODE = 0;
        USER_MODE = 1;
        TOTAL_TICKS = 2;
        THROTTLED_PERIODS = 3;
        THROTTLED_TIME = 4;
        PERCENT = 5;
    }
    // MeasuredFields indicates which fields were actually sampled
    repeated Fields measured_fields = 7;
}

message MemoryUsage {
    uint64 rss = 1;
    uint64 cache = 2;
    uint64 max_usage = 3;
    uint64 kernel_usage = 4;
    uint64 kernel_max_usage = 5;
    uint64 usage = 7;
    uint64 swap = 8;

    enum Fields {
        RSS = 0;
        CACHE = 1;
        MAX_USAGE = 2;
        KERNEL_USAGE = 3;
        KERNEL_MAX_USAGE = 4;
        USAGE = 5;
        SWAP = 6;
    }
    // MeasuredFields indicates which fields were actually sampled
    repeated Fields measured_fields = 6;
}

// DriverTaskEvent is an event emitted by the driver about a task.
message DriverTaskEvent {

    // TaskId is the id of the task for the event
    string task_id = 1;

    // AllocId of the task for the event
    string alloc_id = 2;

    // TaskName is the name of the task for the event
    string task_name = 3;

    // Timestamp when the event occurred
    google.protobuf.Timestamp timestamp = 4;

    // Message is the body of the event
    string message = 5;

    // Annotations allows for additional key/value data to be sent along with the
event + map annotations = 6; +} diff --git a/plugin_interface/drivers/server.go b/plugin_interface/drivers/server.go new file mode 100644 index 00000000000..47a9ef79c8b --- /dev/null +++ b/plugin_interface/drivers/server.go @@ -0,0 +1,431 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package drivers + +import ( + "context" + "fmt" + "io" + "math" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/go-plugin" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/hashicorp/nomad/plugin-interface/drivers/fsisolation" + "github.com/hashicorp/nomad/plugin-interface/drivers/proto" + "github.com/hashicorp/nomad/plugin-interface/shared/structs" + dstructs "github.com/hashicorp/nomad/plugin-interface/shared/structs" + sproto "github.com/hashicorp/nomad/plugin-interface/shared/structs/proto" +) + +type driverPluginServer struct { + broker *plugin.GRPCBroker + impl DriverPlugin +} + +func (b *driverPluginServer) TaskConfigSchema(ctx context.Context, req *proto.TaskConfigSchemaRequest) (*proto.TaskConfigSchemaResponse, error) { + spec, err := b.impl.TaskConfigSchema() + if err != nil { + return nil, err + } + + resp := &proto.TaskConfigSchemaResponse{ + Spec: spec, + } + return resp, nil +} + +func (b *driverPluginServer) Capabilities(ctx context.Context, req *proto.CapabilitiesRequest) (*proto.CapabilitiesResponse, error) { + caps, err := b.impl.Capabilities() + if err != nil { + return nil, err + } + resp := &proto.CapabilitiesResponse{ + Capabilities: &proto.DriverCapabilities{ + SendSignals: caps.SendSignals, + Exec: caps.Exec, + MustCreateNetwork: caps.MustInitiateNetwork, + NetworkIsolationModes: []proto.NetworkIsolationSpec_NetworkIsolationMode{}, + DynamicWorkloadUsers: caps.DynamicWorkloadUsers, + }, + } + + switch caps.FSIsolation { + case fsisolation.None: + resp.Capabilities.FsIsolation = proto.DriverCapabilities_NONE + case fsisolation.Chroot: + resp.Capabilities.FsIsolation = 
proto.DriverCapabilities_CHROOT + case fsisolation.Image: + resp.Capabilities.FsIsolation = proto.DriverCapabilities_IMAGE + case fsisolation.Unveil: + resp.Capabilities.FsIsolation = proto.DriverCapabilities_UNVEIL + default: + resp.Capabilities.FsIsolation = proto.DriverCapabilities_NONE + } + + for _, mode := range caps.NetIsolationModes { + resp.Capabilities.NetworkIsolationModes = append(resp.Capabilities.NetworkIsolationModes, netIsolationModeToProto(mode)) + } + return resp, nil +} + +func (b *driverPluginServer) Fingerprint(req *proto.FingerprintRequest, srv proto.Driver_FingerprintServer) error { + ctx := srv.Context() + ch, err := b.impl.Fingerprint(ctx) + if err != nil { + return err + } + + for { + select { + case <-ctx.Done(): + return nil + case f, ok := <-ch: + + if !ok { + return ErrChannelClosed + } + + var errStr string + if f.Err != nil { + errStr = f.Err.Error() + } + + resp := &proto.FingerprintResponse{ + Err: errStr, + Attributes: dstructs.ConvertStructAttributeMap(f.Attributes), + Health: healthStateToProto(f.Health), + HealthDescription: f.HealthDescription, + } + + if err := srv.Send(resp); err != nil { + return err + } + } + } +} + +func (b *driverPluginServer) RecoverTask(ctx context.Context, req *proto.RecoverTaskRequest) (*proto.RecoverTaskResponse, error) { + err := b.impl.RecoverTask(taskHandleFromProto(req.Handle)) + if err != nil { + return nil, err + } + + return &proto.RecoverTaskResponse{}, nil +} + +func (b *driverPluginServer) StartTask(ctx context.Context, req *proto.StartTaskRequest) (*proto.StartTaskResponse, error) { + handle, net, err := b.impl.StartTask(taskConfigFromProto(req.Task)) + if err != nil { + if rec, ok := err.(structs.Recoverable); ok { + st := status.New(codes.FailedPrecondition, rec.Error()) + st, err := st.WithDetails(&sproto.RecoverableError{Recoverable: rec.IsRecoverable()}) + if err != nil { + // If this error, it will always error + panic(err) + } + return nil, st.Err() + } + return nil, err + } + + 
var pbNet *proto.NetworkOverride + if net != nil { + pbNet = &proto.NetworkOverride{ + PortMap: map[string]int32{}, + Addr: net.IP, + AutoAdvertise: net.AutoAdvertise, + } + for k, v := range net.PortMap { + if v > math.MaxInt32 { + return nil, fmt.Errorf("port map out of bounds") + } + pbNet.PortMap[k] = int32(v) + } + } + + resp := &proto.StartTaskResponse{ + Handle: taskHandleToProto(handle), + NetworkOverride: pbNet, + } + + return resp, nil +} + +func (b *driverPluginServer) WaitTask(ctx context.Context, req *proto.WaitTaskRequest) (*proto.WaitTaskResponse, error) { + ch, err := b.impl.WaitTask(ctx, req.TaskId) + if err != nil { + return nil, err + } + + var ok bool + var result *ExitResult + select { + case <-ctx.Done(): + return nil, ctx.Err() + case result, ok = <-ch: + if !ok { + return nil, ErrChannelClosed + } + } + + var errStr string + if result.Err != nil { + errStr = result.Err.Error() + } + + resp := &proto.WaitTaskResponse{ + Err: errStr, + Result: &proto.ExitResult{ + ExitCode: int32(result.ExitCode), + Signal: int32(result.Signal), + OomKilled: result.OOMKilled, + }, + } + + return resp, nil +} + +func (b *driverPluginServer) StopTask(ctx context.Context, req *proto.StopTaskRequest) (*proto.StopTaskResponse, error) { + timeout, err := ptypes.Duration(req.Timeout) + if err != nil { + return nil, err + } + + err = b.impl.StopTask(req.TaskId, timeout, req.Signal) + if err != nil { + return nil, err + } + return &proto.StopTaskResponse{}, nil +} + +func (b *driverPluginServer) DestroyTask(ctx context.Context, req *proto.DestroyTaskRequest) (*proto.DestroyTaskResponse, error) { + err := b.impl.DestroyTask(req.TaskId, req.Force) + if err != nil { + return nil, err + } + return &proto.DestroyTaskResponse{}, nil +} + +func (b *driverPluginServer) InspectTask(ctx context.Context, req *proto.InspectTaskRequest) (*proto.InspectTaskResponse, error) { + status, err := b.impl.InspectTask(req.TaskId) + if err != nil { + return nil, err + } + + protoStatus, err 
:= taskStatusToProto(status) + if err != nil { + return nil, err + } + + var pbNet *proto.NetworkOverride + if status.NetworkOverride != nil { + pbNet = &proto.NetworkOverride{ + PortMap: map[string]int32{}, + Addr: status.NetworkOverride.IP, + AutoAdvertise: status.NetworkOverride.AutoAdvertise, + } + for k, v := range status.NetworkOverride.PortMap { + pbNet.PortMap[k] = int32(v) + } + } + + resp := &proto.InspectTaskResponse{ + Task: protoStatus, + Driver: &proto.TaskDriverStatus{ + Attributes: status.DriverAttributes, + }, + NetworkOverride: pbNet, + } + + return resp, nil +} + +func (b *driverPluginServer) TaskStats(req *proto.TaskStatsRequest, srv proto.Driver_TaskStatsServer) error { + interval, err := ptypes.Duration(req.CollectionInterval) + if err != nil { + return fmt.Errorf("failed to parse collection interval: %v", err) + } + + ch, err := b.impl.TaskStats(srv.Context(), req.TaskId, interval) + if err != nil { + if rec, ok := err.(structs.Recoverable); ok { + st := status.New(codes.FailedPrecondition, rec.Error()) + st, err := st.WithDetails(&sproto.RecoverableError{Recoverable: rec.IsRecoverable()}) + if err != nil { + // If this error, it will always error + panic(err) + } + return st.Err() + } + return err + } + + for stats := range ch { + pb, err := TaskStatsToProto(stats) + if err != nil { + return fmt.Errorf("failed to encode task stats: %v", err) + } + + if err = srv.Send(&proto.TaskStatsResponse{Stats: pb}); err == io.EOF { + break + } else if err != nil { + return err + } + + } + + return nil +} + +func (b *driverPluginServer) ExecTask(ctx context.Context, req *proto.ExecTaskRequest) (*proto.ExecTaskResponse, error) { + timeout, err := ptypes.Duration(req.Timeout) + if err != nil { + return nil, err + } + + result, err := b.impl.ExecTask(req.TaskId, req.Command, timeout) + if err != nil { + return nil, err + } + resp := &proto.ExecTaskResponse{ + Stdout: result.Stdout, + Stderr: result.Stderr, + Result: exitResultToProto(result.ExitResult), + } 
+ + return resp, nil +} + +func (b *driverPluginServer) ExecTaskStreaming(server proto.Driver_ExecTaskStreamingServer) error { + msg, err := server.Recv() + if err != nil { + return fmt.Errorf("failed to receive initial message: %v", err) + } + + if msg.Setup == nil { + return fmt.Errorf("first message should always be setup") + } + + if impl, ok := b.impl.(ExecTaskStreamingRawDriver); ok { + return impl.ExecTaskStreamingRaw(server.Context(), + msg.Setup.TaskId, msg.Setup.Command, msg.Setup.Tty, + server) + } + + d, ok := b.impl.(ExecTaskStreamingDriver) + if !ok { + return fmt.Errorf("driver does not support exec") + } + + execOpts, errCh := StreamToExecOptions(server.Context(), + msg.Setup.Command, msg.Setup.Tty, + server) + + result, err := d.ExecTaskStreaming(server.Context(), + msg.Setup.TaskId, execOpts) + + execOpts.Stdout.Close() + execOpts.Stderr.Close() + + if err != nil { + return err + } + + // wait for copy to be done + select { + case err = <-errCh: + case <-server.Context().Done(): + err = fmt.Errorf("exec timed out: %v", server.Context().Err()) + } + + if err != nil { + return err + } + + server.Send(&ExecTaskStreamingResponseMsg{ + Exited: true, + Result: exitResultToProto(result), + }) + + return err +} + +func (b *driverPluginServer) SignalTask(ctx context.Context, req *proto.SignalTaskRequest) (*proto.SignalTaskResponse, error) { + err := b.impl.SignalTask(req.TaskId, req.Signal) + if err != nil { + return nil, err + } + + resp := &proto.SignalTaskResponse{} + return resp, nil +} + +func (b *driverPluginServer) TaskEvents(req *proto.TaskEventsRequest, srv proto.Driver_TaskEventsServer) error { + ch, err := b.impl.TaskEvents(srv.Context()) + if err != nil { + return err + } + + for { + event := <-ch + + if event == nil { + break + } + pbTimestamp, err := ptypes.TimestampProto(event.Timestamp) + if err != nil { + return err + } + + pbEvent := &proto.DriverTaskEvent{ + TaskId: event.TaskID, + AllocId: event.AllocID, + TaskName: event.TaskName, + 
Timestamp: pbTimestamp, + Message: event.Message, + Annotations: event.Annotations, + } + + if err = srv.Send(pbEvent); err == io.EOF { + break + } else if err != nil { + return err + } + } + return nil +} + +func (b *driverPluginServer) CreateNetwork(ctx context.Context, req *proto.CreateNetworkRequest) (*proto.CreateNetworkResponse, error) { + nm, ok := b.impl.(DriverNetworkManager) + if !ok { + return nil, fmt.Errorf("CreateNetwork RPC not supported by driver") + } + + spec, created, err := nm.CreateNetwork(req.GetAllocId(), networkCreateRequestFromProto(req)) + if err != nil { + return nil, err + } + + return &proto.CreateNetworkResponse{ + IsolationSpec: NetworkIsolationSpecToProto(spec), + Created: created, + }, nil +} + +func (b *driverPluginServer) DestroyNetwork(ctx context.Context, req *proto.DestroyNetworkRequest) (*proto.DestroyNetworkResponse, error) { + nm, ok := b.impl.(DriverNetworkManager) + if !ok { + return nil, fmt.Errorf("DestroyNetwork RPC not supported by driver") + } + + err := nm.DestroyNetwork(req.AllocId, NetworkIsolationSpecFromProto(req.IsolationSpec)) + if err != nil { + return nil, err + } + + return &proto.DestroyNetworkResponse{}, nil +} diff --git a/plugin_interface/drivers/stats.go b/plugin_interface/drivers/stats.go new file mode 100644 index 00000000000..09a247ee954 --- /dev/null +++ b/plugin_interface/drivers/stats.go @@ -0,0 +1,94 @@ +package drivers + +import ( + "errors" + "slices" + + "github.com/hashicorp/nomad/plugin-interface/device" +) + +// MemoryStats holds memory usage related stats +type MemoryStats struct { + RSS uint64 + Cache uint64 + Swap uint64 + MappedFile uint64 + Usage uint64 + MaxUsage uint64 + KernelUsage uint64 + KernelMaxUsage uint64 + + // A list of fields whose values were actually sampled + Measured []string +} + +func (ms *MemoryStats) Add(other *MemoryStats) { + if other == nil { + return + } + + ms.RSS += other.RSS + ms.Cache += other.Cache + ms.Swap += other.Swap + ms.MappedFile += 
other.MappedFile
	ms.Usage += other.Usage
	ms.MaxUsage += other.MaxUsage
	ms.KernelUsage += other.KernelUsage
	ms.KernelMaxUsage += other.KernelMaxUsage
	ms.Measured = mergeMeasured(ms.Measured, other.Measured)
}

// mergeMeasured returns the union of two Measured field lists, preserving
// first-seen order. slices.Compact only removes *adjacent* duplicates, so the
// previous Compact(Concat(...)) pattern let repeated Add calls grow the list
// without bound (e.g. ["RSS","Cache","RSS","Cache"] has no adjacent dups).
func mergeMeasured(a, b []string) []string {
	merged := make([]string, 0, len(a)+len(b))
	seen := make(map[string]struct{}, len(a)+len(b))
	for _, field := range slices.Concat(a, b) {
		if _, ok := seen[field]; !ok {
			seen[field] = struct{}{}
			merged = append(merged, field)
		}
	}
	return merged
}

// CpuStats holds cpu usage related stats
type CpuStats struct {
	SystemMode       float64
	UserMode         float64
	TotalTicks       float64
	ThrottledPeriods uint64
	ThrottledTime    uint64
	Percent          float64

	// A list of fields whose values were actually sampled
	Measured []string
}

// Add accumulates other into cs. Counters are summed and the Measured lists
// are merged without duplicates. It is a no-op when other is nil.
func (cs *CpuStats) Add(other *CpuStats) {
	if other == nil {
		return
	}

	cs.SystemMode += other.SystemMode
	cs.UserMode += other.UserMode
	cs.TotalTicks += other.TotalTicks
	cs.ThrottledPeriods += other.ThrottledPeriods
	cs.ThrottledTime += other.ThrottledTime
	cs.Percent += other.Percent
	cs.Measured = mergeMeasured(cs.Measured, other.Measured)
}

// ResourceUsage holds information related to cpu and memory stats
type ResourceUsage struct {
	MemoryStats *MemoryStats
	CpuStats    *CpuStats
	DeviceStats []*device.DeviceGroupStats
}

// Add accumulates other into ru. It is a no-op when other is nil, matching
// the nil handling of the nested Add methods.
// NOTE(review): ru.MemoryStats and ru.CpuStats must be non-nil — the nested
// Adds dereference their receivers; confirm callers always initialize them.
func (ru *ResourceUsage) Add(other *ResourceUsage) {
	if other == nil {
		return
	}
	ru.MemoryStats.Add(other.MemoryStats)
	ru.CpuStats.Add(other.CpuStats)
	ru.DeviceStats = append(ru.DeviceStats, other.DeviceStats...)
}

// TaskResourceUsage holds aggregated resource usage of all processes in a Task
// and the resource usage of the individual pids
type TaskResourceUsage struct {
	ResourceUsage *ResourceUsage
	Timestamp     int64 // UnixNano
	Pids          map[string]*ResourceUsage
}

// CheckBufSize is the size of the buffer that is used for job output
const CheckBufSize = 4 * 1024

// DriverStatsNotImplemented is the error to be returned if a driver doesn't
// implement stats.
+var DriverStatsNotImplemented = errors.New("stats not implemented for driver") diff --git a/plugin_interface/drivers/structs.go b/plugin_interface/drivers/structs.go new file mode 100644 index 00000000000..51109119a66 --- /dev/null +++ b/plugin_interface/drivers/structs.go @@ -0,0 +1,738 @@ +package drivers + +import ( + "fmt" + "hash/crc32" + "maps" + "net" + "slices" + + "github.com/hashicorp/go-set/v3" + "github.com/hashicorp/nomad/plugin-interface/lib/idset" +) + +type AllocatedDevices []*AllocatedDeviceResource + +// Index finds the matching index using the passed device. If not found, -1 is +// returned. +func (a AllocatedDevices) Index(d *AllocatedDeviceResource) int { + if d == nil { + return -1 + } + + for i, o := range a { + if o.ID().Equal(d.ID()) { + return i + } + } + + return -1 +} + +// AllocatedTaskResources are the set of resources allocated to a task. +type AllocatedTaskResources struct { + Cpu AllocatedCpuResources + Memory AllocatedMemoryResources + Networks Networks + Devices []*AllocatedDeviceResource +} + +func (a *AllocatedTaskResources) Copy() *AllocatedTaskResources { + if a == nil { + return nil + } + newA := new(AllocatedTaskResources) + *newA = *a + + // Copy the networks + newA.Networks = a.Networks.Copy() + + // Copy the devices + if newA.Devices != nil { + n := len(a.Devices) + newA.Devices = make([]*AllocatedDeviceResource, n) + for i := range n { + newA.Devices[i] = a.Devices[i].Copy() + } + } + + return newA +} + +// NetIndex finds the matching net index using device name +func (a *AllocatedTaskResources) NetIndex(n *NetworkResource) int { + return a.Networks.NetIndex(n) +} + +func (a *AllocatedTaskResources) Add(delta *AllocatedTaskResources) { + if delta == nil { + return + } + + a.Cpu.Add(&delta.Cpu) + a.Memory.Add(&delta.Memory) + + for _, n := range delta.Networks { + // Find the matching interface by IP or CIDR + idx := a.NetIndex(n) + if idx == -1 { + a.Networks = append(a.Networks, n.Copy()) + } else { + 
a.Networks[idx].Add(n) + } + } + + for _, d := range delta.Devices { + // Find the matching device + idx := AllocatedDevices(a.Devices).Index(d) + if idx == -1 { + a.Devices = append(a.Devices, d.Copy()) + } else { + a.Devices[idx].Add(d) + } + } +} + +func (a *AllocatedTaskResources) Max(other *AllocatedTaskResources) { + if other == nil { + return + } + + a.Cpu.Max(&other.Cpu) + a.Memory.Max(&other.Memory) + + for _, n := range other.Networks { + // Find the matching interface by IP or CIDR + idx := a.NetIndex(n) + if idx == -1 { + a.Networks = append(a.Networks, n.Copy()) + } else { + a.Networks[idx].Add(n) + } + } + + for _, d := range other.Devices { + // Find the matching device + idx := AllocatedDevices(a.Devices).Index(d) + if idx == -1 { + a.Devices = append(a.Devices, d.Copy()) + } else { + a.Devices[idx].Add(d) + } + } +} + +// Comparable turns AllocatedTaskResources into ComparableResources +// as a helper step in preemption +func (a *AllocatedTaskResources) Comparable() *ComparableResources { + ret := &ComparableResources{ + Flattened: AllocatedTaskResources{ + Cpu: AllocatedCpuResources{ + CpuShares: a.Cpu.CpuShares, + ReservedCores: a.Cpu.ReservedCores, + }, + Memory: AllocatedMemoryResources{ + MemoryMB: a.Memory.MemoryMB, + MemoryMaxMB: a.Memory.MemoryMaxMB, + }, + }, + } + ret.Flattened.Networks = append(ret.Flattened.Networks, a.Networks...) + return ret +} + +// Subtract only subtracts CPU and Memory resources. Network utilization +// is managed separately in NetworkIndex +func (a *AllocatedTaskResources) Subtract(delta *AllocatedTaskResources) { + if delta == nil { + return + } + + a.Cpu.Subtract(&delta.Cpu) + a.Memory.Subtract(&delta.Memory) +} + +// AllocatedMemoryResources captures the allocated memory resources. 
+type AllocatedMemoryResources struct { + MemoryMB int64 + MemoryMaxMB int64 +} + +func (a *AllocatedMemoryResources) Add(delta *AllocatedMemoryResources) { + if delta == nil { + return + } + + a.MemoryMB += delta.MemoryMB + if delta.MemoryMaxMB != 0 { + a.MemoryMaxMB += delta.MemoryMaxMB + } else { + a.MemoryMaxMB += delta.MemoryMB + } +} + +func (a *AllocatedMemoryResources) Subtract(delta *AllocatedMemoryResources) { + if delta == nil { + return + } + + a.MemoryMB -= delta.MemoryMB + if delta.MemoryMaxMB != 0 { + a.MemoryMaxMB -= delta.MemoryMaxMB + } else { + a.MemoryMaxMB -= delta.MemoryMB + } +} + +func (a *AllocatedMemoryResources) Max(other *AllocatedMemoryResources) { + if other == nil { + return + } + + if other.MemoryMB > a.MemoryMB { + a.MemoryMB = other.MemoryMB + } + if other.MemoryMaxMB > a.MemoryMaxMB { + a.MemoryMaxMB = other.MemoryMaxMB + } +} + +// AllocatedCpuResources captures the allocated CPU resources. +type AllocatedCpuResources struct { + CpuShares int64 + ReservedCores []uint16 +} + +func (a *AllocatedCpuResources) Add(delta *AllocatedCpuResources) { + if delta == nil { + return + } + + // add cpu bandwidth + a.CpuShares += delta.CpuShares + + // add cpu cores + cores := idset.From[uint16](a.ReservedCores) + deltaCores := idset.From[uint16](delta.ReservedCores) + cores.InsertSet(deltaCores) + a.ReservedCores = cores.Slice() +} + +func (a *AllocatedCpuResources) Subtract(delta *AllocatedCpuResources) { + if delta == nil { + return + } + + // remove cpu bandwidth + a.CpuShares -= delta.CpuShares + + // remove cpu cores + cores := idset.From[uint16](a.ReservedCores) + deltaCores := idset.From[uint16](delta.ReservedCores) + cores.RemoveSet(deltaCores) + a.ReservedCores = cores.Slice() +} + +func (a *AllocatedCpuResources) Max(other *AllocatedCpuResources) { + if other == nil { + return + } + + if other.CpuShares > a.CpuShares { + a.CpuShares = other.CpuShares + } + + if len(other.ReservedCores) > len(a.ReservedCores) { + a.ReservedCores = 
other.ReservedCores + } +} + +type AllocatedPortMapping struct { + // msgpack omit empty fields during serialization + _struct bool `codec:",omitempty"` // nolint: structcheck + + Label string + Value int + To int + HostIP string + IgnoreCollision bool +} + +func (m *AllocatedPortMapping) Copy() *AllocatedPortMapping { + return &AllocatedPortMapping{ + Label: m.Label, + Value: m.Value, + To: m.To, + HostIP: m.HostIP, + IgnoreCollision: m.IgnoreCollision, + } +} + +func (m *AllocatedPortMapping) Equal(o *AllocatedPortMapping) bool { + if m == nil || o == nil { + return m == o + } + switch { + case m.Label != o.Label: + return false + case m.Value != o.Value: + return false + case m.To != o.To: + return false + case m.HostIP != o.HostIP: + return false + case m.IgnoreCollision != o.IgnoreCollision: + return false + } + return true +} + +type AllocatedPorts []AllocatedPortMapping + +func (p AllocatedPorts) Equal(o AllocatedPorts) bool { + return slices.EqualFunc(p, o, func(a, b AllocatedPortMapping) bool { + return a.Equal(&b) + }) +} + +func (p AllocatedPorts) Get(label string) (AllocatedPortMapping, bool) { + for _, port := range p { + if port.Label == label { + return port, true + } + } + + return AllocatedPortMapping{}, false +} + +type Port struct { + // msgpack omit empty fields during serialization + _struct bool `codec:",omitempty"` // nolint: structcheck + + // Label is the key for HCL port blocks: port "foo" {} + Label string + + // Value is the static or dynamic port value. For dynamic ports this + // will be 0 in the jobspec and set by the scheduler. + Value int + + // To is the port inside a network namespace where this port is + // forwarded. -1 is an internal sentinel value used by Consul Connect + // to mean "same as the host port." + To int + + // HostNetwork is the name of the network this port should be assigned + // to. Jobs with a HostNetwork set can only be placed on nodes with + // that host network available. 
+ HostNetwork string + + // IgnoreCollision ignores port collisions, so the port can be used more + // than one time on a single network, for tasks that support SO_REUSEPORT + // Should be used only with static ports. + IgnoreCollision bool +} + +// AllocatedDeviceResource captures a set of allocated devices. +type AllocatedDeviceResource struct { + // Vendor, Type, and Name are used to select the plugin to request the + // device IDs from. + Vendor string + Type string + Name string + + // DeviceIDs is the set of allocated devices + DeviceIDs []string +} + +func (a *AllocatedDeviceResource) ID() *DeviceIdTuple { + if a == nil { + return nil + } + + return &DeviceIdTuple{ + Vendor: a.Vendor, + Type: a.Type, + Name: a.Name, + } +} + +func (a *AllocatedDeviceResource) Add(delta *AllocatedDeviceResource) { + if delta == nil { + return + } + + a.DeviceIDs = append(a.DeviceIDs, delta.DeviceIDs...) +} + +func (a *AllocatedDeviceResource) Copy() *AllocatedDeviceResource { + if a == nil { + return a + } + + na := *a + + // Copy the devices + na.DeviceIDs = make([]string, len(a.DeviceIDs)) + copy(na.DeviceIDs, a.DeviceIDs) + return &na +} + +// DeviceIdTuple is the tuple that identifies a device +type DeviceIdTuple struct { + Vendor string + Type string + Name string +} + +func (id *DeviceIdTuple) String() string { + if id == nil { + return "" + } + + return fmt.Sprintf("%s/%s/%s", id.Vendor, id.Type, id.Name) +} + +// Matches returns if this Device ID is a superset of the passed ID. +func (id *DeviceIdTuple) Matches(other *DeviceIdTuple) bool { + if other == nil { + return false + } + + if other.Name != "" && other.Name != id.Name { + return false + } + + if other.Vendor != "" && other.Vendor != id.Vendor { + return false + } + + if other.Type != "" && other.Type != id.Type { + return false + } + + return true +} + +// Equal returns if this Device ID is the same as the passed ID. 
+func (id *DeviceIdTuple) Equal(o *DeviceIdTuple) bool { + if id == nil && o == nil { + return true + } else if id == nil || o == nil { + return false + } + + return o.Vendor == id.Vendor && o.Type == id.Type && o.Name == id.Name +} + +type CNIConfig struct { + Args map[string]string +} + +func (d *CNIConfig) Copy() *CNIConfig { + if d == nil { + return nil + } + newMap := make(map[string]string) + maps.Copy(newMap, d.Args) + return &CNIConfig{ + Args: newMap, + } +} + +func (d *CNIConfig) Equal(o *CNIConfig) bool { + if d == nil || o == nil { + return d == o + } + return maps.Equal(d.Args, o.Args) +} + +// NetworkResource is used to represent available network +// resources +type NetworkResource struct { + // msgpack omit empty fields during serialization + _struct bool `codec:",omitempty"` // nolint: structcheck + + Mode string // Mode of the network + Device string // Name of the device + CIDR string // CIDR block of addresses + IP string // Host IP address + Hostname string `json:",omitempty"` // Hostname of the network namespace + MBits int // Throughput + DNS *DNSConfig // DNS Configuration + ReservedPorts []Port // Host Reserved ports + DynamicPorts []Port // Host Dynamically assigned ports + CNI *CNIConfig // CNIConfig Configuration +} + +func (n *NetworkResource) Hash() uint32 { + var data []byte + data = fmt.Appendf(data, "%s%s%s%s%s%d", n.Mode, n.Device, n.CIDR, n.IP, n.Hostname, n.MBits) + + for i, port := range n.ReservedPorts { + data = fmt.Appendf(data, "r%d%s%d%d", i, port.Label, port.Value, port.To) + } + + for i, port := range n.DynamicPorts { + data = fmt.Appendf(data, "d%d%s%d%d", i, port.Label, port.Value, port.To) + } + + return crc32.ChecksumIEEE(data) +} + +func (n *NetworkResource) Equal(other *NetworkResource) bool { + return n.Hash() == other.Hash() +} + +func (n *NetworkResource) Canonicalize() { + // Ensure that an empty and nil slices are treated the same to avoid scheduling + // problems since we use reflect DeepEquals. 
+ if len(n.ReservedPorts) == 0 { + n.ReservedPorts = nil + } + if len(n.DynamicPorts) == 0 { + n.DynamicPorts = nil + } + + for i, p := range n.DynamicPorts { + if p.HostNetwork == "" { + n.DynamicPorts[i].HostNetwork = "default" + } + } + for i, p := range n.ReservedPorts { + if p.HostNetwork == "" { + n.ReservedPorts[i].HostNetwork = "default" + } + } +} + +// Copy returns a deep copy of the network resource +func (n *NetworkResource) Copy() *NetworkResource { + if n == nil { + return nil + } + newR := new(NetworkResource) + *newR = *n + newR.DNS = n.DNS.Copy() + if n.ReservedPorts != nil { + newR.ReservedPorts = make([]Port, len(n.ReservedPorts)) + copy(newR.ReservedPorts, n.ReservedPorts) + } + if n.DynamicPorts != nil { + newR.DynamicPorts = make([]Port, len(n.DynamicPorts)) + copy(newR.DynamicPorts, n.DynamicPorts) + } + return newR +} + +// Add adds the resources of the delta to this, potentially +// returning an error if not possible. +func (n *NetworkResource) Add(delta *NetworkResource) { + if len(delta.ReservedPorts) > 0 { + n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...) + } + n.MBits += delta.MBits + n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...) +} + +func (n *NetworkResource) GoString() string { + return fmt.Sprintf("*%#v", *n) +} + +// PortLabels returns a map of port labels to their assigned host ports. +func (n *NetworkResource) PortLabels() map[string]int { + num := len(n.ReservedPorts) + len(n.DynamicPorts) + labelValues := make(map[string]int, num) + for _, port := range n.ReservedPorts { + labelValues[port.Label] = port.Value + } + for _, port := range n.DynamicPorts { + labelValues[port.Label] = port.Value + } + return labelValues +} + +func (n *NetworkResource) IsIPv6() bool { + ip := net.ParseIP(n.IP) + return ip != nil && ip.To4() == nil +} + +// Networks defined for a task on the Resources struct. 
+type Networks []*NetworkResource + +func (ns Networks) Copy() Networks { + if len(ns) == 0 { + return nil + } + + out := make([]*NetworkResource, len(ns)) + for i := range ns { + out[i] = ns[i].Copy() + } + return out +} + +// Port assignment and IP for the given label or empty values. +func (ns Networks) Port(label string) AllocatedPortMapping { + for _, n := range ns { + for _, p := range n.ReservedPorts { + if p.Label == label { + return AllocatedPortMapping{ + Label: label, + Value: p.Value, + To: p.To, + HostIP: n.IP, + IgnoreCollision: p.IgnoreCollision, + } + } + } + for _, p := range n.DynamicPorts { + if p.Label == label { + return AllocatedPortMapping{ + Label: label, + Value: p.Value, + To: p.To, + HostIP: n.IP, + } + } + } + } + return AllocatedPortMapping{} +} + +func (ns Networks) NetIndex(n *NetworkResource) int { + for idx, net := range ns { + if net.Device == n.Device { + return idx + } + } + return -1 +} + +// Modes returns the set of network modes used by our NetworkResource blocks. +func (ns Networks) Modes() *set.Set[string] { + return set.FromFunc(ns, func(nr *NetworkResource) string { + return nr.Mode + }) +} + +// ComparableResources is the set of resources allocated to a task group but +// not keyed by Task, making it easier to compare. +type ComparableResources struct { + Flattened AllocatedTaskResources + Shared AllocatedSharedResources +} + +func (c *ComparableResources) Add(delta *ComparableResources) { + if delta == nil { + return + } + + c.Flattened.Add(&delta.Flattened) + c.Shared.Add(&delta.Shared) +} + +func (c *ComparableResources) Subtract(delta *ComparableResources) { + if delta == nil { + return + } + + c.Flattened.Subtract(&delta.Flattened) + c.Shared.Subtract(&delta.Shared) +} + +func (c *ComparableResources) Copy() *ComparableResources { + if c == nil { + return nil + } + newR := new(ComparableResources) + *newR = *c + return newR +} + +// Superset checks if one set of resources is a superset of another. 
This +// ignores network resources, and the NetworkIndex should be used for that. +func (c *ComparableResources) Superset(other *ComparableResources) (bool, string) { + if c.Flattened.Cpu.CpuShares < other.Flattened.Cpu.CpuShares { + return false, "cpu" + } + + cores := idset.From[uint16](c.Flattened.Cpu.ReservedCores) + otherCores := idset.From[uint16](other.Flattened.Cpu.ReservedCores) + if len(c.Flattened.Cpu.ReservedCores) > 0 && !cores.Superset(otherCores) { + return false, "cores" + } + + if c.Flattened.Memory.MemoryMB < other.Flattened.Memory.MemoryMB { + return false, "memory" + } + + if c.Shared.DiskMB < other.Shared.DiskMB { + return false, "disk" + } + return true, "" +} + +// NetIndex finds the matching net index using device name +func (c *ComparableResources) NetIndex(n *NetworkResource) int { + return c.Flattened.Networks.NetIndex(n) +} + +// AllocatedSharedResources are the set of resources allocated to a task group. +type AllocatedSharedResources struct { + Networks Networks + DiskMB int64 + Ports AllocatedPorts +} + +func (a AllocatedSharedResources) Copy() AllocatedSharedResources { + return AllocatedSharedResources{ + Networks: a.Networks.Copy(), + DiskMB: a.DiskMB, + Ports: a.Ports, + } +} + +func (a *AllocatedSharedResources) Add(delta *AllocatedSharedResources) { + if delta == nil { + return + } + a.Networks = append(a.Networks, delta.Networks...) 
+ a.DiskMB += delta.DiskMB + +} + +func (a *AllocatedSharedResources) Subtract(delta *AllocatedSharedResources) { + if delta == nil { + return + } + + diff := map[*NetworkResource]bool{} + for _, n := range delta.Networks { + diff[n] = true + } + var nets Networks + for _, n := range a.Networks { + if _, ok := diff[n]; !ok { + nets = append(nets, n) + } + } + a.Networks = nets + a.DiskMB -= delta.DiskMB +} + +func (a *AllocatedSharedResources) Canonicalize() { + if len(a.Networks) > 0 { + if len(a.Networks[0].DynamicPorts)+len(a.Networks[0].ReservedPorts) > 0 && len(a.Ports) == 0 { + for _, ports := range [][]Port{a.Networks[0].DynamicPorts, a.Networks[0].ReservedPorts} { + for _, p := range ports { + a.Ports = append(a.Ports, AllocatedPortMapping{ + Label: p.Label, + Value: p.Value, + To: p.To, + HostIP: a.Networks[0].IP, + }) + } + } + } + } +} diff --git a/plugin_interface/drivers/task_handle.go b/plugin_interface/drivers/task_handle.go new file mode 100644 index 00000000000..65d24f72014 --- /dev/null +++ b/plugin_interface/drivers/task_handle.go @@ -0,0 +1,50 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package drivers + +import ( + "github.com/hashicorp/nomad/plugin-interface/base" +) + +// TaskHandle is the state shared between a driver and the client. +// It is returned to the client after starting the task and used +// for recovery of tasks during a driver restart. +type TaskHandle struct { + // Version is set by the driver an allows it to handle upgrading from + // an older DriverState struct. Prior to 0.9 the only state stored for + // driver was the reattach config for the executor. To allow upgrading to + // 0.9, Version 0 is handled as if it is the json encoded reattach config. 
+ Version int + Config *TaskConfig + State TaskState + DriverState []byte +} + +func NewTaskHandle(version int) *TaskHandle { + return &TaskHandle{Version: version} +} + +func (h *TaskHandle) SetDriverState(v interface{}) error { + h.DriverState = []byte{} + return base.MsgPackEncode(&h.DriverState, v) +} + +func (h *TaskHandle) GetDriverState(v interface{}) error { + return base.MsgPackDecode(h.DriverState, v) + +} + +func (h *TaskHandle) Copy() *TaskHandle { + if h == nil { + return nil + } + + handle := new(TaskHandle) + handle.Version = h.Version + handle.Config = h.Config.Copy() + handle.State = h.State + handle.DriverState = make([]byte, len(h.DriverState)) + copy(handle.DriverState, h.DriverState) + return handle +} diff --git a/plugin_interface/drivers/testutils/dns_testing.go b/plugin_interface/drivers/testutils/dns_testing.go new file mode 100644 index 00000000000..51ea4e99c50 --- /dev/null +++ b/plugin_interface/drivers/testutils/dns_testing.go @@ -0,0 +1,60 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package testutils + +import ( + "strings" + "testing" + + "github.com/docker/docker/libnetwork/resolvconf" + "github.com/hashicorp/nomad/plugin-interface/drivers" + "github.com/shoenig/test/must" +) + +// TestTaskDNSConfig asserts that a task is running with the given DNSConfig +func TestTaskDNSConfig(t *testing.T, driver *DriverHarness, taskID string, dns *drivers.DNSConfig) { + t.Run("dns_config", func(t *testing.T) { + caps, err := driver.Capabilities() + must.NoError(t, err) + + // FS isolation is used here as a proxy for network isolation. + // This is true for the current built-in drivers but it is not necessarily so. 
+ isolated := caps.FSIsolation != drivers.FSIsolationNone + usesHostNetwork := caps.FSIsolation != drivers.FSIsolationImage + + if !isolated { + t.Skip("dns config not supported on non isolated drivers") + } + + // write to a file and check it presence in host + r := execTask(t, driver, taskID, `cat /etc/resolv.conf`, + false, "") + must.Zero(t, r.exitCode) + + resolvConf := []byte(strings.TrimSpace(r.stdout)) + + if dns != nil { + if len(dns.Servers) > 0 { + must.SliceContainsAll(t, dns.Servers, resolvconf.GetNameservers(resolvConf, resolvconf.IP)) + } + if len(dns.Searches) > 0 { + must.SliceContainsAll(t, dns.Searches, resolvconf.GetSearchDomains(resolvConf)) + } + if len(dns.Options) > 0 { + must.SliceContainsAll(t, dns.Options, resolvconf.GetOptions(resolvConf)) + } + } else { + systemPath := "/etc/resolv.conf" + if !usesHostNetwork { + systemPath = resolvconf.Path() + } + + system, specificErr := resolvconf.GetSpecific(systemPath) + must.NoError(t, specificErr) + must.SliceContainsAll(t, resolvconf.GetNameservers(system.Content, resolvconf.IP), resolvconf.GetNameservers(resolvConf, resolvconf.IP)) + must.SliceContainsAll(t, resolvconf.GetSearchDomains(system.Content), resolvconf.GetSearchDomains(resolvConf)) + must.SliceContainsAll(t, resolvconf.GetOptions(system.Content), resolvconf.GetOptions(resolvConf)) + } + }) +} diff --git a/plugin_interface/drivers/testutils/exec_testing.go b/plugin_interface/drivers/testutils/exec_testing.go new file mode 100644 index 00000000000..badc2fc9d9f --- /dev/null +++ b/plugin_interface/drivers/testutils/exec_testing.go @@ -0,0 +1,358 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package testutils + +import ( + "context" + "fmt" + "io" + "os" + "regexp" + "runtime" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/nomad/plugin-interface/drivers" + "github.com/hashicorp/nomad/plugin-interface/drivers/fsisolation" + dproto "github.com/hashicorp/nomad/plugin-interface/drivers/proto" + // "github.com/hashicorp/nomad/plugin-interface/lib/cgroupslib" + "github.com/shoenig/test/must" +) + +func ExecTaskStreamingConformanceTests(t *testing.T, driver *DriverHarness, taskID string) { + t.Helper() + + if runtime.GOOS == "windows" { + // tests assume unix-ism now + t.Skip("test assume unix tasks") + } + + TestExecTaskStreamingBasicResponses(t, driver, taskID) + TestExecFSIsolation(t, driver, taskID) +} + +var ExecTaskStreamingBasicCases = []struct { + Name string + Command string + Tty bool + Stdin string + Stdout interface{} + Stderr interface{} + ExitCode int +}{ + { + Name: "notty: basic", + Command: "echo hello stdout; echo hello stderr >&2; exit 43", + Tty: false, + Stdout: "hello stdout\n", + Stderr: "hello stderr\n", + ExitCode: 43, + }, + { + Name: "notty: streaming", + Command: "for n in 1 2 3; do echo $n; sleep 1; done", + Tty: false, + Stdout: "1\n2\n3\n", + ExitCode: 0, + }, + { + Name: "notty: stty check", + Command: "stty size", + Tty: false, + Stderr: regexp.MustCompile("stty: .?standard input.?: Inappropriate ioctl for device\n"), + ExitCode: 1, + }, + { + Name: "notty: stdin passing", + Command: "echo hello from command; head -n1", + Tty: false, + Stdin: "hello from stdin\n", + Stdout: "hello from command\nhello from stdin\n", + ExitCode: 0, + }, + // TTY cases - difference is new lines add `\r` and child process waiting is different + { + Name: "tty: basic", + Command: "echo hello stdout; echo hello stderr >&2; exit 43", + Tty: true, + Stdout: "hello stdout\r\nhello stderr\r\n", + ExitCode: 43, + }, + { + Name: "tty: streaming", + Command: "for n in 1 2 3; do echo 
$n; sleep 1; done", + Tty: true, + Stdout: "1\r\n2\r\n3\r\n", + ExitCode: 0, + }, + { + Name: "tty: stty check", + Command: "sleep 1; stty size", + Tty: true, + Stdout: "100 100\r\n", + ExitCode: 0, + }, + { + Name: "tty: stdin passing", + Command: "head -n1", + Tty: true, + Stdin: "hello from stdin\n", + // in tty mode, we emit line twice: once for tty echoing and one for the actual head output + Stdout: "hello from stdin\r\nhello from stdin\r\n", + ExitCode: 0, + }, + { + Name: "tty: children processes", + Command: "(( sleep 3; echo from background ) & ); echo from main; exec sleep 1", + Tty: true, + // when using tty; wait for lead process only, like `docker exec -it` + Stdout: "from main\r\n", + ExitCode: 0, + }, +} + +func TestExecTaskStreamingBasicResponses(t *testing.T, driver *DriverHarness, taskID string) { + for _, c := range ExecTaskStreamingBasicCases { + t.Run("basic: "+c.Name, func(t *testing.T) { + + result := execTask(t, driver, taskID, c.Command, c.Tty, c.Stdin) + + must.Eq(t, c.ExitCode, result.exitCode) + + switch s := c.Stdout.(type) { + case string: + must.Eq(t, s, result.stdout) + case *regexp.Regexp: + must.RegexMatch(t, s, result.stdout) + case nil: + must.Eq(t, "", result.stdout) + default: + t.Fatal("unexpected type") + } + + switch s := c.Stderr.(type) { + case string: + must.Eq(t, s, result.stderr) + case *regexp.Regexp: + must.RegexMatch(t, s, result.stderr) + case nil: + must.Eq(t, "", result.stderr) + default: + t.Fatal("unexpected type") + } + }) + } +} + +// TestExecFSIsolation asserts that exec occurs inside chroot/isolation environment rather than +// on host +func TestExecFSIsolation(t *testing.T, driver *DriverHarness, taskID string) { + t.Run("isolation", func(t *testing.T) { + caps, err := driver.Capabilities() + must.NoError(t, err) + + isolated := (caps.FSIsolation != fsisolation.None) + + text := "hello from the other side" + + // write to a file and check it presence in host + w := execTask(t, driver, taskID, + 
fmt.Sprintf(`FILE=$(mktemp); echo "$FILE"; echo %q >> "${FILE}"`, text), + false, "") + must.Zero(t, w.exitCode) + + tempfile := strings.TrimSpace(w.stdout) + if !isolated { + defer os.Remove(tempfile) + } + + t.Logf("created file in task: %v", tempfile) + + // read from host + b, err := os.ReadFile(tempfile) + if !isolated { + must.NoError(t, err) + must.Eq(t, text, strings.TrimSpace(string(b))) + } else { + must.Error(t, err) + must.True(t, os.IsNotExist(err)) + } + + // read should succeed from task again + r := execTask(t, driver, taskID, + fmt.Sprintf("cat %q", tempfile), + false, "") + must.Zero(t, r.exitCode) + must.Eq(t, text, strings.TrimSpace(r.stdout)) + + // we always run in a cgroup - testing freezer cgroup + r = execTask(t, driver, taskID, + "cat /proc/self/cgroup", + false, "", + ) + must.Zero(t, r.exitCode) + + info, _ := driver.PluginInfo() + if info.Name == "docker" { + // Note: docker on cgroups v2 now returns nothing + // root@97b4d3d33035:/# cat /proc/self/cgroup + // 0::/ + t.Skip("/proc/self/cgroup not useful in docker cgroups.v2") + } + // e.g. 
0::/testing.slice/5bdbd6c2-8aba-3ab2-728b-0ff3a81727a9.sleep.scope + must.True(t, strings.HasSuffix(strings.TrimSpace(r.stdout), ".scope"), must.Sprintf("actual stdout %q", r.stdout)) + }) +} + +func ExecTask(t *testing.T, driver *DriverHarness, taskID string, cmd string, tty bool, stdin string) (exitCode int, stdout, stderr string) { + r := execTask(t, driver, taskID, cmd, tty, stdin) + return r.exitCode, r.stdout, r.stderr +} + +func execTask(t *testing.T, driver *DriverHarness, taskID string, cmd string, tty bool, stdin string) execResult { + stream := newTestExecStream(t, tty, stdin) + + ctx, cancelFn := context.WithTimeout(context.Background(), 30*time.Second) + defer cancelFn() + + command := []string{"/bin/sh", "-c", cmd} + + isRaw := false + exitCode := -2 + if raw, ok := driver.impl.(drivers.ExecTaskStreamingRawDriver); ok { + isRaw = true + err := raw.ExecTaskStreamingRaw(ctx, taskID, + command, tty, stream) + must.NoError(t, err) + } else if d, ok := driver.impl.(drivers.ExecTaskStreamingDriver); ok { + execOpts, errCh := drivers.StreamToExecOptions(ctx, command, tty, stream) + + r, err := d.ExecTaskStreaming(ctx, taskID, execOpts) + must.NoError(t, err) + + select { + case err := <-errCh: + must.NoError(t, err) + default: + // all good + } + + exitCode = r.ExitCode + } else { + t.Fatal("driver does not support exec") + } + + result := stream.currentResult() + must.NoError(t, result.err) + + if !isRaw { + result.exitCode = exitCode + } + + return result +} + +type execResult struct { + exitCode int + stdout string + stderr string + + err error +} + +func newTestExecStream(t *testing.T, tty bool, stdin string) *testExecStream { + + return &testExecStream{ + t: t, + input: newInputStream(tty, stdin), + result: &execResult{exitCode: -2}, + } +} + +func newInputStream(tty bool, stdin string) []*drivers.ExecTaskStreamingRequestMsg { + input := []*drivers.ExecTaskStreamingRequestMsg{} + if tty { + // emit two resize to ensure we honor latest + input = 
append(input, &drivers.ExecTaskStreamingRequestMsg{ + TtySize: &dproto.ExecTaskStreamingRequest_TerminalSize{ + Height: 50, + Width: 40, + }}) + input = append(input, &drivers.ExecTaskStreamingRequestMsg{ + TtySize: &dproto.ExecTaskStreamingRequest_TerminalSize{ + Height: 100, + Width: 100, + }}) + + } + + input = append(input, &drivers.ExecTaskStreamingRequestMsg{ + Stdin: &dproto.ExecTaskStreamingIOOperation{ + Data: []byte(stdin), + }, + }) + + if !tty { + // don't close stream in interactive session and risk closing tty prematurely + input = append(input, &drivers.ExecTaskStreamingRequestMsg{ + Stdin: &dproto.ExecTaskStreamingIOOperation{ + Close: true, + }, + }) + } + + return input +} + +var _ drivers.ExecTaskStream = (*testExecStream)(nil) + +type testExecStream struct { + t *testing.T + + // input + input []*drivers.ExecTaskStreamingRequestMsg + recvCalled int + + // result so far + resultLock sync.Mutex + result *execResult +} + +func (s *testExecStream) currentResult() execResult { + s.resultLock.Lock() + defer s.resultLock.Unlock() + + // make a copy + return *s.result +} + +func (s *testExecStream) Recv() (*drivers.ExecTaskStreamingRequestMsg, error) { + if s.recvCalled >= len(s.input) { + return nil, io.EOF + } + + i := s.input[s.recvCalled] + s.recvCalled++ + return i, nil +} + +func (s *testExecStream) Send(m *drivers.ExecTaskStreamingResponseMsg) error { + s.resultLock.Lock() + defer s.resultLock.Unlock() + + switch { + case m.Stdout != nil && m.Stdout.Data != nil: + s.t.Logf("received stdout: %s", string(m.Stdout.Data)) + s.result.stdout += string(m.Stdout.Data) + case m.Stderr != nil && m.Stderr.Data != nil: + s.t.Logf("received stderr: %s", string(m.Stderr.Data)) + s.result.stderr += string(m.Stderr.Data) + case m.Exited && m.Result != nil: + s.result.exitCode = int(m.Result.ExitCode) + } + + return nil +} diff --git a/plugin_interface/drivers/testutils/testing.go b/plugin_interface/drivers/testutils/testing.go new file mode 100644 index 
00000000000..179dbf89715 --- /dev/null +++ b/plugin_interface/drivers/testutils/testing.go @@ -0,0 +1,155 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package testutils + +import ( + "context" + "fmt" + "testing" + "time" + + hclog "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/plugin-interface/base" + "github.com/hashicorp/nomad/plugin-interface/drivers" + "github.com/hashicorp/nomad/plugin-interface/shared/hclspec" + "github.com/shoenig/test/must" +) + +type DriverHarness struct { + drivers.DriverPlugin + client *plugin.GRPCClient + server *plugin.GRPCServer + t *testing.T + logger hclog.Logger + impl drivers.DriverPlugin + cgroup string +} + +func (h *DriverHarness) Impl() drivers.DriverPlugin { + return h.impl +} +func NewDriverHarness(t *testing.T, d drivers.DriverPlugin) *DriverHarness { + logger := hclog.New(hclog.DefaultOptions) + pd := drivers.NewDriverPlugin(d, logger) + + client, server := plugin.TestPluginGRPCConn(t, + true, + map[string]plugin.Plugin{ + base.PluginTypeDriver: pd, + base.PluginTypeBase: &base.PluginBase{Impl: d}, + }, + ) + + raw, err := client.Dispense(base.PluginTypeDriver) + must.NoError(t, err) + + dClient := raw.(drivers.DriverPlugin) + return &DriverHarness{ + client: client, + server: server, + DriverPlugin: dClient, + logger: logger, + t: t, + impl: d, + } +} + +func (h *DriverHarness) Kill() { + _ = h.client.Close() + h.server.Stop() +} + +// WaitUntilStarted will block until the task for the given ID is in the running +// state or the timeout is reached +func (h *DriverHarness) WaitUntilStarted(taskID string, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + var lastState drivers.TaskState + for { + status, err := h.InspectTask(taskID) + if err != nil { + return err + } + if status.State == drivers.TaskStateRunning { + return nil + } + lastState = status.State + if time.Now().After(deadline) { + return 
fmt.Errorf("task never transitioned to running, currently '%s'", lastState) + } + time.Sleep(100 * time.Millisecond) + } +} + +// MockDriver is used for testing. +// Each function can be set as a closure to make assertions about how data +// is passed through the base plugin layer. +type MockDriver struct { + base.MockPlugin + TaskConfigSchemaF func() (*hclspec.Spec, error) + FingerprintF func(context.Context) (<-chan *drivers.Fingerprint, error) + CapabilitiesF func() (*drivers.Capabilities, error) + RecoverTaskF func(*drivers.TaskHandle) error + StartTaskF func(*drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error) + WaitTaskF func(context.Context, string) (<-chan *drivers.ExitResult, error) + StopTaskF func(string, time.Duration, string) error + DestroyTaskF func(string, bool) error + InspectTaskF func(string) (*drivers.TaskStatus, error) + TaskStatsF func(context.Context, string, time.Duration) (<-chan *drivers.TaskResourceUsage, error) + TaskEventsF func(context.Context) (<-chan *drivers.TaskEvent, error) + SignalTaskF func(string, string) error + ExecTaskF func(string, []string, time.Duration) (*drivers.ExecTaskResult, error) + ExecTaskStreamingF func(context.Context, string, *drivers.ExecOptions) (*drivers.ExitResult, error) + MockNetworkManager +} + +type MockNetworkManager struct { + CreateNetworkF func(string, *drivers.NetworkCreateRequest) (*drivers.NetworkIsolationSpec, bool, error) + DestroyNetworkF func(string, *drivers.NetworkIsolationSpec) error +} + +func (m *MockNetworkManager) CreateNetwork(allocID string, req *drivers.NetworkCreateRequest) (*drivers.NetworkIsolationSpec, bool, error) { + return m.CreateNetworkF(allocID, req) +} +func (m *MockNetworkManager) DestroyNetwork(id string, spec *drivers.NetworkIsolationSpec) error { + return m.DestroyNetworkF(id, spec) +} + +func (d *MockDriver) TaskConfigSchema() (*hclspec.Spec, error) { return d.TaskConfigSchemaF() } +func (d *MockDriver) Fingerprint(ctx context.Context) (<-chan 
*drivers.Fingerprint, error) { + return d.FingerprintF(ctx) +} +func (d *MockDriver) Capabilities() (*drivers.Capabilities, error) { return d.CapabilitiesF() } +func (d *MockDriver) RecoverTask(h *drivers.TaskHandle) error { return d.RecoverTaskF(h) } +func (d *MockDriver) StartTask(c *drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error) { + return d.StartTaskF(c) +} +func (d *MockDriver) WaitTask(ctx context.Context, id string) (<-chan *drivers.ExitResult, error) { + return d.WaitTaskF(ctx, id) +} +func (d *MockDriver) StopTask(taskID string, timeout time.Duration, signal string) error { + return d.StopTaskF(taskID, timeout, signal) +} +func (d *MockDriver) DestroyTask(taskID string, force bool) error { + return d.DestroyTaskF(taskID, force) +} +func (d *MockDriver) InspectTask(taskID string) (*drivers.TaskStatus, error) { + return d.InspectTaskF(taskID) +} +func (d *MockDriver) TaskStats(ctx context.Context, taskID string, i time.Duration) (<-chan *drivers.TaskResourceUsage, error) { + return d.TaskStatsF(ctx, taskID, i) +} +func (d *MockDriver) TaskEvents(ctx context.Context) (<-chan *drivers.TaskEvent, error) { + return d.TaskEventsF(ctx) +} +func (d *MockDriver) SignalTask(taskID string, signal string) error { + return d.SignalTaskF(taskID, signal) +} +func (d *MockDriver) ExecTask(taskID string, cmd []string, timeout time.Duration) (*drivers.ExecTaskResult, error) { + return d.ExecTaskF(taskID, cmd, timeout) +} + +func (d *MockDriver) ExecTaskStreaming(ctx context.Context, taskID string, execOpts *drivers.ExecOptions) (*drivers.ExitResult, error) { + return d.ExecTaskStreamingF(ctx, taskID, execOpts) +} diff --git a/plugin_interface/drivers/testutils/testing_test.go b/plugin_interface/drivers/testutils/testing_test.go new file mode 100644 index 00000000000..db41643aa12 --- /dev/null +++ b/plugin_interface/drivers/testutils/testing_test.go @@ -0,0 +1,290 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package testutils + +import ( + "bytes" + "context" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/nomad/plugin-interface/base" + "github.com/hashicorp/nomad/plugin-interface/drivers" + pstructs "github.com/hashicorp/nomad/plugin-interface/shared/structs" + "github.com/shoenig/test/must" +) + +var _ drivers.DriverPlugin = (*MockDriver)(nil) + +// Very simple test to ensure the test harness works as expected +func TestDriverHarness(t *testing.T) { + // ci.Parallel(t) + + handle := &drivers.TaskHandle{Config: &drivers.TaskConfig{Name: "mock"}} + d := &MockDriver{ + StartTaskF: func(task *drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error) { + return handle, nil, nil + }, + } + harness := NewDriverHarness(t, d) + defer harness.Kill() + actual, _, err := harness.StartTask(&drivers.TaskConfig{}) + must.NoError(t, err) + must.Eq(t, handle.Config.Name, actual.Config.Name) +} + +type testDriverState struct { + Pid int + Log string +} + +func TestBaseDriver_Fingerprint(t *testing.T) { + // ci.Parallel(t) + + fingerprints := []*drivers.Fingerprint{ + { + Attributes: map[string]*pstructs.Attribute{"foo": pstructs.NewStringAttribute("bar")}, + Health: drivers.HealthStateUnhealthy, + HealthDescription: "starting up", + }, + { + Attributes: map[string]*pstructs.Attribute{"foo": pstructs.NewStringAttribute("bar")}, + Health: drivers.HealthStateHealthy, + HealthDescription: "running", + }, + } + + var complete atomic.Value + complete.Store(false) + + impl := &MockDriver{ + FingerprintF: func(ctx context.Context) (<-chan *drivers.Fingerprint, error) { + ch := make(chan *drivers.Fingerprint) + go func() { + defer close(ch) + ch <- fingerprints[0] + time.Sleep(500 * time.Millisecond) + ch <- fingerprints[1] + complete.Store(true) + }() + return ch, nil + }, + } + + harness := NewDriverHarness(t, impl) + defer harness.Kill() + + ch, err := 
harness.Fingerprint(context.Background()) + must.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + select { + case f := <-ch: + must.Eq(t, f, fingerprints[0]) + case <-time.After(1 * time.Second): + t.Fatal("did not receive fingerprint[0]") + } + select { + case f := <-ch: + must.Eq(t, f, fingerprints[1]) + case <-time.After(1 * time.Second): + t.Fatal("did not receive fingerprint[1]") + } + }() + must.False(t, complete.Load().(bool)) + wg.Wait() + must.True(t, complete.Load().(bool)) +} + +func TestBaseDriver_RecoverTask(t *testing.T) { + // ci.Parallel(t) + + // build driver state and encode it into proto msg + state := testDriverState{Pid: 1, Log: "foo"} + var buf bytes.Buffer + enc := codec.NewEncoder(&buf, base.MsgpackHandle) + enc.Encode(state) + + // mock the RecoverTask driver call + impl := &MockDriver{ + RecoverTaskF: func(h *drivers.TaskHandle) error { + var actual testDriverState + must.NoError(t, h.GetDriverState(&actual)) + must.Eq(t, state, actual) + return nil + }, + } + + harness := NewDriverHarness(t, impl) + defer harness.Kill() + + handle := &drivers.TaskHandle{ + DriverState: buf.Bytes(), + } + err := harness.RecoverTask(handle) + must.NoError(t, err) +} + +func TestBaseDriver_StartTask(t *testing.T) { + // ci.Parallel(t) + + cfg := &drivers.TaskConfig{ + ID: "foo", + } + state := &testDriverState{Pid: 1, Log: "log"} + var handle *drivers.TaskHandle + impl := &MockDriver{ + StartTaskF: func(c *drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error) { + handle = drivers.NewTaskHandle(1) + handle.Config = c + handle.State = drivers.TaskStateRunning + handle.SetDriverState(state) + return handle, nil, nil + }, + } + + harness := NewDriverHarness(t, impl) + defer harness.Kill() + resp, _, err := harness.StartTask(cfg) + must.NoError(t, err) + must.Eq(t, cfg.ID, resp.Config.ID) + must.Eq(t, handle.State, resp.State) + + var actualState testDriverState + must.NoError(t, 
resp.GetDriverState(&actualState)) + must.Eq(t, *state, actualState) + +} + +func TestBaseDriver_WaitTask(t *testing.T) { + // ci.Parallel(t) + + result := &drivers.ExitResult{ExitCode: 1, Signal: 9} + + signalTask := make(chan struct{}) + + impl := &MockDriver{ + WaitTaskF: func(_ context.Context, id string) (<-chan *drivers.ExitResult, error) { + ch := make(chan *drivers.ExitResult) + go func() { + <-signalTask + ch <- result + }() + return ch, nil + }, + } + + harness := NewDriverHarness(t, impl) + defer harness.Kill() + var wg sync.WaitGroup + wg.Add(1) + var finished bool + go func() { + defer wg.Done() + ch, err := harness.WaitTask(context.TODO(), "foo") + must.NoError(t, err) + actualResult := <-ch + finished = true + must.Eq(t, result, actualResult) + }() + must.False(t, finished) + close(signalTask) + wg.Wait() + must.True(t, finished) +} + +func TestBaseDriver_TaskEvents(t *testing.T) { + // ci.Parallel(t) + + now := time.Now().UTC().Truncate(time.Millisecond) + events := []*drivers.TaskEvent{ + { + TaskID: "abc", + Timestamp: now, + Annotations: map[string]string{"foo": "bar"}, + Message: "starting", + }, + { + TaskID: "xyz", + Timestamp: now.Add(2 * time.Second), + Annotations: map[string]string{"foo": "bar"}, + Message: "starting", + }, + { + TaskID: "xyz", + Timestamp: now.Add(3 * time.Second), + Annotations: map[string]string{"foo": "bar"}, + Message: "running", + }, + { + TaskID: "abc", + Timestamp: now.Add(4 * time.Second), + Annotations: map[string]string{"foo": "bar"}, + Message: "running", + }, + } + + impl := &MockDriver{ + TaskEventsF: func(ctx context.Context) (<-chan *drivers.TaskEvent, error) { + ch := make(chan *drivers.TaskEvent) + go func() { + defer close(ch) + for _, event := range events { + ch <- event + } + }() + return ch, nil + }, + } + + harness := NewDriverHarness(t, impl) + defer harness.Kill() + + ch, err := harness.TaskEvents(context.Background()) + must.NoError(t, err) + + for _, event := range events { + select { + case 
actual := <-ch: + must.Eq(t, actual, event) + case <-time.After(500 * time.Millisecond): + t.Fatal("failed to receive event") + + } + } + +} + +func TestBaseDriver_Capabilities(t *testing.T) { + // ci.Parallel(t) + + capabilities := &drivers.Capabilities{ + NetIsolationModes: []drivers.NetIsolationMode{ + drivers.NetIsolationModeHost, + drivers.NetIsolationModeGroup, + }, + MustInitiateNetwork: true, + SendSignals: true, + Exec: true, + FSIsolation: drivers.FSIsolationNone, + } + d := &MockDriver{ + CapabilitiesF: func() (*drivers.Capabilities, error) { + return capabilities, nil + }, + } + + harness := NewDriverHarness(t, d) + defer harness.Kill() + + caps, err := harness.Capabilities() + must.NoError(t, err) + must.Eq(t, capabilities, caps) +} diff --git a/plugin_interface/drivers/utils.go b/plugin_interface/drivers/utils.go new file mode 100644 index 00000000000..c40a1e93c2e --- /dev/null +++ b/plugin_interface/drivers/utils.go @@ -0,0 +1,733 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package drivers + +import ( + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/nomad/plugin-interface/drivers/proto" +) + +var taskStateToProtoMap = map[TaskState]proto.TaskState{ + TaskStateUnknown: proto.TaskState_UNKNOWN, + TaskStateRunning: proto.TaskState_RUNNING, + TaskStateExited: proto.TaskState_EXITED, +} + +var taskStateFromProtoMap = map[proto.TaskState]TaskState{ + proto.TaskState_UNKNOWN: TaskStateUnknown, + proto.TaskState_RUNNING: TaskStateRunning, + proto.TaskState_EXITED: TaskStateExited, +} + +func healthStateToProto(health HealthState) proto.FingerprintResponse_HealthState { + switch health { + case HealthStateUndetected: + return proto.FingerprintResponse_UNDETECTED + case HealthStateUnhealthy: + return proto.FingerprintResponse_UNHEALTHY + case HealthStateHealthy: + return proto.FingerprintResponse_HEALTHY + } + return proto.FingerprintResponse_UNDETECTED +} + +func healthStateFromProto(pb 
proto.FingerprintResponse_HealthState) HealthState { + switch pb { + case proto.FingerprintResponse_UNDETECTED: + return HealthStateUndetected + case proto.FingerprintResponse_UNHEALTHY: + return HealthStateUnhealthy + case proto.FingerprintResponse_HEALTHY: + return HealthStateHealthy + } + return HealthStateUndetected +} + +func taskConfigFromProto(pb *proto.TaskConfig) *TaskConfig { + if pb == nil { + return &TaskConfig{} + } + return &TaskConfig{ + ID: pb.Id, + JobName: pb.JobName, + JobID: pb.JobId, + TaskGroupName: pb.TaskGroupName, + Name: pb.Name, + Namespace: pb.Namespace, + NodeName: pb.NodeName, + NodeID: pb.NodeId, + Env: pb.Env, + DeviceEnv: pb.DeviceEnv, + Resources: ResourcesFromProto(pb.Resources), + Devices: DevicesFromProto(pb.Devices), + Mounts: MountsFromProto(pb.Mounts), + User: pb.User, + AllocDir: pb.AllocDir, + rawDriverConfig: pb.MsgpackDriverConfig, + StdoutPath: pb.StdoutPath, + StderrPath: pb.StderrPath, + AllocID: pb.AllocId, + NetworkIsolation: NetworkIsolationSpecFromProto(pb.NetworkIsolationSpec), + DNS: dnsConfigFromProto(pb.Dns), + } +} + +func taskConfigToProto(cfg *TaskConfig) *proto.TaskConfig { + if cfg == nil { + return &proto.TaskConfig{} + } + pb := &proto.TaskConfig{ + Id: cfg.ID, + JobName: cfg.JobName, + JobId: cfg.JobID, + TaskGroupName: cfg.TaskGroupName, + Name: cfg.Name, + Namespace: cfg.Namespace, + NodeName: cfg.NodeName, + NodeId: cfg.NodeID, + Env: cfg.Env, + DeviceEnv: cfg.DeviceEnv, + Resources: ResourcesToProto(cfg.Resources), + Devices: DevicesToProto(cfg.Devices), + Mounts: MountsToProto(cfg.Mounts), + User: cfg.User, + AllocDir: cfg.AllocDir, + MsgpackDriverConfig: cfg.rawDriverConfig, + StdoutPath: cfg.StdoutPath, + StderrPath: cfg.StderrPath, + AllocId: cfg.AllocID, + NetworkIsolationSpec: NetworkIsolationSpecToProto(cfg.NetworkIsolation), + Dns: dnsConfigToProto(cfg.DNS), + } + return pb +} + +func ResourcesFromProto(pb *proto.Resources) *Resources { + var r Resources + if pb == nil { + return &r + } + + 
if pb.AllocatedResources != nil { + r.NomadResources = &AllocatedTaskResources{} + + if pb.AllocatedResources.Cpu != nil { + r.NomadResources.Cpu.CpuShares = pb.AllocatedResources.Cpu.CpuShares + } + + if pb.AllocatedResources.Memory != nil { + r.NomadResources.Memory.MemoryMB = pb.AllocatedResources.Memory.MemoryMb + r.NomadResources.Memory.MemoryMaxMB = pb.AllocatedResources.Memory.MemoryMaxMb + } + + for _, network := range pb.AllocatedResources.Networks { + var n NetworkResource + n.Device = network.Device + n.IP = network.Ip + n.CIDR = network.Cidr + n.MBits = int(network.Mbits) + for _, port := range network.ReservedPorts { + n.ReservedPorts = append(n.ReservedPorts, Port{ + Label: port.Label, + Value: int(port.Value), + }) + } + for _, port := range network.DynamicPorts { + n.DynamicPorts = append(n.DynamicPorts, Port{ + Label: port.Label, + Value: int(port.Value), + }) + } + r.NomadResources.Networks = append(r.NomadResources.Networks, &n) + } + } + + if pb.LinuxResources != nil { + r.LinuxResources = &LinuxResources{ + CPUPeriod: pb.LinuxResources.CpuPeriod, + CPUQuota: pb.LinuxResources.CpuQuota, + CPUShares: pb.LinuxResources.CpuShares, + MemoryLimitBytes: pb.LinuxResources.MemoryLimitBytes, + OOMScoreAdj: pb.LinuxResources.OomScoreAdj, + CpusetCpus: pb.LinuxResources.CpusetCpus, + CpusetCgroupPath: pb.LinuxResources.CpusetCgroup, + PercentTicks: pb.LinuxResources.PercentTicks, + } + } + + if pb.Ports != nil { + ports := AllocatedPorts(make([]AllocatedPortMapping, len(pb.Ports))) + for i, port := range pb.Ports { + ports[i] = AllocatedPortMapping{ + Label: port.Label, + Value: int(port.Value), + To: int(port.To), + HostIP: port.HostIp, + } + } + r.Ports = &ports + } + + return &r +} + +func ResourcesToProto(r *Resources) *proto.Resources { + if r == nil { + return nil + } + + var pb proto.Resources + if r.NomadResources != nil { + pb.AllocatedResources = &proto.AllocatedTaskResources{ + Cpu: &proto.AllocatedCpuResources{ + CpuShares: 
r.NomadResources.Cpu.CpuShares, + }, + Memory: &proto.AllocatedMemoryResources{ + MemoryMb: r.NomadResources.Memory.MemoryMB, + MemoryMaxMb: r.NomadResources.Memory.MemoryMaxMB, + }, + Networks: make([]*proto.NetworkResource, len(r.NomadResources.Networks)), + } + + for i, network := range r.NomadResources.Networks { + var n proto.NetworkResource + n.Device = network.Device + n.Ip = network.IP + n.Cidr = network.CIDR + n.Mbits = int32(network.MBits) + n.ReservedPorts = []*proto.NetworkPort{} + for _, port := range network.ReservedPorts { + n.ReservedPorts = append(n.ReservedPorts, &proto.NetworkPort{ + Label: port.Label, + Value: int32(port.Value), + }) + } + for _, port := range network.DynamicPorts { + n.DynamicPorts = append(n.DynamicPorts, &proto.NetworkPort{ + Label: port.Label, + Value: int32(port.Value), + }) + } + pb.AllocatedResources.Networks[i] = &n + } + } + + if r.LinuxResources != nil { + pb.LinuxResources = &proto.LinuxResources{ + CpuPeriod: r.LinuxResources.CPUPeriod, + CpuQuota: r.LinuxResources.CPUQuota, + CpuShares: r.LinuxResources.CPUShares, + MemoryLimitBytes: r.LinuxResources.MemoryLimitBytes, + OomScoreAdj: r.LinuxResources.OOMScoreAdj, + CpusetCpus: r.LinuxResources.CpusetCpus, + CpusetCgroup: r.LinuxResources.CpusetCgroupPath, + PercentTicks: r.LinuxResources.PercentTicks, + } + } + + if r.Ports != nil { + ports := make([]*proto.PortMapping, len(*r.Ports)) + for i, port := range *r.Ports { + ports[i] = &proto.PortMapping{ + Label: port.Label, + Value: int32(port.Value), + To: int32(port.To), + HostIp: port.HostIP, + } + } + + pb.Ports = ports + } + + return &pb +} + +func DevicesFromProto(devices []*proto.Device) []*DeviceConfig { + if devices == nil { + return nil + } + + out := make([]*DeviceConfig, len(devices)) + for i, d := range devices { + out[i] = DeviceFromProto(d) + } + + return out +} + +func DeviceFromProto(device *proto.Device) *DeviceConfig { + if device == nil { + return nil + } + + return &DeviceConfig{ + TaskPath: 
device.TaskPath, + HostPath: device.HostPath, + Permissions: device.CgroupPermissions, + } +} + +func MountsFromProto(mounts []*proto.Mount) []*MountConfig { + if mounts == nil { + return nil + } + + out := make([]*MountConfig, len(mounts)) + for i, m := range mounts { + out[i] = MountFromProto(m) + } + + return out +} + +func MountFromProto(mount *proto.Mount) *MountConfig { + if mount == nil { + return nil + } + + return &MountConfig{ + TaskPath: mount.TaskPath, + HostPath: mount.HostPath, + Readonly: mount.Readonly, + PropagationMode: mount.PropagationMode, + SELinuxLabel: mount.SelinuxLabel, + } +} + +func DevicesToProto(devices []*DeviceConfig) []*proto.Device { + if devices == nil { + return nil + } + + out := make([]*proto.Device, len(devices)) + for i, d := range devices { + out[i] = DeviceToProto(d) + } + + return out +} + +func DeviceToProto(device *DeviceConfig) *proto.Device { + if device == nil { + return nil + } + + return &proto.Device{ + TaskPath: device.TaskPath, + HostPath: device.HostPath, + CgroupPermissions: device.Permissions, + } +} + +func MountsToProto(mounts []*MountConfig) []*proto.Mount { + if mounts == nil { + return nil + } + + out := make([]*proto.Mount, len(mounts)) + for i, m := range mounts { + out[i] = MountToProto(m) + } + + return out +} + +func MountToProto(mount *MountConfig) *proto.Mount { + if mount == nil { + return nil + } + + return &proto.Mount{ + TaskPath: mount.TaskPath, + HostPath: mount.HostPath, + Readonly: mount.Readonly, + PropagationMode: mount.PropagationMode, + SelinuxLabel: mount.SELinuxLabel, + } +} + +func taskHandleFromProto(pb *proto.TaskHandle) *TaskHandle { + if pb == nil { + return &TaskHandle{} + } + return &TaskHandle{ + Version: int(pb.Version), + Config: taskConfigFromProto(pb.Config), + State: taskStateFromProtoMap[pb.State], + DriverState: pb.DriverState, + } +} + +func taskHandleToProto(handle *TaskHandle) *proto.TaskHandle { + return &proto.TaskHandle{ + Version: int32(handle.Version), + Config: 
taskConfigToProto(handle.Config), + State: taskStateToProtoMap[handle.State], + DriverState: handle.DriverState, + } +} + +func exitResultToProto(result *ExitResult) *proto.ExitResult { + if result == nil { + return &proto.ExitResult{} + } + return &proto.ExitResult{ + ExitCode: int32(result.ExitCode), + Signal: int32(result.Signal), + OomKilled: result.OOMKilled, + } +} + +func exitResultFromProto(pb *proto.ExitResult) *ExitResult { + return &ExitResult{ + ExitCode: int(pb.ExitCode), + Signal: int(pb.Signal), + OOMKilled: pb.OomKilled, + } +} + +func taskStatusToProto(status *TaskStatus) (*proto.TaskStatus, error) { + started, err := ptypes.TimestampProto(status.StartedAt) + if err != nil { + return nil, err + } + completed, err := ptypes.TimestampProto(status.CompletedAt) + if err != nil { + return nil, err + } + return &proto.TaskStatus{ + Id: status.ID, + Name: status.Name, + State: taskStateToProtoMap[status.State], + StartedAt: started, + CompletedAt: completed, + Result: exitResultToProto(status.ExitResult), + }, nil +} + +func taskStatusFromProto(pb *proto.TaskStatus) (*TaskStatus, error) { + started, err := ptypes.Timestamp(pb.StartedAt) + if err != nil { + return nil, err + } + + completed, err := ptypes.Timestamp(pb.CompletedAt) + if err != nil { + return nil, err + } + + return &TaskStatus{ + ID: pb.Id, + Name: pb.Name, + State: taskStateFromProtoMap[pb.State], + StartedAt: started, + CompletedAt: completed, + ExitResult: exitResultFromProto(pb.Result), + }, nil +} + +func TaskStatsToProto(stats *TaskResourceUsage) (*proto.TaskStats, error) { + timestamp, err := ptypes.TimestampProto(time.Unix(0, stats.Timestamp)) + if err != nil { + return nil, err + } + + pids := map[string]*proto.TaskResourceUsage{} + for pid, ru := range stats.Pids { + pids[pid] = resourceUsageToProto(ru) + } + + return &proto.TaskStats{ + Timestamp: timestamp, + AggResourceUsage: resourceUsageToProto(stats.ResourceUsage), + ResourceUsageByPid: pids, + }, nil +} + +func 
TaskStatsFromProto(pb *proto.TaskStats) (*TaskResourceUsage, error) { + timestamp, err := ptypes.Timestamp(pb.Timestamp) + if err != nil { + return nil, err + } + + pids := map[string]*ResourceUsage{} + for pid, ru := range pb.ResourceUsageByPid { + pids[pid] = resourceUsageFromProto(ru) + } + + stats := &TaskResourceUsage{ + Timestamp: timestamp.UnixNano(), + ResourceUsage: resourceUsageFromProto(pb.AggResourceUsage), + Pids: pids, + } + + return stats, nil +} + +func resourceUsageToProto(ru *ResourceUsage) *proto.TaskResourceUsage { + if ru == nil { + return &proto.TaskResourceUsage{} + } + + cpu := &proto.CPUUsage{ + MeasuredFields: cpuUsageMeasuredFieldsToProto(ru.CpuStats.Measured), + SystemMode: ru.CpuStats.SystemMode, + UserMode: ru.CpuStats.UserMode, + TotalTicks: ru.CpuStats.TotalTicks, + ThrottledPeriods: ru.CpuStats.ThrottledPeriods, + ThrottledTime: ru.CpuStats.ThrottledTime, + Percent: ru.CpuStats.Percent, + } + + memory := &proto.MemoryUsage{ + MeasuredFields: memoryUsageMeasuredFieldsToProto(ru.MemoryStats.Measured), + Rss: ru.MemoryStats.RSS, + Cache: ru.MemoryStats.Cache, + Swap: ru.MemoryStats.Swap, + Usage: ru.MemoryStats.Usage, + MaxUsage: ru.MemoryStats.MaxUsage, + KernelUsage: ru.MemoryStats.KernelUsage, + KernelMaxUsage: ru.MemoryStats.KernelMaxUsage, + } + + return &proto.TaskResourceUsage{ + Cpu: cpu, + Memory: memory, + } +} + +func resourceUsageFromProto(pb *proto.TaskResourceUsage) *ResourceUsage { + cpu := CpuStats{} + if pb.Cpu != nil { + cpu = CpuStats{ + Measured: cpuUsageMeasuredFieldsFromProto(pb.Cpu.MeasuredFields), + SystemMode: pb.Cpu.SystemMode, + UserMode: pb.Cpu.UserMode, + TotalTicks: pb.Cpu.TotalTicks, + ThrottledPeriods: pb.Cpu.ThrottledPeriods, + ThrottledTime: pb.Cpu.ThrottledTime, + Percent: pb.Cpu.Percent, + } + } + + memory := MemoryStats{} + if pb.Memory != nil { + memory = MemoryStats{ + Measured: memoryUsageMeasuredFieldsFromProto(pb.Memory.MeasuredFields), + RSS: pb.Memory.Rss, + Cache: pb.Memory.Cache, + Swap: 
pb.Memory.Swap, + Usage: pb.Memory.Usage, + MaxUsage: pb.Memory.MaxUsage, + KernelUsage: pb.Memory.KernelUsage, + KernelMaxUsage: pb.Memory.KernelMaxUsage, + } + } + + return &ResourceUsage{ + CpuStats: &cpu, + MemoryStats: &memory, + } +} + +func BytesToMB(bytes int64) int64 { + return bytes / (1024 * 1024) +} + +var cpuUsageMeasuredFieldToProtoMap = map[string]proto.CPUUsage_Fields{ + "System Mode": proto.CPUUsage_SYSTEM_MODE, + "User Mode": proto.CPUUsage_USER_MODE, + "Total Ticks": proto.CPUUsage_TOTAL_TICKS, + "Throttled Periods": proto.CPUUsage_THROTTLED_PERIODS, + "Throttled Time": proto.CPUUsage_THROTTLED_TIME, + "Percent": proto.CPUUsage_PERCENT, +} + +var cpuUsageMeasuredFieldFromProtoMap = map[proto.CPUUsage_Fields]string{ + proto.CPUUsage_SYSTEM_MODE: "System Mode", + proto.CPUUsage_USER_MODE: "User Mode", + proto.CPUUsage_TOTAL_TICKS: "Total Ticks", + proto.CPUUsage_THROTTLED_PERIODS: "Throttled Periods", + proto.CPUUsage_THROTTLED_TIME: "Throttled Time", + proto.CPUUsage_PERCENT: "Percent", +} + +func cpuUsageMeasuredFieldsToProto(fields []string) []proto.CPUUsage_Fields { + r := make([]proto.CPUUsage_Fields, 0, len(fields)) + + for _, f := range fields { + if v, ok := cpuUsageMeasuredFieldToProtoMap[f]; ok { + r = append(r, v) + } + } + + return r +} + +func cpuUsageMeasuredFieldsFromProto(fields []proto.CPUUsage_Fields) []string { + r := make([]string, 0, len(fields)) + + for _, f := range fields { + if v, ok := cpuUsageMeasuredFieldFromProtoMap[f]; ok { + r = append(r, v) + } + } + + return r +} + +var memoryUsageMeasuredFieldToProtoMap = map[string]proto.MemoryUsage_Fields{ + "RSS": proto.MemoryUsage_RSS, + "Cache": proto.MemoryUsage_CACHE, + "Swap": proto.MemoryUsage_SWAP, + "Usage": proto.MemoryUsage_USAGE, + "Max Usage": proto.MemoryUsage_MAX_USAGE, + "Kernel Usage": proto.MemoryUsage_KERNEL_USAGE, + "Kernel Max Usage": proto.MemoryUsage_KERNEL_MAX_USAGE, +} + +var memoryUsageMeasuredFieldFromProtoMap = map[proto.MemoryUsage_Fields]string{ + 
proto.MemoryUsage_RSS: "RSS", + proto.MemoryUsage_CACHE: "Cache", + proto.MemoryUsage_SWAP: "Swap", + proto.MemoryUsage_USAGE: "Usage", + proto.MemoryUsage_MAX_USAGE: "Max Usage", + proto.MemoryUsage_KERNEL_USAGE: "Kernel Usage", + proto.MemoryUsage_KERNEL_MAX_USAGE: "Kernel Max Usage", +} + +func memoryUsageMeasuredFieldsToProto(fields []string) []proto.MemoryUsage_Fields { + r := make([]proto.MemoryUsage_Fields, 0, len(fields)) + + for _, f := range fields { + if v, ok := memoryUsageMeasuredFieldToProtoMap[f]; ok { + r = append(r, v) + } + } + + return r +} + +func memoryUsageMeasuredFieldsFromProto(fields []proto.MemoryUsage_Fields) []string { + r := make([]string, 0, len(fields)) + + for _, f := range fields { + if v, ok := memoryUsageMeasuredFieldFromProtoMap[f]; ok { + r = append(r, v) + } + } + + return r +} + +func netIsolationModeToProto(mode NetIsolationMode) proto.NetworkIsolationSpec_NetworkIsolationMode { + switch mode { + case NetIsolationModeHost: + return proto.NetworkIsolationSpec_HOST + case NetIsolationModeGroup: + return proto.NetworkIsolationSpec_GROUP + case NetIsolationModeTask: + return proto.NetworkIsolationSpec_TASK + case NetIsolationModeNone: + return proto.NetworkIsolationSpec_NONE + default: + return proto.NetworkIsolationSpec_HOST + } +} + +func netIsolationModeFromProto(pb proto.NetworkIsolationSpec_NetworkIsolationMode) NetIsolationMode { + switch pb { + case proto.NetworkIsolationSpec_HOST: + return NetIsolationModeHost + case proto.NetworkIsolationSpec_GROUP: + return NetIsolationModeGroup + case proto.NetworkIsolationSpec_TASK: + return NetIsolationModeTask + case proto.NetworkIsolationSpec_NONE: + return NetIsolationModeNone + default: + return NetIsolationModeHost + } +} + +func networkCreateRequestFromProto(pb *proto.CreateNetworkRequest) *NetworkCreateRequest { + if pb == nil { + return nil + } + return &NetworkCreateRequest{ + Hostname: pb.GetHostname(), + } +} + +func NetworkIsolationSpecToProto(spec *NetworkIsolationSpec) 
*proto.NetworkIsolationSpec { + if spec == nil { + return nil + } + return &proto.NetworkIsolationSpec{ + Path: spec.Path, + Labels: spec.Labels, + Mode: netIsolationModeToProto(spec.Mode), + HostsConfig: hostsConfigToProto(spec.HostsConfig), + } +} + +func NetworkIsolationSpecFromProto(pb *proto.NetworkIsolationSpec) *NetworkIsolationSpec { + if pb == nil { + return nil + } + return &NetworkIsolationSpec{ + Path: pb.Path, + Labels: pb.Labels, + Mode: netIsolationModeFromProto(pb.Mode), + HostsConfig: hostsConfigFromProto(pb.HostsConfig), + } +} + +func hostsConfigToProto(cfg *HostsConfig) *proto.HostsConfig { + if cfg == nil { + return nil + } + + return &proto.HostsConfig{ + Hostname: cfg.Hostname, + Address: cfg.Address, + } +} + +func hostsConfigFromProto(pb *proto.HostsConfig) *HostsConfig { + if pb == nil { + return nil + } + + return &HostsConfig{ + Hostname: pb.Hostname, + Address: pb.Address, + } +} + +func dnsConfigToProto(dns *DNSConfig) *proto.DNSConfig { + if dns == nil { + return nil + } + + return &proto.DNSConfig{ + Servers: dns.Servers, + Searches: dns.Searches, + Options: dns.Options, + } +} + +func dnsConfigFromProto(pb *proto.DNSConfig) *DNSConfig { + if pb == nil { + return nil + } + + return &DNSConfig{ + Servers: pb.Servers, + Searches: pb.Searches, + Options: pb.Options, + } +} diff --git a/plugin_interface/drivers/utils/utils_unix.go b/plugin_interface/drivers/utils/utils_unix.go new file mode 100644 index 00000000000..2c8481b3d81 --- /dev/null +++ b/plugin_interface/drivers/utils/utils_unix.go @@ -0,0 +1,16 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package utils + +import ( + "golang.org/x/sys/unix" +) + +// IsUnixRoot returns true if system is unix and user running is effectively root +func IsUnixRoot() bool { + return unix.Geteuid() == 0 +} diff --git a/plugin_interface/drivers/utils/utils_windows.go b/plugin_interface/drivers/utils/utils_windows.go new file mode 100644 index 00000000000..cf4cfa7c29f --- /dev/null +++ b/plugin_interface/drivers/utils/utils_windows.go @@ -0,0 +1,9 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package utils + +// IsUnixRoot returns true if system is a unix system and the effective uid of user is root +func IsUnixRoot() bool { + return false +} diff --git a/plugin_interface/drivers/utils_test.go b/plugin_interface/drivers/utils_test.go new file mode 100644 index 00000000000..42f16e1039f --- /dev/null +++ b/plugin_interface/drivers/utils_test.go @@ -0,0 +1,142 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package drivers + +import ( + "testing" + + "github.com/google/uuid" // TODO: maybe not use this + "github.com/hashicorp/nomad/plugin-interface/drivers/proto" + "github.com/shoenig/test/must" +) + +func TestResourceUsageRoundTrip(t *testing.T) { + input := &ResourceUsage{ + CpuStats: &CpuStats{ + SystemMode: 0, + UserMode: 0.9963907032120152, + TotalTicks: 21.920595295932515, + ThrottledPeriods: 2321, + ThrottledTime: 123, + Percent: 0.9963906952696598, + Measured: []string{"System Mode", "User Mode", "Percent"}, + }, + MemoryStats: &MemoryStats{ + RSS: 25681920, + Swap: 15681920, + Usage: 12, + MaxUsage: 23, + KernelUsage: 34, + KernelMaxUsage: 45, + Measured: []string{"RSS", "Swap"}, + }, + } + + parsed := resourceUsageFromProto(resourceUsageToProto(input)) + must.Eq(t, parsed, input) +} + +func TestTaskConfigRoundTrip(t *testing.T) { + + input := &TaskConfig{ + ID: uuid.New().String(), + JobName: "job", + JobID: "job-id", + TaskGroupName: "group", + Name: "task", + Namespace: "default", + NodeName: "node-1", + NodeID: uuid.New().String(), + Env: map[string]string{"gir": "zim"}, + DeviceEnv: map[string]string{"foo": "bar"}, + Resources: &Resources{ + NomadResources: &AllocatedTaskResources{ + Cpu: AllocatedCpuResources{ + CpuShares: int64(100), + }, + Memory: AllocatedMemoryResources{ + MemoryMB: int64(300), + }, + }, + LinuxResources: &LinuxResources{ + MemoryLimitBytes: 300 * 1024 * 1024, + CPUShares: 100, + PercentTicks: float64(100) / float64(3200), + }, + Ports: &AllocatedPorts{ + { + Label: "port", + Value: 23456, + To: 8080, + HostIP: "10.0.0.1", + }, + }, + }, + Devices: []*DeviceConfig{ + { + TaskPath: "task", + HostPath: "host", + Permissions: "perms", + }, + }, + Mounts: []*MountConfig{ + { + TaskPath: "task", + HostPath: "host", + Readonly: true, + PropagationMode: "private", + }, + }, + User: "user", + AllocDir: "allocDir", + StdoutPath: "stdout", + StderrPath: "stderr", + AllocID: 
uuid.New().String(), + NetworkIsolation: &NetworkIsolationSpec{ + Mode: NetIsolationModeGroup, + Path: "path", + Labels: map[string]string{"net": "abc"}, + }, + DNS: &DNSConfig{ + Servers: []string{"8.8.8.8"}, + Searches: []string{".consul"}, + Options: []string{"ndots:2"}, + }, + } + + parsed := taskConfigFromProto(taskConfigToProto(input)) + must.Eq(t, input, parsed) + +} + +func Test_networkCreateRequestFromProto(t *testing.T) { + testCases := []struct { + inputPB *proto.CreateNetworkRequest + expectedOutput *NetworkCreateRequest + name string + }{ + { + inputPB: nil, + expectedOutput: nil, + name: "nil safety", + }, + { + inputPB: &proto.CreateNetworkRequest{ + AllocId: "59598b74-86e9-16ee-eb54-24c62935cc7c", + Hostname: "foobar", + }, + expectedOutput: &NetworkCreateRequest{ + Hostname: "foobar", + }, + name: "generic 1", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + actualOutput := networkCreateRequestFromProto(tc.inputPB) + must.Eq(t, tc.expectedOutput, actualOutput) + }) + } +} diff --git a/plugin_interface/drivers/versions.go b/plugin_interface/drivers/versions.go new file mode 100644 index 00000000000..3faf198da49 --- /dev/null +++ b/plugin_interface/drivers/versions.go @@ -0,0 +1,9 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package drivers + +const ( + // ApiVersion010 is the initial API version for the device plugins + ApiVersion010 = "v0.1.0" +) diff --git a/plugin_interface/go.mod b/plugin_interface/go.mod new file mode 100644 index 00000000000..fab97e55617 --- /dev/null +++ b/plugin_interface/go.mod @@ -0,0 +1,53 @@ +module github.com/hashicorp/nomad/plugin-interface + +go 1.25.4 + +require ( + github.com/LK4D4/joincontext v0.0.0-20171026170139-1724345da6d5 + github.com/container-storage-interface/spec v1.12.0 + github.com/docker/docker v28.5.2+incompatible + github.com/golang/protobuf v1.5.4 + github.com/google/uuid v1.6.0 + github.com/hashicorp/go-hclog v1.6.3 + github.com/hashicorp/go-msgpack/v2 v2.1.5 + github.com/hashicorp/go-multierror v1.1.1 + github.com/hashicorp/go-plugin v1.7.0 + github.com/hashicorp/go-set/v3 v3.0.1 + github.com/kr/pretty v0.3.1 + github.com/shoenig/test v1.12.2 + github.com/stretchr/testify v1.11.1 + github.com/zclconf/go-cty v1.17.0 + golang.org/x/sys v0.38.0 + google.golang.org/grpc v1.77.0 + google.golang.org/protobuf v1.36.10 + oss.indeed.com/go/libtime v1.6.0 +) + +require ( + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/gojuno/minimock/v3 v3.0.6 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/yamux v0.1.2 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/moby/sys/atomicwriter v0.1.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/text v0.31.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/v3 v3.5.1 // indirect +) diff --git a/plugin_interface/go.sum b/plugin_interface/go.sum new file mode 100644 index 00000000000..b38bb6d5b82 --- /dev/null +++ b/plugin_interface/go.sum @@ -0,0 +1,187 @@ +github.com/LK4D4/joincontext v0.0.0-20171026170139-1724345da6d5 h1:U7q69tqXiCf6m097GRlNQB0/6SI1qWIOHYHhCEvDxF4= +github.com/LK4D4/joincontext v0.0.0-20171026170139-1724345da6d5/go.mod h1:nxQPcNPR/34g+HcK2hEsF99O+GJgIkW/OmPl8wtzhmk= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= +github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= +github.com/container-storage-interface/spec v1.12.0 h1:zrFOEqpR5AghNaaDG4qyedwPBqU2fU0dWjLQMP/azK0= +github.com/container-storage-interface/spec v1.12.0/go.mod h1:txsm+MA2B2WDa5kW69jNbqPnvTtfvZma7T/zsAZ9qX8= 
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= +github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gojuno/minimock/v3 v3.0.4/go.mod h1:HqeqnwV8mAABn3pO5hqF+RE7gjA0jsN8cbbSogoGrzI= +github.com/gojuno/minimock/v3 v3.0.6 h1:YqHcVR10x2ZvswPK8Ix5yk+hMpspdQ3ckSpkOzyF85I= 
+github.com/gojuno/minimock/v3 v3.0.6/go.mod h1:v61ZjAKHr+WnEkND63nQPCZ/DTfQgJdvbCi3IuoMblY= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-msgpack/v2 v2.1.5 h1:Ue879bPnutj/hXfmUk6s/jtIK90XxgiUIcXRl656T44= +github.com/hashicorp/go-msgpack/v2 v2.1.5/go.mod h1:bjCsRXpZ7NsJdk45PoCQnzRGDaK8TKm5ZnDI/9y3J4M= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.7.0 h1:YghfQH/0QmPNc/AZMTFE3ac8fipZyZECHdDPshfk+mA= +github.com/hashicorp/go-plugin v1.7.0/go.mod h1:BExt6KEaIYx804z8k4gRzRLEvxKVb+kn0NMcihqOqb8= +github.com/hashicorp/go-set/v3 v3.0.1 h1:ZwO15ZYmIrFYL9zSm2wBuwcRiHxVdp46m/XA/MUlM6I= +github.com/hashicorp/go-set/v3 v3.0.1/go.mod h1:0oPQqhtitglZeT2ZiWnRIfUG6gJAHnn7LzrS7SbgNY4= +github.com/hashicorp/yamux 
v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= +github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= +github.com/hexdigest/gowrap v1.1.7/go.mod h1:Z+nBFUDLa01iaNM+/jzoOA1JJ7sm51rnYFauKFUB5fs= +github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= +github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 
+github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/shoenig/test v1.12.2 h1:ZVT8NeIUwGWpZcKaepPmFMoNQ3sVpxvqUh/MAqwFiJI= +github.com/shoenig/test v1.12.2/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0= +github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +golang.org/x/crypto 
v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.38.0 
h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +oss.indeed.com/go/libtime v1.6.0 h1:XQyczJihse/wQGo59OfPF3f4f+Sywv4R8vdGB3S9BfU= +oss.indeed.com/go/libtime v1.6.0/go.mod h1:B2sdEcuzB0zhTKkAuHy4JInKRc7Al3tME4qWam6R7mA= diff --git a/plugin_interface/helper/grpc.go b/plugin_interface/helper/grpc.go new file mode 100644 index 00000000000..d88844dde7d --- /dev/null +++ b/plugin_interface/helper/grpc.go @@ -0,0 +1,139 @@ +package helper + +import ( + "context" + "path" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/plugin-interface/base/structs" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// HandleReqCtxGrpcErr is used to handle a non io.EOF error in a GRPC request +// where a user supplied context is used. It handles detecting if the plugin has +// shutdown via the passeed pluginCtx. 
The parameters are: +// - err: the error returned from the streaming RPC +// - reqCtx: the user context passed to the request +// - pluginCtx: the plugins done ctx used to detect the plugin dying +// +// The return values are: +// - ErrPluginShutdown if the error is because the plugin shutdown +// - context.Canceled if the reqCtx is canceled +// - The original error +func HandleReqCtxGrpcErr(err error, reqCtx, pluginCtx context.Context) error { + if err == nil { + return nil + } + + // Determine if the error is because the plugin shutdown + if errStatus, ok := status.FromError(err); ok && + (errStatus.Code() == codes.Unavailable || errStatus.Code() == codes.Canceled) { + // Potentially wait a little before returning an error so we can detect + // the exit + select { + case <-pluginCtx.Done(): + err = structs.ErrPluginShutdown + case <-reqCtx.Done(): + err = reqCtx.Err() + + // There is no guarantee that the select will choose the + // doneCtx first so we have to double check + select { + case <-pluginCtx.Done(): + err = structs.ErrPluginShutdown + default: + } + case <-time.After(3 * time.Second): + // Its okay to wait a while since the connection isn't available and + // on local host it is likely shutting down. It is not expected for + // this to ever reach even close to 3 seconds. + } + + // It is an error we don't know how to handle, so return it + return err + } + + // Context was cancelled + if errStatus := status.FromContextError(reqCtx.Err()); errStatus.Code() == codes.Canceled { + return context.Canceled + } + + return err +} + +// HandleGrpcErr is used to handle errors made to a remote gRPC plugin. It +// handles detecting if the plugin has shutdown via the passeed pluginCtx. 
The +// parameters are: +// - err: the error returned from the streaming RPC +// - pluginCtx: the plugins done ctx used to detect the plugin dying +// +// The return values are: +// - ErrPluginShutdown if the error is because the plugin shutdown +// - The original error +func HandleGrpcErr(err error, pluginCtx context.Context) error { + if err == nil { + return nil + } + + if errStatus := status.FromContextError(pluginCtx.Err()); errStatus.Code() == codes.Canceled { + // See if the plugin shutdown + select { + case <-pluginCtx.Done(): + err = structs.ErrPluginShutdown + default: + } + } + + // Determine if the error is because the plugin shutdown + if errStatus, ok := status.FromError(err); ok && errStatus.Code() == codes.Unavailable { + // Potentially wait a little before returning an error so we can detect + // the exit + select { + case <-pluginCtx.Done(): + err = structs.ErrPluginShutdown + case <-time.After(3 * time.Second): + // Its okay to wait a while since the connection isn't available and + // on local host it is likely shutting down. It is not expected for + // this to ever reach even close to 3 seconds. + } + + // It is an error we don't know how to handle, so return it + return err + } + + return err +} + +// UnaryClientInterceptor returns a new unary client interceptor that logs the execution of gRPC calls. +func UnaryClientInterceptor(logger hclog.Logger, opts ...Option) grpc.UnaryClientInterceptor { + o := evaluateClientOpt(opts) + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + startTime := time.Now() + err := invoker(ctx, method, req, reply, cc, opts...) + emitClientLog(logger, o, method, startTime, err, "finished client unary call") + return err + } +} + +// StreamClientInterceptor returns a new streaming client interceptor that logs the execution of gRPC calls. 
+func StreamClientInterceptor(logger hclog.Logger, opts ...Option) grpc.StreamClientInterceptor { + o := evaluateClientOpt(opts) + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + startTime := time.Now() + clientStream, err := streamer(ctx, desc, cc, method, opts...) + emitClientLog(logger, o, method, startTime, err, "finished client streaming call") + return clientStream, err + } +} + +func emitClientLog(logger hclog.Logger, o *options, fullMethodString string, startTime time.Time, err error, msg string) { + code := status.Code(err) + logLevel := o.levelFunc(code) + reqDuration := time.Since(startTime) + service := path.Dir(fullMethodString)[1:] + method := path.Base(fullMethodString) + logger.Log(logLevel, msg, "grpc.code", code, "duration", reqDuration, "grpc.service", service, "grpc.method", method) +} diff --git a/plugin_interface/helper/logging.go b/plugin_interface/helper/logging.go new file mode 100644 index 00000000000..35bfcd34196 --- /dev/null +++ b/plugin_interface/helper/logging.go @@ -0,0 +1,89 @@ +package helper + +import ( + "github.com/hashicorp/go-hclog" + "google.golang.org/grpc/codes" +) + +type options struct { + levelFunc CodeToLevel +} + +var defaultOptions = &options{} + +type Option func(*options) + +func evaluateClientOpt(opts []Option) *options { + optCopy := &options{} + *optCopy = *defaultOptions + optCopy.levelFunc = DefaultCodeToLevel + for _, o := range opts { + o(optCopy) + } + return optCopy +} + +func WithStatusCodeToLevelFunc(fn CodeToLevel) Option { + return func(opts *options) { + opts.levelFunc = fn + } +} + +// CodeToLevel function defines the mapping between gRPC return codes and hclog level. 
+type CodeToLevel func(code codes.Code) hclog.Level
+
+func DefaultCodeToLevel(code codes.Code) hclog.Level {
+	switch code {
+	// Trace Logs -- Useful for Nomad developers but not necessarily always wanted
+	case codes.OK:
+		return hclog.Trace
+
+	// Debug logs
+	case codes.Canceled:
+		return hclog.Debug
+	case codes.InvalidArgument:
+		return hclog.Debug
+	case codes.ResourceExhausted:
+		return hclog.Debug
+	case codes.FailedPrecondition:
+		return hclog.Debug
+	case codes.Aborted:
+		return hclog.Debug
+	case codes.OutOfRange:
+		return hclog.Debug
+	case codes.NotFound:
+		return hclog.Debug
+	case codes.AlreadyExists:
+		return hclog.Debug
+
+	// Info Logs - More curious/interesting than debug, but not necessarily critical
+	case codes.Unknown:
+		return hclog.Info
+	case codes.DeadlineExceeded:
+		return hclog.Info
+	case codes.PermissionDenied:
+		return hclog.Info
+	case codes.Unauthenticated:
+		// unauthenticated requests are probably usually fine?
+		return hclog.Info
+	case codes.Unavailable:
+		// unavailable errors indicate the upstream is not currently available. Info
+		// because I would guess these are usually transient and will be handled by
+		// retry mechanisms before being served as a higher level warning.
+		return hclog.Info
+
+	// Warn Logs - These are almost definitely bad in most cases - usually because
+	// the upstream is broken.
+	case codes.Unimplemented:
+		return hclog.Warn
+	case codes.Internal:
+		return hclog.Warn
+	case codes.DataLoss:
+		return hclog.Warn
+
+	default:
+		// Codes that aren't implemented as part of a CodeToLevel case are probably
+		// unknown and should be surfaced.
+		return hclog.Info
+	}
+}
diff --git a/plugin_interface/helper/pointer.go b/plugin_interface/helper/pointer.go
new file mode 100644
index 00000000000..83e8421321e
--- /dev/null
+++ b/plugin_interface/helper/pointer.go
@@ -0,0 +1,6 @@
+package helper
+
+// PointerOf returns a pointer to a.
+func PointerOf[A any](a A) *A {
+	return &a
+}
diff --git a/plugin_interface/helper/slice.go b/plugin_interface/helper/slice.go
new file mode 100644
index 00000000000..36fa63937d4
--- /dev/null
+++ b/plugin_interface/helper/slice.go
@@ -0,0 +1,42 @@
+package helper
+
+import "github.com/hashicorp/go-set/v3"
+
+// ConvertSlice takes the input slice and generates a new one using the
+// supplied conversion function to convert the element. This is useful when
+// converting a slice of strings to a slice of structs which wraps the string.
+func ConvertSlice[A, B any](original []A, conversion func(a A) B) []B {
+	result := make([]B, len(original))
+	for i, element := range original {
+		result[i] = conversion(element)
+	}
+	return result
+}
+
+// SliceSetEq returns true if slices a and b contain the same elements (in no
+// particular order), using '==' for comparison.
+//
+// Note: for pointers, consider implementing an Equal method and using
+// ElementsEqual instead.
+func SliceSetEq[T comparable](a, b []T) bool {
+	lenA, lenB := len(a), len(b)
+	if lenA != lenB {
+		return false
+	}
+
+	if lenA > 10 {
+		// avoid quadratic comparisons over large input
+		return set.From(a).EqualSlice(b)
+	}
+
+OUTER:
+	for _, item := range a {
+		for _, other := range b {
+			if item == other {
+				continue OUTER
+			}
+		}
+		return false
+	}
+	return true
+}
diff --git a/plugin_interface/lib/cpustats/stats.go b/plugin_interface/lib/cpustats/stats.go
new file mode 100644
index 00000000000..dae928ea351
--- /dev/null
+++ b/plugin_interface/lib/cpustats/stats.go
@@ -0,0 +1,77 @@
+// Copyright IBM Corp. 2015, 2025
+// SPDX-License-Identifier: BUSL-1.1
+
+// Package cpustats provides utilities for tracking CPU usage statistics.
+package cpustats
+
+import (
+	"time"
+
+	"github.com/hashicorp/nomad/plugin-interface/lib/hw"
+	"oss.indeed.com/go/libtime"
+)
+
+// Compute is the cpu related fields of a numa.Topology needed for computing
+// performance / utilization of tasks.
+// +// Note that this is serialized and passed to executor.Executor as a CLI +// argument and so we use small json field names to minimize ps spam. +type Compute struct { + TotalCompute hw.MHz `json:"tc"` + NumCores int `json:"nc"` +} + +// A Tracker keeps track of one aspect of CPU utilization (i.e. one of system, +// user, or total time). +type Tracker struct { + prevCPUTime float64 + prevTime time.Time + + totalCompute hw.MHz + numCPUs int + + clock libtime.Clock +} + +// New creates a fresh Tracker with no data. +func New(c Compute) *Tracker { + return &Tracker{ + totalCompute: c.TotalCompute, + numCPUs: c.NumCores, + clock: libtime.SystemClock(), + } +} + +// Percent calculates the CPU usage percentage based on the current CPU usage +// and the previous CPU usage where usage is given as a time in nanoseconds +// spent using the CPU. +func (t *Tracker) Percent(cpuTime float64) float64 { + now := t.clock.Now() + + if t.prevCPUTime == 0.0 { + t.prevCPUTime = cpuTime + t.prevTime = now + return 0.0 + } + + timeDelta := now.Sub(t.prevTime).Nanoseconds() + ret := t.calculatePercent(t.prevCPUTime, cpuTime, timeDelta) + t.prevCPUTime = cpuTime + t.prevTime = now + return ret +} + +func (t *Tracker) calculatePercent(t1, t2 float64, timeDelta int64) float64 { + vDelta := t2 - t1 + if timeDelta <= 0 || vDelta <= 0 { + return 0.0 + } + return (vDelta / float64(timeDelta)) * 100.0 +} + +// TicksConsumed calculates the total bandwidth consumed by the process across +// all system CPU cores (not just the ones available to Nomad or this particular +// process. +func (t *Tracker) TicksConsumed(percent float64) float64 { + return (percent / 100) * float64(t.totalCompute) / float64(t.numCPUs) +} diff --git a/plugin_interface/lib/hw/hw.go b/plugin_interface/lib/hw/hw.go new file mode 100644 index 00000000000..227c5da63ca --- /dev/null +++ b/plugin_interface/lib/hw/hw.go @@ -0,0 +1,25 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: BUSL-1.1 + +package hw + +import ( + "strconv" +) + +type ( + MHz uint64 + KHz uint64 +) + +func (khz KHz) MHz() MHz { + return MHz(khz / 1000) +} + +func (mhz MHz) KHz() KHz { + return KHz(mhz * 1000) +} + +func (khz KHz) String() string { + return strconv.FormatUint(uint64(khz.MHz()), 10) +} diff --git a/plugin_interface/lib/idset/idset.go b/plugin_interface/lib/idset/idset.go new file mode 100644 index 00000000000..68456bc29ea --- /dev/null +++ b/plugin_interface/lib/idset/idset.go @@ -0,0 +1,219 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: BUSL-1.1 + +// Package idset provides a Set implementation for keeping track of various +// types of numeric IDs (e.g. CoreID, ProcessID, etc.). +package idset + +import ( + "fmt" + "regexp" + "slices" + "strconv" + "strings" + + "github.com/hashicorp/go-set/v3" +) + +// An ID is representative of a non-negative identifier of something like +// a CPU core ID, a NUMA node ID, etc. +// +// See the hwids package for typical use cases. +type ID interface { + ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uint +} + +// A Set contains some IDs. +// +// See the List Format section of +// https://www.man7.org/linux/man-pages/man7/cpuset.7.html +// for more information on the syntax and utility of these sets. +type Set[T ID] struct { + items *set.Set[T] +} + +// Empty creates a fresh Set with no elements. +func Empty[T ID]() *Set[T] { + return &Set[T]{ + items: set.New[T](0), + } +} + +// FromFunc creates a Set from the given values by first applying +// the conversion function. +func FromFunc[T ID, A any](values []A, convert func(value A) T) *Set[T] { + return &Set[T]{ + items: set.FromFunc(values, convert), + } +} + +// Copy creates a deep copy of s. 
+func (s *Set[T]) Copy() *Set[T] { + return &Set[T]{items: s.items.Copy()} +} + +var ( + numberRe = regexp.MustCompile(`^\d+$`) + spanRe = regexp.MustCompile(`^(\d+)-(\d+)$`) +) + +func atoi[T ID](s string) T { + i, _ := strconv.Atoi(s) + return T(i) +} + +func order[T ID](a, b T) (T, T) { + if a < b { + return a, b + } + return b, a +} + +// Parse the given cpuset into a set. +// +// The input is assumed to be valid. +func Parse[T ID](list string) *Set[T] { + result := Empty[T]() + + add := func(s string) { + s = strings.TrimSpace(s) + switch { + case numberRe.MatchString(s): + result.items.Insert(atoi[T](s)) + case spanRe.MatchString(s): + values := spanRe.FindStringSubmatch(s) + low, high := order(atoi[T](values[1]), atoi[T](values[2])) + for i := low; i <= high; i++ { + result.items.Insert(i) + } + } + } + + pieces := strings.Split(list, ",") + for _, piece := range pieces { + add(piece) + } + + return result +} + +// From returns Set created from the given slice. +func From[T, U ID](slice []U) *Set[T] { + result := Empty[T]() + for _, item := range slice { + result.items.Insert(T(item)) + } + return result +} + +// Difference returns the set of elements in s but not in other. +func (s *Set[T]) Difference(other *Set[T]) *Set[T] { + diff := s.items.Difference(other.items) + return &Set[T]{items: diff.(*set.Set[T])} +} + +// Intersect returns the set of elements that are in both s and other. +func (s *Set[T]) Intersect(other *Set[T]) *Set[T] { + intersection := s.items.Intersect(other.items) + return &Set[T]{items: intersection.(*set.Set[T])} +} + +// Contains returns whether the Set contains item. +func (s *Set[T]) Contains(item T) bool { + return s.items.Contains(item) +} + +// Insert item into the Set. +func (s *Set[T]) Insert(item T) { + s.items.Insert(item) +} + +// Slice returns a slice copy of the Set. +func (s *Set[T]) Slice() []T { + items := s.items.Slice() + slices.Sort(items) + return items +} + +// InsertSet inserts all items of other into s. 
+func (s *Set[T]) InsertSet(other *Set[T]) {
+	s.items.InsertSet(other.items)
+}
+
+// RemoveSet removes all items of other from s.
+func (s *Set[T]) RemoveSet(other *Set[T]) {
+	s.items.RemoveSet(other.items)
+}
+
+// String creates a well-formed cpuset string representation of the Set.
+func (s *Set[T]) String() string {
+	if s.items.Empty() {
+		// cgroups notation uses a space (or newline) to indicate
+		// "empty"; and this value is written to cgroups interface
+		// files
+		const empty = " "
+		return empty
+	}
+
+	var parts []string
+	ids := s.Slice()
+
+	low, high := ids[0], ids[0]
+	for i := 1; i < len(ids); i++ {
+		switch {
+		case ids[i] == high+1:
+			high = ids[i]
+			continue
+		case low == high:
+			parts = append(parts, fmt.Sprintf("%d", low))
+		default:
+			parts = append(parts, fmt.Sprintf("%d-%d", low, high))
+		}
+		low, high = ids[i], ids[i] // new range
+	}
+
+	if low == high {
+		parts = append(parts, fmt.Sprintf("%d", low))
+	} else {
+		parts = append(parts, fmt.Sprintf("%d-%d", low, high))
+	}
+
+	return strings.Join(parts, ",")
+}
+
+// ForEach iterates the elements in the set and applies f. Iteration stops
+// if the result of f is a non-nil error.
+func (s *Set[T]) ForEach(f func(id T) error) error {
+	for id := range s.items.Items() {
+		if err := f(id); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Size returns the number of elements in the Set.
+func (s *Set[T]) Size() int {
+	return s.items.Size()
+}
+
+// Empty returns whether the set is empty.
+func (s *Set[T]) Empty() bool {
+	if s == nil || s.items == nil {
+		return true
+	}
+	return s.items.Empty()
+}
+
+// InsertSlice is used to bludgeon a slice of integers into s.
+func InsertSlice[T ID, X ~uint16](s *Set[T], items ...X) {
+	for _, item := range items {
+		s.Insert(T(item))
+	}
+}
+
+// Superset returns true if s is a superset of other.
+func (s *Set[T]) Superset(other *Set[T]) bool { + // todo(shoenig) optimize + return s.items.ContainsSlice(other.items.Slice()) +} diff --git a/plugin_interface/lib/idset/idset_test.go b/plugin_interface/lib/idset/idset_test.go new file mode 100644 index 00000000000..8feae32113b --- /dev/null +++ b/plugin_interface/lib/idset/idset_test.go @@ -0,0 +1,81 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: BUSL-1.1 + +package idset + +import ( + "testing" + + "github.com/shoenig/test/must" +) + +func Test_Parse(t *testing.T) { + cases := []struct { + input string + exp []uint16 + }{ + { + input: "0", + exp: []uint16{0}, + }, + { + input: "1,3,5,9", + exp: []uint16{1, 3, 5, 9}, + }, + { + input: "1-2", + exp: []uint16{1, 2}, + }, + { + input: "3-6", + exp: []uint16{3, 4, 5, 6}, + }, + { + input: "1,3-5,9,11-14", + exp: []uint16{1, 3, 4, 5, 9, 11, 12, 13, 14}, + }, + { + input: " 4-2 , 9-9 , 11-7\n", + exp: []uint16{2, 3, 4, 7, 8, 9, 10, 11}, + }, + } + + for _, tc := range cases { + t.Run("("+tc.input+")", func(t *testing.T) { + result := Parse[uint16](tc.input).Slice() + must.SliceContainsAll(t, tc.exp, result, must.Sprint("got", result)) + }) + } +} + +func Test_String(t *testing.T) { + cases := []struct { + input string + exp string + }{ + { + input: "0", + exp: "0", + }, + { + input: "1-3", + exp: "1-3", + }, + { + input: "1, 2, 3", + exp: "1-3", + }, + { + input: "7, 1-3, 12-9", + exp: "1-3,7,9-12", + }, + } + + for _, tc := range cases { + t.Run("("+tc.input+")", func(t *testing.T) { + result := Parse[uint16](tc.input) + str := result.String() + must.Eq(t, tc.exp, str, must.Sprint("slice", result.Slice())) + }) + } +} diff --git a/plugin_interface/serve.go b/plugin_interface/serve.go new file mode 100644 index 00000000000..f3517a9feb8 --- /dev/null +++ b/plugin_interface/serve.go @@ -0,0 +1,53 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package plugins + +import ( + "context" + "fmt" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/plugin-interface/device" +) + +// PluginFactory returns a new plugin instance +type PluginFactory func(log log.Logger) interface{} + +// PluginCtxFactory returns a new plugin instance, that takes in a context +type PluginCtxFactory func(ctx context.Context, log log.Logger) interface{} + +// Serve is used to serve a new Nomad plugin +func Serve(f PluginFactory) { + logger := log.New(&log.LoggerOptions{ + Level: log.Trace, + JSONFormat: true, + }) + + plugin := f(logger) + serve(plugin, logger) +} + +// ServeCtx is used to serve a new Nomad plugin +func ServeCtx(f PluginCtxFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.New(&log.LoggerOptions{ + Level: log.Trace, + JSONFormat: true, + }) + + plugin := f(ctx, logger) + serve(plugin, logger) +} +func serve(plugin interface{}, logger log.Logger) { + switch p := plugin.(type) { + case device.DevicePlugin: + device.Serve(p, logger) + // case drivers.DriverPlugin: + // drivers.Serve(p, logger) + default: + fmt.Println("Unsupported plugin type") + } +} diff --git a/plugin_interface/shared/hclspec/hcl_spec.pb.go b/plugin_interface/shared/hclspec/hcl_spec.pb.go new file mode 100644 index 00000000000..af1b37ffb38 --- /dev/null +++ b/plugin_interface/shared/hclspec/hcl_spec.pb.go @@ -0,0 +1,1124 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: plugins/shared/hclspec/hcl_spec.proto + +// Spec allows exposing the specification for an HCL body, allowing for parsing and +//validation. +// +//Certain expressions within a specification may use the following functions. +//The documentation for each spec type above specifies where functions may +//be used. +// +// `abs(number)` returns the absolute (positive) value of the given number. +// `coalesce(vals...)` returns the first non-null value given. 
+// `concat(lists...)` concatenates together all of the given lists to produce a new list. +// `hasindex(val, idx)` returns true if the expression `val[idx]` could succeed. +// `int(number)` returns the integer portion of the given number, rounding towards zero. +// `jsondecode(str)` interprets the given string as JSON and returns the resulting data structure. +// `jsonencode(val)` returns a JSON-serialized version of the given value. +// `length(collection)` returns the number of elements in the given collection (list, set, map, object, or tuple). +// `lower(string)` returns the given string with all uppercase letters converted to lowercase. +// `max(numbers...)` returns the greatest of the given numbers. +// `min(numbers...)` returns the smallest of the given numbers. +// `reverse(string)` returns the given string with all of the characters in reverse order. +// `strlen(string)` returns the number of characters in the given string. +// `substr(string, offset, length)` returns the requested substring of the given string. +// `upper(string)` returns the given string with all lowercase letters converted to uppercase. +// +//## Type Expressions +// +//Type expressions are used to describe the expected type of an attribute, as +//an additional validation constraint. +// +//A type expression uses primitive type names and compound type constructors. +//A type constructor builds a new type based on one or more type expression +//arguments. +// +//The following type names and type constructors are supported: +// +// `any` is a wildcard that accepts a value of any type. (In HCL terms, this +// is the _dynamic pseudo-type_.) +// `string` is a Unicode string. +// `number` is an arbitrary-precision floating point number. 
+// `bool` is a boolean value (`true` or `false`) +// `list(element_type)` constructs a list type with the given element type +// `set(element_type)` constructs a set type with the given element type +// `map(element_type)` constructs a map type with the given element type +// `object({name1 = element_type, name2 = element_type, ...})` constructs +// an object type with the given attribute types. +// `tuple([element_type, element_type, ...])` constructs a tuple type with +// the given element types. This can be used, for example, to require an +// array with a particular number of elements, or with elements of different +// types. +// +//`null` is a valid value of any type, and not a type itself. + +package hclspec + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Spec defines the available specification types. 
+type Spec struct { + // Types that are valid to be assigned to Block: + // + // *Spec_Object + // *Spec_Array + // *Spec_Attr + // *Spec_BlockValue + // *Spec_BlockAttrs + // *Spec_BlockList + // *Spec_BlockSet + // *Spec_BlockMap + // *Spec_Default + // *Spec_Literal + Block isSpec_Block `protobuf_oneof:"block"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Spec) Reset() { *m = Spec{} } +func (m *Spec) String() string { return proto.CompactTextString(m) } +func (*Spec) ProtoMessage() {} +func (*Spec) Descriptor() ([]byte, []int) { + return fileDescriptor_28863966909039be, []int{0} +} + +func (m *Spec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Spec.Unmarshal(m, b) +} +func (m *Spec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Spec.Marshal(b, m, deterministic) +} +func (m *Spec) XXX_Merge(src proto.Message) { + xxx_messageInfo_Spec.Merge(m, src) +} +func (m *Spec) XXX_Size() int { + return xxx_messageInfo_Spec.Size(m) +} +func (m *Spec) XXX_DiscardUnknown() { + xxx_messageInfo_Spec.DiscardUnknown(m) +} + +var xxx_messageInfo_Spec proto.InternalMessageInfo + +type isSpec_Block interface { + isSpec_Block() +} + +type Spec_Object struct { + Object *Object `protobuf:"bytes,1,opt,name=object,proto3,oneof"` +} + +type Spec_Array struct { + Array *Array `protobuf:"bytes,2,opt,name=array,proto3,oneof"` +} + +type Spec_Attr struct { + Attr *Attr `protobuf:"bytes,3,opt,name=Attr,proto3,oneof"` +} + +type Spec_BlockValue struct { + BlockValue *Block `protobuf:"bytes,4,opt,name=block_value,json=blockValue,proto3,oneof"` +} + +type Spec_BlockAttrs struct { + BlockAttrs *BlockAttrs `protobuf:"bytes,5,opt,name=block_attrs,json=blockAttrs,proto3,oneof"` +} + +type Spec_BlockList struct { + BlockList *BlockList `protobuf:"bytes,6,opt,name=block_list,json=blockList,proto3,oneof"` +} + +type Spec_BlockSet struct { + BlockSet *BlockSet 
`protobuf:"bytes,7,opt,name=block_set,json=blockSet,proto3,oneof"` +} + +type Spec_BlockMap struct { + BlockMap *BlockMap `protobuf:"bytes,8,opt,name=block_map,json=blockMap,proto3,oneof"` +} + +type Spec_Default struct { + Default *Default `protobuf:"bytes,9,opt,name=default,proto3,oneof"` +} + +type Spec_Literal struct { + Literal *Literal `protobuf:"bytes,10,opt,name=literal,proto3,oneof"` +} + +func (*Spec_Object) isSpec_Block() {} + +func (*Spec_Array) isSpec_Block() {} + +func (*Spec_Attr) isSpec_Block() {} + +func (*Spec_BlockValue) isSpec_Block() {} + +func (*Spec_BlockAttrs) isSpec_Block() {} + +func (*Spec_BlockList) isSpec_Block() {} + +func (*Spec_BlockSet) isSpec_Block() {} + +func (*Spec_BlockMap) isSpec_Block() {} + +func (*Spec_Default) isSpec_Block() {} + +func (*Spec_Literal) isSpec_Block() {} + +func (m *Spec) GetBlock() isSpec_Block { + if m != nil { + return m.Block + } + return nil +} + +func (m *Spec) GetObject() *Object { + if x, ok := m.GetBlock().(*Spec_Object); ok { + return x.Object + } + return nil +} + +func (m *Spec) GetArray() *Array { + if x, ok := m.GetBlock().(*Spec_Array); ok { + return x.Array + } + return nil +} + +func (m *Spec) GetAttr() *Attr { + if x, ok := m.GetBlock().(*Spec_Attr); ok { + return x.Attr + } + return nil +} + +func (m *Spec) GetBlockValue() *Block { + if x, ok := m.GetBlock().(*Spec_BlockValue); ok { + return x.BlockValue + } + return nil +} + +func (m *Spec) GetBlockAttrs() *BlockAttrs { + if x, ok := m.GetBlock().(*Spec_BlockAttrs); ok { + return x.BlockAttrs + } + return nil +} + +func (m *Spec) GetBlockList() *BlockList { + if x, ok := m.GetBlock().(*Spec_BlockList); ok { + return x.BlockList + } + return nil +} + +func (m *Spec) GetBlockSet() *BlockSet { + if x, ok := m.GetBlock().(*Spec_BlockSet); ok { + return x.BlockSet + } + return nil +} + +func (m *Spec) GetBlockMap() *BlockMap { + if x, ok := m.GetBlock().(*Spec_BlockMap); ok { + return x.BlockMap + } + return nil +} + +func (m *Spec) 
GetDefault() *Default { + if x, ok := m.GetBlock().(*Spec_Default); ok { + return x.Default + } + return nil +} + +func (m *Spec) GetLiteral() *Literal { + if x, ok := m.GetBlock().(*Spec_Literal); ok { + return x.Literal + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Spec) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Spec_Object)(nil), + (*Spec_Array)(nil), + (*Spec_Attr)(nil), + (*Spec_BlockValue)(nil), + (*Spec_BlockAttrs)(nil), + (*Spec_BlockList)(nil), + (*Spec_BlockSet)(nil), + (*Spec_BlockMap)(nil), + (*Spec_Default)(nil), + (*Spec_Literal)(nil), + } +} + +// Attr spec type reads the value of an attribute in the current body +// and returns that value as its result. It also creates validation constraints +// for the given attribute name and its value. +// +// ```hcl +// Attr { +// name = "document_root" +// type = string +// required = true +// } +// ``` +// +// `Attr` spec blocks accept the following arguments: +// +// `name` (required) - The attribute name to expect within the HCL input file. +// This may be omitted when a default name selector is created by a parent +// `Object` spec, if the input attribute name should match the output JSON +// object property name. +// +// `type` (optional) - A [type expression](#type-expressions) that the given +// attribute value must conform to. If this argument is set, `hcldec` will +// automatically convert the given input value to this type or produce an +// error if that is not possible. +// +// `required` (optional) - If set to `true`, `hcldec` will produce an error +// if a value is not provided for the source attribute. +// +// `Attr` is a leaf spec type, so no nested spec blocks are permitted. 
+type Attr struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Required bool `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Attr) Reset() { *m = Attr{} } +func (m *Attr) String() string { return proto.CompactTextString(m) } +func (*Attr) ProtoMessage() {} +func (*Attr) Descriptor() ([]byte, []int) { + return fileDescriptor_28863966909039be, []int{1} +} + +func (m *Attr) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Attr.Unmarshal(m, b) +} +func (m *Attr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Attr.Marshal(b, m, deterministic) +} +func (m *Attr) XXX_Merge(src proto.Message) { + xxx_messageInfo_Attr.Merge(m, src) +} +func (m *Attr) XXX_Size() int { + return xxx_messageInfo_Attr.Size(m) +} +func (m *Attr) XXX_DiscardUnknown() { + xxx_messageInfo_Attr.DiscardUnknown(m) +} + +var xxx_messageInfo_Attr proto.InternalMessageInfo + +func (m *Attr) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Attr) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Attr) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +// Block spec type applies one nested spec block to the contents of a +// block within the current body and returns the result of that spec. It also +// creates validation constraints for the given block type name. +// +// ```hcl +// Block { +// name = "logging" +// +// Object { +// Attr "level" { +// type = string +// } +// Attr "file" { +// type = string +// } +// } +// } +// ``` +// +// `Block` spec blocks accept the following arguments: +// +// `name` (required) - The block type name to expect within the HCL +// input file. 
This may be omitted when a default name selector is created +// by a parent `Object` spec, if the input block type name should match the +// output JSON object property name. +// +// `required` (optional) - If set to `true`, `hcldec` will produce an error +// if a block of the specified type is not present in the current body. +// +// `Block` creates a validation constraint that there must be zero or one blocks +// of the given type name, or exactly one if `required` is set. +// +// `Block` expects a single nested spec block, which is applied to the body of +// the block of the given type when it is present. +type Block struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Required bool `protobuf:"varint,2,opt,name=required,proto3" json:"required,omitempty"` + Nested *Spec `protobuf:"bytes,3,opt,name=nested,proto3" json:"nested,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Block) Reset() { *m = Block{} } +func (m *Block) String() string { return proto.CompactTextString(m) } +func (*Block) ProtoMessage() {} +func (*Block) Descriptor() ([]byte, []int) { + return fileDescriptor_28863966909039be, []int{2} +} + +func (m *Block) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Block.Unmarshal(m, b) +} +func (m *Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Block.Marshal(b, m, deterministic) +} +func (m *Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Block.Merge(m, src) +} +func (m *Block) XXX_Size() int { + return xxx_messageInfo_Block.Size(m) +} +func (m *Block) XXX_DiscardUnknown() { + xxx_messageInfo_Block.DiscardUnknown(m) +} + +var xxx_messageInfo_Block proto.InternalMessageInfo + +func (m *Block) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Block) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m 
*Block) GetNested() *Spec { + if m != nil { + return m.Nested + } + return nil +} + +// The BlockAttrs spec type is similar to an Attr spec block of a map type, +// but it produces a map from the attributes of a block rather than from an +// attribute's expression. +// +// ```hcl +// BlockAttrs { +// name = "variables" +// type = string +// required = false +// } +// ``` +// +// This allows a map with user-defined keys to be produced within block syntax, +// but due to the constraints of that syntax it also means that the user will +// be unable to dynamically-generate either individual key names using key +// expressions or the entire map value using a `for` expression. +// +// `BlockAttrs` spec blocks accept the following arguments: +// +// `name` (required) - The block type name to expect within the HCL +// input file. This may be omitted when a default name selector is created +// by a parent `object` spec, if the input block type name should match the +// output JSON object property name. +// +// `type` (required) - The value type to require for each of the +// attributes within a matched block. The resulting value will be a JSON +// object whose property values are of this type. +// +// `required` (optional) - If `true`, an error will be produced if a block +// of the given type is not present. If `false` -- the default -- an absent +// block will be indicated by producing `null`. 
+type BlockAttrs struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Required bool `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockAttrs) Reset() { *m = BlockAttrs{} } +func (m *BlockAttrs) String() string { return proto.CompactTextString(m) } +func (*BlockAttrs) ProtoMessage() {} +func (*BlockAttrs) Descriptor() ([]byte, []int) { + return fileDescriptor_28863966909039be, []int{3} +} + +func (m *BlockAttrs) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockAttrs.Unmarshal(m, b) +} +func (m *BlockAttrs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockAttrs.Marshal(b, m, deterministic) +} +func (m *BlockAttrs) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockAttrs.Merge(m, src) +} +func (m *BlockAttrs) XXX_Size() int { + return xxx_messageInfo_BlockAttrs.Size(m) +} +func (m *BlockAttrs) XXX_DiscardUnknown() { + xxx_messageInfo_BlockAttrs.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockAttrs proto.InternalMessageInfo + +func (m *BlockAttrs) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BlockAttrs) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *BlockAttrs) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +// BlockList spec type is similar to `Block`, but it accepts zero or +// more blocks of a specified type rather than requiring zero or one. The +// result is a JSON array with one entry per block of the given type. 
+// +// ```hcl +// BlockList { +// name = "log_file" +// +// Object { +// Attr "level" { +// type = string +// } +// Attr "filename" { +// type = string +// required = true +// } +// } +// } +// ``` +// +// `BlockList` spec blocks accept the following arguments: +// +// `name` (required) - The block type name to expect within the HCL +// input file. This may be omitted when a default name selector is created +// by a parent `Object` spec, if the input block type name should match the +// output JSON object property name. +// +// `min_items` (optional) - If set to a number greater than zero, `hcldec` will +// produce an error if fewer than the given number of blocks are present. +// +// `max_items` (optional) - If set to a number greater than zero, `hcldec` will +// produce an error if more than the given number of blocks are present. This +// attribute must be greater than or equal to `min_items` if both are set. +// +// `Block` creates a validation constraint on the number of blocks of the given +// type that must be present. +// +// `Block` expects a single nested spec block, which is applied to the body of +// each matching block to produce the resulting list items. 
+type BlockList struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + MinItems uint64 `protobuf:"varint,2,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems uint64 `protobuf:"varint,3,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + Nested *Spec `protobuf:"bytes,4,opt,name=nested,proto3" json:"nested,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockList) Reset() { *m = BlockList{} } +func (m *BlockList) String() string { return proto.CompactTextString(m) } +func (*BlockList) ProtoMessage() {} +func (*BlockList) Descriptor() ([]byte, []int) { + return fileDescriptor_28863966909039be, []int{4} +} + +func (m *BlockList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockList.Unmarshal(m, b) +} +func (m *BlockList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockList.Marshal(b, m, deterministic) +} +func (m *BlockList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockList.Merge(m, src) +} +func (m *BlockList) XXX_Size() int { + return xxx_messageInfo_BlockList.Size(m) +} +func (m *BlockList) XXX_DiscardUnknown() { + xxx_messageInfo_BlockList.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockList proto.InternalMessageInfo + +func (m *BlockList) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BlockList) GetMinItems() uint64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *BlockList) GetMaxItems() uint64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *BlockList) GetNested() *Spec { + if m != nil { + return m.Nested + } + return nil +} + +// BlockSet spec type behaves the same as BlockList except that +// the result is in no specific order and any duplicate items are removed. 
+// +// ```hcl +// BlockSet { +// name = "log_file" +// +// Object { +// Attr "level" { +// type = string +// } +// Attr "filename" { +// type = string +// required = true +// } +// } +// } +// ``` +// +// The contents of `BlockSet` are the same as for `BlockList`. +type BlockSet struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + MinItems uint64 `protobuf:"varint,2,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems uint64 `protobuf:"varint,3,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + Nested *Spec `protobuf:"bytes,4,opt,name=nested,proto3" json:"nested,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockSet) Reset() { *m = BlockSet{} } +func (m *BlockSet) String() string { return proto.CompactTextString(m) } +func (*BlockSet) ProtoMessage() {} +func (*BlockSet) Descriptor() ([]byte, []int) { + return fileDescriptor_28863966909039be, []int{5} +} + +func (m *BlockSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockSet.Unmarshal(m, b) +} +func (m *BlockSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockSet.Marshal(b, m, deterministic) +} +func (m *BlockSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockSet.Merge(m, src) +} +func (m *BlockSet) XXX_Size() int { + return xxx_messageInfo_BlockSet.Size(m) +} +func (m *BlockSet) XXX_DiscardUnknown() { + xxx_messageInfo_BlockSet.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockSet proto.InternalMessageInfo + +func (m *BlockSet) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BlockSet) GetMinItems() uint64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *BlockSet) GetMaxItems() uint64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *BlockSet) GetNested() *Spec { + if m != nil { + return m.Nested + } + 
return nil +} + +// BlockMap spec type is similar to `Block`, but it accepts zero or +// more blocks of a specified type rather than requiring zero or one. The +// result is a JSON object, or possibly multiple nested JSON objects, whose +// properties are derived from the labels set on each matching block. +// +// ```hcl +// BlockMap { +// name = "log_file" +// labels = ["filename"] +// +// Object { +// Attr "level" { +// type = string +// required = true +// } +// } +// } +// ``` +// +// `BlockMap` spec blocks accept the following arguments: +// +// `name` (required) - The block type name to expect within the HCL +// input file. This may be omitted when a default name selector is created +// by a parent `Object` spec, if the input block type name should match the +// output JSON object property name. +// +// `labels` (required) - A list of user-oriented block label names. Each entry +// in this list creates one level of object within the output value, and +// requires one additional block header label on any child block of this type. +// Block header labels are the quoted strings that appear after the block type +// name but before the opening `{`. +// +// `Block` creates a validation constraint on the number of labels that blocks +// of the given type must have. +// +// `Block` expects a single nested spec block, which is applied to the body of +// each matching block to produce the resulting map items. 
+type BlockMap struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels []string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` + Nested *Spec `protobuf:"bytes,3,opt,name=nested,proto3" json:"nested,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockMap) Reset() { *m = BlockMap{} } +func (m *BlockMap) String() string { return proto.CompactTextString(m) } +func (*BlockMap) ProtoMessage() {} +func (*BlockMap) Descriptor() ([]byte, []int) { + return fileDescriptor_28863966909039be, []int{6} +} + +func (m *BlockMap) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockMap.Unmarshal(m, b) +} +func (m *BlockMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockMap.Marshal(b, m, deterministic) +} +func (m *BlockMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockMap.Merge(m, src) +} +func (m *BlockMap) XXX_Size() int { + return xxx_messageInfo_BlockMap.Size(m) +} +func (m *BlockMap) XXX_DiscardUnknown() { + xxx_messageInfo_BlockMap.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockMap proto.InternalMessageInfo + +func (m *BlockMap) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BlockMap) GetLabels() []string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *BlockMap) GetNested() *Spec { + if m != nil { + return m.Nested + } + return nil +} + +// Literal spec type returns a given literal value, and creates no +// validation constraints. It is most commonly used with the `Default` spec +// type to create a fallback value, but can also be used e.g. to fill out +// required properties in an `Object` spec that do not correspond to any +// construct in the input configuration. 
+// +// ```hcl +// Literal { +// value = "hello world" +// } +// ``` +// +// `Literal` spec blocks accept the following argument: +// +// `value` (required) - The value to return. This attribute may be an expression +// that uses [functions](#spec-definition-functions). +// +// `Literal` is a leaf spec type, so no nested spec blocks are permitted. +type Literal struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Literal) Reset() { *m = Literal{} } +func (m *Literal) String() string { return proto.CompactTextString(m) } +func (*Literal) ProtoMessage() {} +func (*Literal) Descriptor() ([]byte, []int) { + return fileDescriptor_28863966909039be, []int{7} +} + +func (m *Literal) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Literal.Unmarshal(m, b) +} +func (m *Literal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Literal.Marshal(b, m, deterministic) +} +func (m *Literal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Literal.Merge(m, src) +} +func (m *Literal) XXX_Size() int { + return xxx_messageInfo_Literal.Size(m) +} +func (m *Literal) XXX_DiscardUnknown() { + xxx_messageInfo_Literal.DiscardUnknown(m) +} + +var xxx_messageInfo_Literal proto.InternalMessageInfo + +func (m *Literal) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Default spec type evaluates a sequence of nested specs in turn and +// returns the result of the first one that produces a non-null value. +// It creates no validation constraints of its own, but passes on the validation +// constraints from its first nested block. 
+// +// ```hcl +// Default { +// Attr { +// name = "private" +// type = bool +// } +// Literal { +// value = false +// } +// } +// ``` +// +// A `Default` spec block must have at least one nested spec block, and should +// generally have at least two since otherwise the `Default` wrapper is a no-op. +// +// The second and any subsequent spec blocks are _fallback_ specs. These exhibit +// their usual behavior but are not able to impose validation constraints on the +// current body since they are not evaluated unless all prior specs produce +// `null` as their result. +type Default struct { + Primary *Spec `protobuf:"bytes,1,opt,name=primary,proto3" json:"primary,omitempty"` + Default *Spec `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Default) Reset() { *m = Default{} } +func (m *Default) String() string { return proto.CompactTextString(m) } +func (*Default) ProtoMessage() {} +func (*Default) Descriptor() ([]byte, []int) { + return fileDescriptor_28863966909039be, []int{8} +} + +func (m *Default) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Default.Unmarshal(m, b) +} +func (m *Default) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Default.Marshal(b, m, deterministic) +} +func (m *Default) XXX_Merge(src proto.Message) { + xxx_messageInfo_Default.Merge(m, src) +} +func (m *Default) XXX_Size() int { + return xxx_messageInfo_Default.Size(m) +} +func (m *Default) XXX_DiscardUnknown() { + xxx_messageInfo_Default.DiscardUnknown(m) +} + +var xxx_messageInfo_Default proto.InternalMessageInfo + +func (m *Default) GetPrimary() *Spec { + if m != nil { + return m.Primary + } + return nil +} + +func (m *Default) GetDefault() *Spec { + if m != nil { + return m.Default + } + return nil +} + +// Object spec type is the most commonly used at the root of a spec file. 
+// Its result is a JSON object whose properties are set based on any nested +// spec blocks: +// +// ```hcl +// Object { +// Attr "name" { +// type = "string" +// } +// Block "address" { +// Object { +// Attr "street" { +// type = "string" +// } +// # ... +// } +// } +// } +// ``` +// +// Nested spec blocks inside `Object` must always have an extra block label +// `"name"`, `"address"` and `"street"` in the above example) that specifies +// the name of the property that should be created in the JSON object result. +// This label also acts as a default name selector for the nested spec, allowing +// the `Attr` blocks in the above example to omit the usually-required `name` +// argument in cases where the HCL input name and JSON output name are the same. +// +// An `Object` spec block creates no validation constraints, but it passes on +// any validation constraints created by the nested specs. +type Object struct { + Attributes map[string]*Spec `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object) Reset() { *m = Object{} } +func (m *Object) String() string { return proto.CompactTextString(m) } +func (*Object) ProtoMessage() {} +func (*Object) Descriptor() ([]byte, []int) { + return fileDescriptor_28863966909039be, []int{9} +} + +func (m *Object) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Object.Unmarshal(m, b) +} +func (m *Object) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Object.Marshal(b, m, deterministic) +} +func (m *Object) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object.Merge(m, src) +} +func (m *Object) XXX_Size() int { + return xxx_messageInfo_Object.Size(m) +} +func (m *Object) XXX_DiscardUnknown() { + xxx_messageInfo_Object.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Object proto.InternalMessageInfo + +func (m *Object) GetAttributes() map[string]*Spec { + if m != nil { + return m.Attributes + } + return nil +} + +// Array spec type produces a JSON array whose elements are set based on +// any nested spec blocks: +// +// ```hcl +// Array { +// Attr { +// name = "first_element" +// type = "string" +// } +// Attr { +// name = "second_element" +// type = "string" +// } +// } +// ``` +// +// An `Array` spec block creates no validation constraints, but it passes on +// any validation constraints created by the nested specs. +type Array struct { + Values []*Spec `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Array) Reset() { *m = Array{} } +func (m *Array) String() string { return proto.CompactTextString(m) } +func (*Array) ProtoMessage() {} +func (*Array) Descriptor() ([]byte, []int) { + return fileDescriptor_28863966909039be, []int{10} +} + +func (m *Array) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Array.Unmarshal(m, b) +} +func (m *Array) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Array.Marshal(b, m, deterministic) +} +func (m *Array) XXX_Merge(src proto.Message) { + xxx_messageInfo_Array.Merge(m, src) +} +func (m *Array) XXX_Size() int { + return xxx_messageInfo_Array.Size(m) +} +func (m *Array) XXX_DiscardUnknown() { + xxx_messageInfo_Array.DiscardUnknown(m) +} + +var xxx_messageInfo_Array proto.InternalMessageInfo + +func (m *Array) GetValues() []*Spec { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Spec)(nil), "hashicorp.nomad.plugins.shared.hclspec.Spec") + proto.RegisterType((*Attr)(nil), "hashicorp.nomad.plugins.shared.hclspec.Attr") + proto.RegisterType((*Block)(nil), "hashicorp.nomad.plugins.shared.hclspec.Block") + proto.RegisterType((*BlockAttrs)(nil), 
"hashicorp.nomad.plugins.shared.hclspec.BlockAttrs") + proto.RegisterType((*BlockList)(nil), "hashicorp.nomad.plugins.shared.hclspec.BlockList") + proto.RegisterType((*BlockSet)(nil), "hashicorp.nomad.plugins.shared.hclspec.BlockSet") + proto.RegisterType((*BlockMap)(nil), "hashicorp.nomad.plugins.shared.hclspec.BlockMap") + proto.RegisterType((*Literal)(nil), "hashicorp.nomad.plugins.shared.hclspec.Literal") + proto.RegisterType((*Default)(nil), "hashicorp.nomad.plugins.shared.hclspec.Default") + proto.RegisterType((*Object)(nil), "hashicorp.nomad.plugins.shared.hclspec.Object") + proto.RegisterMapType((map[string]*Spec)(nil), "hashicorp.nomad.plugins.shared.hclspec.Object.AttributesEntry") + proto.RegisterType((*Array)(nil), "hashicorp.nomad.plugins.shared.hclspec.Array") +} + +func init() { + proto.RegisterFile("plugins/shared/hclspec/hcl_spec.proto", fileDescriptor_28863966909039be) +} + +var fileDescriptor_28863966909039be = []byte{ + // 624 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x96, 0x4d, 0x6f, 0xd3, 0x4c, + 0x10, 0xc7, 0xe3, 0xc4, 0xaf, 0xd3, 0xc3, 0xf3, 0x68, 0x85, 0x90, 0x55, 0x0e, 0x54, 0x96, 0x40, + 0x3d, 0x80, 0x0b, 0xe5, 0x82, 0x38, 0x20, 0x35, 0x6a, 0x91, 0x81, 0x46, 0xad, 0xb6, 0x82, 0x03, + 0x07, 0xa2, 0xb5, 0xb3, 0x10, 0x13, 0xbf, 0xb1, 0xbb, 0x41, 0x8d, 0x04, 0x1f, 0x84, 0x03, 0xf0, + 0xa9, 0xf8, 0x3e, 0x68, 0x5f, 0x9c, 0x14, 0x94, 0x43, 0x1c, 0x7a, 0xe0, 0x94, 0x19, 0x8f, 0xfe, + 0x3f, 0xcf, 0xec, 0xce, 0x78, 0x02, 0x77, 0x9a, 0x62, 0xfe, 0x3e, 0xaf, 0xf8, 0x01, 0x9f, 0x12, + 0x46, 0x27, 0x07, 0xd3, 0xac, 0xe0, 0x0d, 0xcd, 0xe4, 0xef, 0x58, 0x1a, 0x71, 0xc3, 0x6a, 0x51, + 0xa3, 0xbb, 0x53, 0xc2, 0xa7, 0x79, 0x56, 0xb3, 0x26, 0xae, 0xea, 0x92, 0x4c, 0x62, 0x23, 0x8b, + 0xb5, 0x2c, 0x36, 0xb2, 0xe8, 0x9b, 0x0b, 0xf6, 0x45, 0x43, 0x33, 0x94, 0x80, 0x5b, 0xa7, 0x1f, + 0x68, 0x26, 0x42, 0x6b, 0xcf, 0xda, 0xdf, 0x39, 0x8c, 0xe3, 0xcd, 0x08, 0xf1, 0x99, 0x52, 0x25, + 0x3d, 0x6c, 0xf4, 
0xe8, 0x04, 0x1c, 0xc2, 0x18, 0x59, 0x84, 0x7d, 0x05, 0xba, 0xbf, 0x29, 0xe8, + 0x48, 0x8a, 0x92, 0x1e, 0xd6, 0x6a, 0x34, 0x04, 0xfb, 0x48, 0x08, 0x16, 0x0e, 0x14, 0xe5, 0xde, + 0xc6, 0x14, 0x21, 0x58, 0xd2, 0xc3, 0x4a, 0x8b, 0xce, 0x61, 0x27, 0x2d, 0xea, 0x6c, 0x36, 0xfe, + 0x44, 0x8a, 0x39, 0x0d, 0xed, 0x6e, 0x09, 0x0d, 0xa5, 0x34, 0xe9, 0x61, 0x50, 0x8c, 0xd7, 0x12, + 0x81, 0x5e, 0xb5, 0x44, 0x22, 0x04, 0xe3, 0xa1, 0xa3, 0x88, 0x87, 0x9d, 0x88, 0x32, 0x33, 0xbe, + 0xc4, 0x2a, 0x0f, 0x61, 0xd0, 0xde, 0xb8, 0xc8, 0xb9, 0x08, 0x5d, 0x45, 0x7d, 0xd8, 0x89, 0x7a, + 0x9a, 0x73, 0x79, 0x09, 0x41, 0xda, 0x3a, 0xe8, 0x0c, 0xb4, 0x33, 0xe6, 0x54, 0x84, 0x9e, 0x42, + 0x3e, 0xe8, 0x84, 0xbc, 0xa0, 0x92, 0xe8, 0xa7, 0xc6, 0x5e, 0x01, 0x4b, 0xd2, 0x84, 0xfe, 0x16, + 0xc0, 0x11, 0x69, 0x96, 0xc0, 0x11, 0x69, 0xd0, 0x4b, 0xf0, 0x26, 0xf4, 0x1d, 0x99, 0x17, 0x22, + 0x0c, 0x14, 0xee, 0x60, 0x53, 0xdc, 0xb1, 0x96, 0x25, 0x3d, 0xdc, 0x12, 0x24, 0xac, 0xc8, 0x05, + 0x65, 0xa4, 0x08, 0xa1, 0x1b, 0xec, 0x54, 0xcb, 0x24, 0xcc, 0x10, 0x86, 0x1e, 0x38, 0x2a, 0xcb, + 0xe8, 0x85, 0xee, 0x42, 0x84, 0xc0, 0xae, 0x48, 0x49, 0xd5, 0x70, 0x04, 0x58, 0xd9, 0xf2, 0x99, + 0x58, 0x34, 0x54, 0xf5, 0x79, 0x80, 0x95, 0x8d, 0x76, 0xc1, 0x67, 0xf4, 0xe3, 0x3c, 0x67, 0x74, + 0xa2, 0x3a, 0xd7, 0xc7, 0x4b, 0x3f, 0xfa, 0x02, 0x8e, 0x3a, 0x86, 0xb5, 0xb0, 0xab, 0xc2, 0xfe, + 0xef, 0x42, 0x74, 0x0c, 0x6e, 0x45, 0xb9, 0x30, 0xc8, 0x0e, 0xc3, 0x20, 0x27, 0x1b, 0x1b, 0x6d, + 0x74, 0x0e, 0xb0, 0xea, 0xbf, 0x6b, 0x29, 0xe8, 0x87, 0x05, 0xc1, 0xb2, 0xf9, 0xd6, 0x12, 0x6f, + 0x41, 0x50, 0xe6, 0xd5, 0x38, 0x17, 0xb4, 0xe4, 0x0a, 0x6b, 0x63, 0xbf, 0xcc, 0xab, 0xe7, 0xd2, + 0x57, 0x41, 0x72, 0x69, 0x82, 0x03, 0x13, 0x24, 0x97, 0x3a, 0xb8, 0xaa, 0xd9, 0xfe, 0x8b, 0x9a, + 0xbf, 0x5b, 0xe0, 0xb7, 0xbd, 0xfc, 0x4f, 0x26, 0xf8, 0xd9, 0xe4, 0x27, 0xc7, 0x61, 0x5d, 0x7e, + 0x37, 0xc1, 0x2d, 0x48, 0x4a, 0x0b, 0x99, 0xdc, 0x60, 0x3f, 0xc0, 0xc6, 0xbb, 0xa6, 0x96, 0xb8, + 0x0d, 0x9e, 0x69, 0x7e, 0x74, 0x03, 0x1c, 0xfd, 0x91, 
0xd4, 0x6f, 0xd7, 0x4e, 0xf4, 0xd5, 0x02, + 0xcf, 0xcc, 0x1a, 0x7a, 0x06, 0x5e, 0xc3, 0xf2, 0x92, 0xb0, 0x85, 0x59, 0x11, 0xdd, 0xde, 0xd9, + 0x8a, 0x25, 0xa7, 0x9d, 0xfa, 0xfe, 0x36, 0x1c, 0x23, 0x8e, 0x7e, 0x5a, 0xe0, 0xea, 0xe5, 0x83, + 0xde, 0x02, 0xc8, 0xef, 0x71, 0x9e, 0xce, 0x05, 0xe5, 0xa1, 0xb5, 0x37, 0xd8, 0xdf, 0x39, 0x7c, + 0xda, 0x6d, 0x81, 0xa9, 0xc5, 0xa1, 0x01, 0x27, 0x95, 0x60, 0x0b, 0x7c, 0x85, 0xb8, 0x3b, 0x83, + 0xff, 0xfe, 0x08, 0xa3, 0xff, 0x61, 0x30, 0xa3, 0x0b, 0x73, 0x5a, 0xd2, 0x44, 0xc3, 0xf6, 0x04, + 0xb7, 0xa9, 0x4a, 0x4b, 0x9f, 0xf4, 0x1f, 0x5b, 0xd1, 0x08, 0x1c, 0xb5, 0x0a, 0xe5, 0x1d, 0xab, + 0xa7, 0x6d, 0x45, 0x1d, 0xef, 0x58, 0x6b, 0x87, 0xc1, 0x1b, 0xcf, 0x3c, 0x4f, 0x5d, 0xf5, 0xdf, + 0xe0, 0xd1, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x32, 0x20, 0x9f, 0xf2, 0x44, 0x08, 0x00, 0x00, +} diff --git a/plugin_interface/shared/hclspec/hcl_spec.proto b/plugin_interface/shared/hclspec/hcl_spec.proto new file mode 100644 index 00000000000..ddcc74fb326 --- /dev/null +++ b/plugin_interface/shared/hclspec/hcl_spec.proto @@ -0,0 +1,425 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; + +option go_package = "hclspec"; + +/* Spec allows exposing the specification for an HCL body, allowing for parsing and +validation. + +Certain expressions within a specification may use the following functions. +The documentation for each spec type above specifies where functions may +be used. + +* `abs(number)` returns the absolute (positive) value of the given number. +* `coalesce(vals...)` returns the first non-null value given. +* `concat(lists...)` concatenates together all of the given lists to produce a new list. +* `hasindex(val, idx)` returns true if the expression `val[idx]` could succeed. +* `int(number)` returns the integer portion of the given number, rounding towards zero. +* `jsondecode(str)` interprets the given string as JSON and returns the resulting data structure. 
+* `jsonencode(val)` returns a JSON-serialized version of the given value. +* `length(collection)` returns the number of elements in the given collection (list, set, map, object, or tuple). +* `lower(string)` returns the given string with all uppercase letters converted to lowercase. +* `max(numbers...)` returns the greatest of the given numbers. +* `min(numbers...)` returns the smallest of the given numbers. +* `reverse(string)` returns the given string with all of the characters in reverse order. +* `strlen(string)` returns the number of characters in the given string. +* `substr(string, offset, length)` returns the requested substring of the given string. +* `upper(string)` returns the given string with all lowercase letters converted to uppercase. + +## Type Expressions + +Type expressions are used to describe the expected type of an attribute, as +an additional validation constraint. + +A type expression uses primitive type names and compound type constructors. +A type constructor builds a new type based on one or more type expression +arguments. + +The following type names and type constructors are supported: + +* `any` is a wildcard that accepts a value of any type. (In HCL terms, this + is the _dynamic pseudo-type_.) +* `string` is a Unicode string. +* `number` is an arbitrary-precision floating point number. +* `bool` is a boolean value (`true` or `false`) +* `list(element_type)` constructs a list type with the given element type +* `set(element_type)` constructs a set type with the given element type +* `map(element_type)` constructs a map type with the given element type +* `object({name1 = element_type, name2 = element_type, ...})` constructs + an object type with the given attribute types. +* `tuple([element_type, element_type, ...])` constructs a tuple type with + the given element types. This can be used, for example, to require an + array with a particular number of elements, or with elements of different + types. 
+ +`null` is a valid value of any type, and not a type itself. +*/ +package hashicorp.nomad.plugins.shared.hclspec; + +// Spec defines the available specification types. +message Spec { + oneof block { + Object object = 1; + Array array = 2; + // buf:lint:ignore FIELD_LOWER_SNAKE_CASE + Attr Attr = 3; + Block block_value = 4; + BlockAttrs block_attrs = 5; + BlockList block_list = 6; + BlockSet block_set = 7; + BlockMap block_map = 8; + Default default = 9; + Literal literal = 10; + } +} + +/* Attr spec type reads the value of an attribute in the current body +and returns that value as its result. It also creates validation constraints +for the given attribute name and its value. + +```hcl +Attr { + name = "document_root" + type = string + required = true +} +``` + +`Attr` spec blocks accept the following arguments: + +* `name` (required) - The attribute name to expect within the HCL input file. + This may be omitted when a default name selector is created by a parent + `Object` spec, if the input attribute name should match the output JSON + object property name. + +* `type` (optional) - A [type expression](#type-expressions) that the given + attribute value must conform to. If this argument is set, `hcldec` will + automatically convert the given input value to this type or produce an + error if that is not possible. + +* `required` (optional) - If set to `true`, `hcldec` will produce an error + if a value is not provided for the source attribute. + +`Attr` is a leaf spec type, so no nested spec blocks are permitted. +*/ +message Attr { + string name = 1; + string type = 2; + bool required = 3; +} + +/* Block spec type applies one nested spec block to the contents of a +block within the current body and returns the result of that spec. It also +creates validation constraints for the given block type name. 
+ +```hcl +Block { + name = "logging" + + Object { + Attr "level" { + type = string + } + Attr "file" { + type = string + } + } +} +``` + +`Block` spec blocks accept the following arguments: + +* `name` (required) - The block type name to expect within the HCL + input file. This may be omitted when a default name selector is created + by a parent `Object` spec, if the input block type name should match the + output JSON object property name. + +* `required` (optional) - If set to `true`, `hcldec` will produce an error + if a block of the specified type is not present in the current body. + +`Block` creates a validation constraint that there must be zero or one blocks +of the given type name, or exactly one if `required` is set. + +`Block` expects a single nested spec block, which is applied to the body of +the block of the given type when it is present. + +*/ +message Block { + string name = 1; + bool required = 2; + Spec nested = 3; +} + +/* + The BlockAttrs spec type is similar to an Attr spec block of a map type, + but it produces a map from the attributes of a block rather than from an + attribute's expression. + + ```hcl + BlockAttrs { + name = "variables" + type = string + required = false + } + ``` + + This allows a map with user-defined keys to be produced within block syntax, + but due to the constraints of that syntax it also means that the user will + be unable to dynamically-generate either individual key names using key + expressions or the entire map value using a `for` expression. + + `BlockAttrs` spec blocks accept the following arguments: + + * `name` (required) - The block type name to expect within the HCL + input file. This may be omitted when a default name selector is created + by a parent `object` spec, if the input block type name should match the + output JSON object property name. + + * `type` (required) - The value type to require for each of the + attributes within a matched block. 
The resulting value will be a JSON + object whose property values are of this type. + + * `required` (optional) - If `true`, an error will be produced if a block + of the given type is not present. If `false` -- the default -- an absent + block will be indicated by producing `null`. +*/ +message BlockAttrs { + string name = 1; + string type = 2; + bool required = 3; +} + +/* BlockList spec type is similar to `Block`, but it accepts zero or +more blocks of a specified type rather than requiring zero or one. The +result is a JSON array with one entry per block of the given type. + +```hcl +BlockList { + name = "log_file" + + Object { + Attr "level" { + type = string + } + Attr "filename" { + type = string + required = true + } + } +} +``` + +`BlockList` spec blocks accept the following arguments: + +* `name` (required) - The block type name to expect within the HCL + input file. This may be omitted when a default name selector is created + by a parent `Object` spec, if the input block type name should match the + output JSON object property name. + +* `min_items` (optional) - If set to a number greater than zero, `hcldec` will + produce an error if fewer than the given number of blocks are present. + +* `max_items` (optional) - If set to a number greater than zero, `hcldec` will + produce an error if more than the given number of blocks are present. This + attribute must be greater than or equal to `min_items` if both are set. + +`Block` creates a validation constraint on the number of blocks of the given +type that must be present. + +`Block` expects a single nested spec block, which is applied to the body of +each matching block to produce the resulting list items. + +*/ +message BlockList { + string name = 1; + uint64 min_items = 2; + uint64 max_items = 3; + Spec nested = 4; +} + +/* BlockSet spec type behaves the same as BlockList except that +the result is in no specific order and any duplicate items are removed. 
+ +```hcl +BlockSet { + name = "log_file" + + Object { + Attr "level" { + type = string + } + Attr "filename" { + type = string + required = true + } + } +} +``` + +The contents of `BlockSet` are the same as for `BlockList`. + +*/ +message BlockSet { + string name = 1; + uint64 min_items = 2; + uint64 max_items = 3; + Spec nested = 4; +} + +/* BlockMap spec type is similar to `Block`, but it accepts zero or +more blocks of a specified type rather than requiring zero or one. The +result is a JSON object, or possibly multiple nested JSON objects, whose +properties are derived from the labels set on each matching block. + +```hcl +BlockMap { + name = "log_file" + labels = ["filename"] + + Object { + Attr "level" { + type = string + required = true + } + } +} +``` + +`BlockMap` spec blocks accept the following arguments: + +* `name` (required) - The block type name to expect within the HCL + input file. This may be omitted when a default name selector is created + by a parent `Object` spec, if the input block type name should match the + output JSON object property name. + +* `labels` (required) - A list of user-oriented block label names. Each entry + in this list creates one level of object within the output value, and + requires one additional block header label on any child block of this type. + Block header labels are the quoted strings that appear after the block type + name but before the opening `{`. + +`Block` creates a validation constraint on the number of labels that blocks +of the given type must have. + +`Block` expects a single nested spec block, which is applied to the body of +each matching block to produce the resulting map items. + +*/ +message BlockMap { + string name = 1; + repeated string labels = 2; + Spec nested = 3; +} + +/* Literal spec type returns a given literal value, and creates no +validation constraints. It is most commonly used with the `Default` spec +type to create a fallback value, but can also be used e.g. 
 to fill out
+required properties in an `Object` spec that do not correspond to any
+construct in the input configuration.
+
+```hcl
+Literal {
+  value = "hello world"
+}
+```
+
+`Literal` spec blocks accept the following argument:
+
+* `value` (required) - The value to return. This attribute may be an expression
+  that uses [functions](#spec-definition-functions).
+
+`Literal` is a leaf spec type, so no nested spec blocks are permitted.
+*/
+message Literal {
+  string value = 1;
+}
+
+/* Default spec type evaluates a sequence of nested specs in turn and
+returns the result of the first one that produces a non-null value.
+It creates no validation constraints of its own, but passes on the validation
+constraints from its first nested block.
+
+```hcl
+Default {
+  Attr {
+   name = "private"
+   type = bool
+  }
+  Literal {
+   value = false
+  }
+}
+```
+
+A `Default` spec block must have at least one nested spec block, and should
+generally have at least two since otherwise the `Default` wrapper is a no-op.
+
+The second and any subsequent spec blocks are _fallback_ specs. These exhibit
+their usual behavior but are not able to impose validation constraints on the
+current body since they are not evaluated unless all prior specs produce
+`null` as their result.
+
+*/
+message Default {
+  Spec primary = 1;
+  Spec default = 2;
+}
+
+/* Object spec type is the most commonly used at the root of a spec file.
+Its result is a JSON object whose properties are set based on any nested
+spec blocks:
+
+```hcl
+Object {
+  Attr "name" {
+   type = "string"
+  }
+  Block "address" {
+   Object {
+    Attr "street" {
+     type = "string"
+    }
+    # ...
+   }
+  }
+}
+```
+
+Nested spec blocks inside `Object` must always have an extra block label
+(`"name"`, `"address"` and `"street"` in the above example) that specifies
+the name of the property that should be created in the JSON object result.
+This label also acts as a default name selector for the nested spec, allowing
+the `Attr` blocks in the above example to omit the usually-required `name`
+argument in cases where the HCL input name and JSON output name are the same.
+
+An `Object` spec block creates no validation constraints, but it passes on
+any validation constraints created by the nested specs.
+*/
+message Object {
+  map<string, Spec> attributes = 1;
+}
+
+/* Array spec type produces a JSON array whose elements are set based on
+any nested spec blocks:
+
+```hcl
+Array {
+  Attr {
+   name = "first_element"
+   type = "string"
+  }
+  Attr {
+   name = "second_element"
+   type = "string"
+  }
+}
+```
+
+An `Array` spec block creates no validation constraints, but it passes on
+any validation constraints created by the nested specs.
+*/
+message Array {
+  repeated Spec values = 1;
+}
diff --git a/plugin_interface/shared/hclspec/spec.go b/plugin_interface/shared/hclspec/spec.go
new file mode 100644
index 00000000000..5432d83747f
--- /dev/null
+++ b/plugin_interface/shared/hclspec/spec.go
@@ -0,0 +1,191 @@
+// Copyright IBM Corp. 2015, 2025
+// SPDX-License-Identifier: MPL-2.0
+
+package hclspec
+
+// ObjectSpec wraps the object and returns a spec.
+func ObjectSpec(obj *Object) *Spec {
+	return &Spec{
+		Block: &Spec_Object{
+			Object: obj,
+		},
+	}
+}
+
+// ArraySpec wraps the array and returns a spec.
+func ArraySpec(array *Array) *Spec {
+	return &Spec{
+		Block: &Spec_Array{
+			Array: array,
+		},
+	}
+}
+
+// AttrSpec wraps the attr and returns a spec.
+func AttrSpec(attr *Attr) *Spec {
+	return &Spec{
+		Block: &Spec_Attr{
+			Attr: attr,
+		},
+	}
+}
+
+// BlockSpec wraps the block and returns a spec.
+func BlockSpec(block *Block) *Spec {
+	return &Spec{
+		Block: &Spec_BlockValue{
+			BlockValue: block,
+		},
+	}
+}
+
+// BlockAttrsSpec wraps the block attrs and returns a spec.
+func BlockAttrsSpec(blockAttrs *BlockAttrs) *Spec { + return &Spec{ + Block: &Spec_BlockAttrs{ + BlockAttrs: blockAttrs, + }, + } +} + +// BlockListSpec wraps the block list and returns a spec. +func BlockListSpec(blockList *BlockList) *Spec { + return &Spec{ + Block: &Spec_BlockList{ + BlockList: blockList, + }, + } +} + +// BlockSetSpec wraps the block set and returns a spec. +func BlockSetSpec(blockSet *BlockSet) *Spec { + return &Spec{ + Block: &Spec_BlockSet{ + BlockSet: blockSet, + }, + } +} + +// BlockMapSpec wraps the block map and returns a spec. +func BlockMapSpec(blockMap *BlockMap) *Spec { + return &Spec{ + Block: &Spec_BlockMap{ + BlockMap: blockMap, + }, + } +} + +// DefaultSpec wraps the default and returns a spec. +func DefaultSpec(d *Default) *Spec { + return &Spec{ + Block: &Spec_Default{ + Default: d, + }, + } +} + +// LiteralSpec wraps the literal and returns a spec. +func LiteralSpec(l *Literal) *Spec { + return &Spec{ + Block: &Spec_Literal{ + Literal: l, + }, + } +} + +// NewObject returns a new object spec. +func NewObject(attrs map[string]*Spec) *Spec { + return ObjectSpec(&Object{ + Attributes: attrs, + }) +} + +// NewAttr returns a new attribute spec. +func NewAttr(name, attrType string, required bool) *Spec { + return AttrSpec(&Attr{ + Name: name, + Type: attrType, + Required: required, + }) +} + +// NewBlock returns a new block spec. +func NewBlock(name string, required bool, nested *Spec) *Spec { + return BlockSpec(&Block{ + Name: name, + Required: required, + Nested: nested, + }) +} + +// NewBlockAttrs returns a new block attrs spec +func NewBlockAttrs(name, elementType string, required bool) *Spec { + return BlockAttrsSpec(&BlockAttrs{ + Name: name, + Required: required, + Type: elementType, + }) +} + +// NewBlockList returns a new block list spec that has no limits. 
+func NewBlockList(name string, nested *Spec) *Spec { + return NewBlockListLimited(name, 0, 0, nested) +} + +// NewBlockListLimited returns a new block list spec that limits the number of +// blocks. +func NewBlockListLimited(name string, min, max uint64, nested *Spec) *Spec { + return BlockListSpec(&BlockList{ + Name: name, + MinItems: min, + MaxItems: max, + Nested: nested, + }) +} + +// NewBlockSet returns a new block set spec that has no limits. +func NewBlockSet(name string, nested *Spec) *Spec { + return NewBlockSetLimited(name, 0, 0, nested) +} + +// NewBlockSetLimited returns a new block set spec that limits the number of +// blocks. +func NewBlockSetLimited(name string, min, max uint64, nested *Spec) *Spec { + return BlockSetSpec(&BlockSet{ + Name: name, + MinItems: min, + MaxItems: max, + Nested: nested, + }) +} + +// NewBlockMap returns a new block map spec. +func NewBlockMap(name string, labels []string, nested *Spec) *Spec { + return BlockMapSpec(&BlockMap{ + Name: name, + Labels: labels, + Nested: nested, + }) +} + +// NewLiteral returns a new literal spec. +func NewLiteral(value string) *Spec { + return LiteralSpec(&Literal{ + Value: value, + }) +} + +// NewDefault returns a new default spec. +func NewDefault(primary, defaultValue *Spec) *Spec { + return DefaultSpec(&Default{ + Primary: primary, + Default: defaultValue, + }) +} + +// NewArray returns a new array spec. +func NewArray(values []*Spec) *Spec { + return ArraySpec(&Array{ + Values: values, + }) +} diff --git a/plugin_interface/shared/structs/attribute.go b/plugin_interface/shared/structs/attribute.go new file mode 100644 index 00000000000..d5e03d755a1 --- /dev/null +++ b/plugin_interface/shared/structs/attribute.go @@ -0,0 +1,460 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package structs + +import ( + "fmt" + "math/big" + "strconv" + "strings" + "unicode" + + "github.com/hashicorp/nomad/plugin-interface/helper" +) + +const ( + // floatPrecision is the precision used before rounding. It is set to a high + // number to give a high chance of correctly returning equality. + floatPrecision = uint(256) +) + +// BaseUnit is a unique base unit. All units that share the same base unit +// should be comparable. +type BaseUnit uint16 + +const ( + UnitScalar BaseUnit = iota + UnitByte + UnitByteRate + UnitHertz + UnitWatt +) + +// Unit describes a unit and its multiplier over the base unit type +type Unit struct { + // Name is the name of the unit (GiB, MB/s) + Name string + + // Base is the base unit for the unit + Base BaseUnit + + // Multiplier is the multiplier over the base unit (KiB multiplier is 1024) + Multiplier int64 + + // InverseMultiplier specifies that the multiplier is an inverse so: + // Base / Multiplier. For example a mW is a W/1000. + InverseMultiplier bool +} + +// Comparable returns if two units are comparable +func (u *Unit) Comparable(o *Unit) bool { + if u == nil || o == nil { + return false + } + + return u.Base == o.Base +} + +// ParseAttribute takes a string and parses it into an attribute, pulling out +// units if they are specified as a suffix on a number. +func ParseAttribute(input string) *Attribute { + ll := len(input) + if ll == 0 { + return &Attribute{String: helper.PointerOf(input)} + } + + // Check if the string is a number ending with potential units + var unit string + numeric := input + if unicode.IsLetter(rune(input[ll-1])) { + // Try suffix matching + for _, u := range lengthSortedUnits { + if strings.HasSuffix(input, u) { + unit = u + break + } + } + + // Check if we know about the unit. 
+ if len(unit) != 0 { + numeric = strings.TrimSpace(strings.TrimSuffix(input, unit)) + } + } + + // Try to parse as an int + i, err := strconv.ParseInt(numeric, 10, 64) + if err == nil { + return &Attribute{Int: helper.PointerOf(i), Unit: unit} + } + + // Try to parse as a float + f, err := strconv.ParseFloat(numeric, 64) + if err == nil { + return &Attribute{Float: helper.PointerOf(f), Unit: unit} + } + + // Try to parse as a bool + b, err := strconv.ParseBool(input) + if err == nil { + return &Attribute{Bool: helper.PointerOf(b)} + } + + return &Attribute{String: helper.PointerOf(input)} +} + +// Attribute is used to describe the value of an attribute, optionally +// specifying units +type Attribute struct { + // Float is the float value for the attribute + Float *float64 + + // Int is the int value for the attribute + Int *int64 + + // String is the string value for the attribute + String *string + + // Bool is the bool value for the attribute + Bool *bool + + // Unit is the optional unit for the set int or float value + Unit string +} + +// NewStringAttribute returns a new string attribute. +func NewStringAttribute(s string) *Attribute { + return &Attribute{ + String: helper.PointerOf(s), + } +} + +// NewBoolAttribute returns a new boolean attribute. +func NewBoolAttribute(b bool) *Attribute { + return &Attribute{ + Bool: helper.PointerOf(b), + } +} + +// NewIntAttribute returns a new integer attribute. The unit is not checked +// to be valid. +func NewIntAttribute(i int64, unit string) *Attribute { + return &Attribute{ + Int: helper.PointerOf(i), + Unit: unit, + } +} + +// NewFloatAttribute returns a new float attribute. The unit is not checked to +// be valid. +func NewFloatAttribute(f float64, unit string) *Attribute { + return &Attribute{ + Float: helper.PointerOf(f), + Unit: unit, + } +} + +// GetString returns the string value of the attribute or false if the attribute +// doesn't contain a string. 
+func (a *Attribute) GetString() (value string, ok bool) { + if a.String == nil { + return "", false + } + + return *a.String, true +} + +// GetBool returns the boolean value of the attribute or false if the attribute +// doesn't contain a boolean. +func (a *Attribute) GetBool() (value bool, ok bool) { + if a.Bool == nil { + return false, false + } + + return *a.Bool, true +} + +// GetInt returns the integer value of the attribute or false if the attribute +// doesn't contain a integer. +func (a *Attribute) GetInt() (value int64, ok bool) { + if a.Int == nil { + return 0, false + } + + return *a.Int, true +} + +// GetFloat returns the float value of the attribute or false if the attribute +// doesn't contain a float. +func (a *Attribute) GetFloat() (value float64, ok bool) { + if a.Float == nil { + return 0.0, false + } + + return *a.Float, true +} + +// Copy returns a copied version of the attribute +func (a *Attribute) Copy() *Attribute { + if a == nil { + return nil + } + + ca := &Attribute{ + Unit: a.Unit, + } + + if a.Float != nil { + ca.Float = helper.PointerOf(*a.Float) + } + if a.Int != nil { + ca.Int = helper.PointerOf(*a.Int) + } + if a.Bool != nil { + ca.Bool = helper.PointerOf(*a.Bool) + } + if a.String != nil { + ca.String = helper.PointerOf(*a.String) + } + + return ca +} + +// GoString returns a string representation of the attribute +func (a *Attribute) GoString() string { + if a == nil { + return "nil attribute" + } + + var b strings.Builder + if a.Float != nil { + b.WriteString(fmt.Sprintf("%v", *a.Float)) + } else if a.Int != nil { + b.WriteString(fmt.Sprintf("%v", *a.Int)) + } else if a.Bool != nil { + b.WriteString(fmt.Sprintf("%v", *a.Bool)) + } else if a.String != nil { + b.WriteString(*a.String) + } + + if a.Unit != "" { + b.WriteString(a.Unit) + } + + return b.String() +} + +// Validate checks if the attribute is valid +func (a *Attribute) Validate() error { + if a.Unit != "" { + if _, ok := UnitIndex[a.Unit]; !ok { + return 
fmt.Errorf("unrecognized unit %q", a.Unit) + } + + // Check only int/float set + if a.String != nil || a.Bool != nil { + return fmt.Errorf("unit can not be specified on a boolean or string attribute") + } + } + + // Assert only one of the attributes is set + set := 0 + if a.Float != nil { + set++ + } + if a.Int != nil { + set++ + } + if a.String != nil { + set++ + } + if a.Bool != nil { + set++ + } + + if set == 0 { + return fmt.Errorf("no attribute value set") + } else if set > 1 { + return fmt.Errorf("only one attribute value may be set") + } + + return nil +} + +// Comparable returns whether the two attributes are comparable +func (a *Attribute) Comparable(b *Attribute) bool { + if a == nil || b == nil { + return false + } + + // First use the units to decide if comparison is possible + aUnit := a.getTypedUnit() + bUnit := b.getTypedUnit() + if aUnit != nil && bUnit != nil { + return aUnit.Comparable(bUnit) + } else if aUnit != nil && bUnit == nil { + return false + } else if aUnit == nil && bUnit != nil { + return false + } + + if a.String != nil { + return b.String != nil + } + + if a.Bool != nil { + return b.Bool != nil + } + + return true +} + +// Compare compares two attributes. If the returned boolean value is false, it +// means the values are not comparable, either because they are of different +// types (bool versus int) or the units are incompatible for comparison. +// The returned int will be 0 if a==b, -1 if a < b, and +1 if a > b for all +// values but bool. For bool it will be 0 if a==b or 1 if a!=b. 
+func (a *Attribute) Compare(b *Attribute) (int, bool) { + if !a.Comparable(b) { + return 0, false + } + + return a.comparator()(b) +} + +// comparator returns the comparator function for the attribute +func (a *Attribute) comparator() compareFn { + if a.Bool != nil { + return a.boolComparator + } + if a.String != nil { + return a.stringComparator + } + if a.Int != nil || a.Float != nil { + return a.numberComparator + } + + return nullComparator +} + +// boolComparator compares two boolean attributes +func (a *Attribute) boolComparator(b *Attribute) (int, bool) { + if *a.Bool == *b.Bool { + return 0, true + } + + return 1, true +} + +// stringComparator compares two string attributes +func (a *Attribute) stringComparator(b *Attribute) (int, bool) { + return strings.Compare(*a.String, *b.String), true +} + +// numberComparator compares two number attributes, having either Int or Float +// set. +func (a *Attribute) numberComparator(b *Attribute) (int, bool) { + // If they are both integers we do perfect precision comparisons + if a.Int != nil && b.Int != nil { + return a.intComparator(b) + } + + // Push both into the float space + af := a.getBigFloat() + bf := b.getBigFloat() + if af == nil || bf == nil { + return 0, false + } + + return af.Cmp(bf), true +} + +// intComparator compares two integer attributes. +func (a *Attribute) intComparator(b *Attribute) (int, bool) { + ai := a.getInt() + bi := b.getInt() + + if ai == bi { + return 0, true + } else if ai < bi { + return -1, true + } else { + return 1, true + } +} + +// nullComparator always returns false and is used when no comparison function +// is possible +func nullComparator(*Attribute) (int, bool) { + return 0, false +} + +// compareFn is used to compare two attributes. It returns -1, 0, 1 for ordering +// and a boolean for if the comparison is possible. 
+type compareFn func(b *Attribute) (int, bool) + +// getBigFloat returns a big.Float representation of the attribute, converting +// the value to the base unit if a unit is specified. +func (a *Attribute) getBigFloat() *big.Float { + f := new(big.Float) + f.SetPrec(floatPrecision) + if a.Int != nil { + f.SetInt64(*a.Int) + } else if a.Float != nil { + f.SetFloat64(*a.Float) + } else { + return nil + } + + // Get the unit + u := a.getTypedUnit() + + // If there is no unit just return the float + if u == nil { + return f + } + + // Convert to the base unit + multiplier := new(big.Float) + multiplier.SetPrec(floatPrecision) + multiplier.SetInt64(u.Multiplier) + if u.InverseMultiplier { + base := big.NewFloat(1.0) + base.SetPrec(floatPrecision) + multiplier = multiplier.Quo(base, multiplier) + } + + f.Mul(f, multiplier) + return f +} + +// getInt returns an int representation of the attribute, converting +// the value to the base unit if a unit is specified. +func (a *Attribute) getInt() int64 { + if a.Int == nil { + return 0 + } + + i := *a.Int + + // Get the unit + u := a.getTypedUnit() + + // If there is no unit just return the int + if u == nil { + return i + } + + if u.InverseMultiplier { + i /= u.Multiplier + } else { + i *= u.Multiplier + } + + return i +} + +// getTypedUnit returns the Unit for the attribute or nil if no unit exists. +func (a *Attribute) getTypedUnit() *Unit { + return UnitIndex[a.Unit] +} diff --git a/plugin_interface/shared/structs/attribute_test.go b/plugin_interface/shared/structs/attribute_test.go new file mode 100644 index 00000000000..19292629e4a --- /dev/null +++ b/plugin_interface/shared/structs/attribute_test.go @@ -0,0 +1,696 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package structs + +import ( + "fmt" + "testing" + + "github.com/hashicorp/nomad/plugin-interface/helper" + "github.com/shoenig/test/must" +) + +func TestAttribute_Validate(t *testing.T) { + cases := []struct { + Input *Attribute + Fail bool + }{ + { + Input: &Attribute{ + Bool: helper.PointerOf(true), + }, + }, + { + Input: &Attribute{ + String: helper.PointerOf("foo"), + }, + }, + { + Input: &Attribute{ + Int: helper.PointerOf(int64(123)), + }, + }, + { + Input: &Attribute{ + Float: helper.PointerOf(float64(123.2)), + }, + }, + { + Input: &Attribute{ + Bool: helper.PointerOf(true), + Unit: "MB", + }, + Fail: true, + }, + { + Input: &Attribute{ + String: helper.PointerOf("foo"), + Unit: "MB", + }, + Fail: true, + }, + { + Input: &Attribute{ + Int: helper.PointerOf(int64(123)), + Unit: "lolNO", + }, + Fail: true, + }, + { + Input: &Attribute{ + Float: helper.PointerOf(float64(123.2)), + Unit: "lolNO", + }, + Fail: true, + }, + { + Input: &Attribute{ + Int: helper.PointerOf(int64(123)), + Float: helper.PointerOf(float64(123.2)), + Unit: "mW", + }, + Fail: true, + }, + } + + for _, c := range cases { + t.Run(c.Input.GoString(), func(t *testing.T) { + if err := c.Input.Validate(); err != nil && !c.Fail { + must.NoError(t, err) + } + }) + } +} + +type compareTestCase struct { + A *Attribute + B *Attribute + Expected int + NotComparable bool +} + +func TestAttribute_Compare_Bool(t *testing.T) { + cases := []*compareTestCase{ + { + A: &Attribute{ + Bool: helper.PointerOf(true), + }, + B: &Attribute{ + Bool: helper.PointerOf(true), + }, + Expected: 0, + }, + { + A: &Attribute{ + Bool: helper.PointerOf(true), + }, + B: &Attribute{ + Bool: helper.PointerOf(false), + }, + Expected: 1, + }, + { + A: &Attribute{ + Bool: helper.PointerOf(true), + }, + B: &Attribute{ + String: helper.PointerOf("foo"), + }, + NotComparable: true, + }, + { + A: &Attribute{ + Bool: helper.PointerOf(true), + }, + B: &Attribute{ + Int: 
helper.PointerOf(int64(123)), + }, + NotComparable: true, + }, + { + A: &Attribute{ + Bool: helper.PointerOf(true), + }, + B: &Attribute{ + Float: helper.PointerOf(float64(123.2)), + }, + NotComparable: true, + }, + } + testComparison(t, cases) +} + +func TestAttribute_Compare_String(t *testing.T) { + cases := []*compareTestCase{ + { + A: &Attribute{ + String: helper.PointerOf("a"), + }, + B: &Attribute{ + String: helper.PointerOf("b"), + }, + Expected: -1, + }, + { + A: &Attribute{ + String: helper.PointerOf("hello"), + }, + B: &Attribute{ + String: helper.PointerOf("hello"), + }, + Expected: 0, + }, + { + A: &Attribute{ + String: helper.PointerOf("b"), + }, + B: &Attribute{ + String: helper.PointerOf("a"), + }, + Expected: 1, + }, + { + A: &Attribute{ + String: helper.PointerOf("hello"), + }, + B: &Attribute{ + Bool: helper.PointerOf(true), + }, + NotComparable: true, + }, + { + A: &Attribute{ + String: helper.PointerOf("hello"), + }, + B: &Attribute{ + Int: helper.PointerOf(int64(123)), + }, + NotComparable: true, + }, + { + A: &Attribute{ + String: helper.PointerOf("hello"), + }, + B: &Attribute{ + Float: helper.PointerOf(float64(123.2)), + }, + NotComparable: true, + }, + } + testComparison(t, cases) +} + +func TestAttribute_Compare_Float(t *testing.T) { + cases := []*compareTestCase{ + { + A: &Attribute{ + Float: helper.PointerOf(float64(101.5)), + }, + B: &Attribute{ + Float: helper.PointerOf(float64(100001.5)), + }, + Expected: -1, + }, + { + A: &Attribute{ + Float: helper.PointerOf(float64(100001.5)), + }, + B: &Attribute{ + Float: helper.PointerOf(float64(100001.5)), + }, + Expected: 0, + }, + { + A: &Attribute{ + Float: helper.PointerOf(float64(999999999.5)), + }, + B: &Attribute{ + Float: helper.PointerOf(float64(101.5)), + }, + Expected: 1, + }, + { + A: &Attribute{ + Float: helper.PointerOf(float64(101.5)), + }, + B: &Attribute{ + Bool: helper.PointerOf(true), + }, + NotComparable: true, + }, + { + A: &Attribute{ + Float: 
helper.PointerOf(float64(101.5)), + }, + B: &Attribute{ + String: helper.PointerOf("hello"), + }, + NotComparable: true, + }, + } + testComparison(t, cases) +} + +func TestAttribute_Compare_Int(t *testing.T) { + cases := []*compareTestCase{ + { + A: &Attribute{ + Int: helper.PointerOf(int64(3)), + }, + B: &Attribute{ + Int: helper.PointerOf(int64(10)), + }, + Expected: -1, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(10)), + }, + B: &Attribute{ + Int: helper.PointerOf(int64(10)), + }, + Expected: 0, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(100)), + }, + B: &Attribute{ + Int: helper.PointerOf(int64(10)), + }, + Expected: 1, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(10)), + }, + B: &Attribute{ + Bool: helper.PointerOf(true), + }, + NotComparable: true, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(10)), + }, + B: &Attribute{ + String: helper.PointerOf("hello"), + }, + NotComparable: true, + }, + } + testComparison(t, cases) +} + +func TestAttribute_Compare_Int_With_Units(t *testing.T) { + cases := []*compareTestCase{ + { + A: &Attribute{ + Int: helper.PointerOf(int64(3)), + Unit: "MB", + }, + B: &Attribute{ + Int: helper.PointerOf(int64(10)), + Unit: "MB", + }, + Expected: -1, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(10)), + Unit: "MB", + }, + B: &Attribute{ + Int: helper.PointerOf(int64(10)), + Unit: "MB", + }, + Expected: 0, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(100)), + Unit: "MB", + }, + B: &Attribute{ + Int: helper.PointerOf(int64(10)), + Unit: "MB", + }, + Expected: 1, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(3)), + Unit: "GB", + }, + B: &Attribute{ + Int: helper.PointerOf(int64(3)), + Unit: "MB", + }, + Expected: 1, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(1)), + Unit: "GiB", + }, + B: &Attribute{ + Int: helper.PointerOf(int64(1024)), + Unit: "MiB", + }, + Expected: 0, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(1)), + Unit: "GiB", + }, 
+ B: &Attribute{ + Int: helper.PointerOf(int64(1025)), + Unit: "MiB", + }, + Expected: -1, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(1000)), + Unit: "mW", + }, + B: &Attribute{ + Int: helper.PointerOf(int64(1)), + Unit: "W", + }, + Expected: 0, + }, + } + testComparison(t, cases) +} + +func TestAttribute_Compare_Float_With_Units(t *testing.T) { + cases := []*compareTestCase{ + { + A: &Attribute{ + Float: helper.PointerOf(float64(3.0)), + Unit: "MB", + }, + B: &Attribute{ + Float: helper.PointerOf(float64(10.0)), + Unit: "MB", + }, + Expected: -1, + }, + { + A: &Attribute{ + Float: helper.PointerOf(float64(10.0)), + Unit: "MB", + }, + B: &Attribute{ + Float: helper.PointerOf(float64(10.0)), + Unit: "MB", + }, + Expected: 0, + }, + { + A: &Attribute{ + Float: helper.PointerOf(float64(100.0)), + Unit: "MB", + }, + B: &Attribute{ + Float: helper.PointerOf(float64(10.0)), + Unit: "MB", + }, + Expected: 1, + }, + { + A: &Attribute{ + Float: helper.PointerOf(float64(3.0)), + Unit: "GB", + }, + B: &Attribute{ + Float: helper.PointerOf(float64(3.0)), + Unit: "MB", + }, + Expected: 1, + }, + { + A: &Attribute{ + Float: helper.PointerOf(float64(1.0)), + Unit: "GiB", + }, + B: &Attribute{ + Float: helper.PointerOf(float64(1024.0)), + Unit: "MiB", + }, + Expected: 0, + }, + { + A: &Attribute{ + Float: helper.PointerOf(float64(1.0)), + Unit: "GiB", + }, + B: &Attribute{ + Float: helper.PointerOf(float64(1025.0)), + Unit: "MiB", + }, + Expected: -1, + }, + { + A: &Attribute{ + Float: helper.PointerOf(float64(1000.0)), + Unit: "mW", + }, + B: &Attribute{ + Float: helper.PointerOf(float64(1.0)), + Unit: "W", + }, + Expected: 0, + }, + { + A: &Attribute{ + Float: helper.PointerOf(float64(1.5)), + Unit: "GiB", + }, + B: &Attribute{ + Float: helper.PointerOf(float64(1400.0)), + Unit: "MiB", + }, + Expected: 1, + }, + } + testComparison(t, cases) +} + +func TestAttribute_Compare_IntToFloat(t *testing.T) { + cases := []*compareTestCase{ + { + A: &Attribute{ + Int: 
helper.PointerOf(int64(3)), + }, + B: &Attribute{ + Float: helper.PointerOf(float64(10.0)), + }, + Expected: -1, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(10)), + }, + B: &Attribute{ + Float: helper.PointerOf(float64(10.0)), + }, + Expected: 0, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(10)), + }, + B: &Attribute{ + Float: helper.PointerOf(float64(10.1)), + }, + Expected: -1, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(100)), + }, + B: &Attribute{ + Float: helper.PointerOf(float64(10.0)), + }, + Expected: 1, + }, + { + A: &Attribute{ + Int: helper.PointerOf(int64(100)), + }, + B: &Attribute{ + Float: helper.PointerOf(float64(100.00001)), + }, + Expected: -1, + }, + } + testComparison(t, cases) +} + +func testComparison(t *testing.T, cases []*compareTestCase) { + for _, c := range cases { + t.Run(fmt.Sprintf("%#v vs %#v", c.A, c.B), func(t *testing.T) { + v, ok := c.A.Compare(c.B) + if !ok && !c.NotComparable { + t.Fatal("should be comparable") + } else if ok { + must.Eq(t, c.Expected, v) + } + }) + } +} + +func TestAttribute_ParseAndValidate(t *testing.T) { + cases := []struct { + Input string + Expected *Attribute + }{ + { + Input: "true", + Expected: &Attribute{ + Bool: helper.PointerOf(true), + }, + }, + { + Input: "false", + Expected: &Attribute{ + Bool: helper.PointerOf(false), + }, + }, + { + Input: "1", + Expected: &Attribute{ + Int: helper.PointerOf(int64(1)), + }, + }, + { + Input: "100", + Expected: &Attribute{ + Int: helper.PointerOf(int64(100)), + }, + }, + { + Input: "-100", + Expected: &Attribute{ + Int: helper.PointerOf(int64(-100)), + }, + }, + { + Input: "-1.0", + Expected: &Attribute{ + Float: helper.PointerOf(float64(-1.0)), + }, + }, + { + Input: "-100.25", + Expected: &Attribute{ + Float: helper.PointerOf(float64(-100.25)), + }, + }, + { + Input: "1.01", + Expected: &Attribute{ + Float: helper.PointerOf(float64(1.01)), + }, + }, + { + Input: "100.25", + Expected: &Attribute{ + Float: 
helper.PointerOf(float64(100.25)), + }, + }, + { + Input: "foobar", + Expected: &Attribute{ + String: helper.PointerOf("foobar"), + }, + }, + { + Input: "foo123bar", + Expected: &Attribute{ + String: helper.PointerOf("foo123bar"), + }, + }, + { + Input: "100MB", + Expected: &Attribute{ + Int: helper.PointerOf(int64(100)), + Unit: "MB", + }, + }, + { + Input: "-100MHz", + Expected: &Attribute{ + Int: helper.PointerOf(int64(-100)), + Unit: "MHz", + }, + }, + { + Input: "-1.0MB/s", + Expected: &Attribute{ + Float: helper.PointerOf(float64(-1.0)), + Unit: "MB/s", + }, + }, + { + Input: "-100.25GiB/s", + Expected: &Attribute{ + Float: helper.PointerOf(float64(-100.25)), + Unit: "GiB/s", + }, + }, + { + Input: "1.01TB", + Expected: &Attribute{ + Float: helper.PointerOf(float64(1.01)), + Unit: "TB", + }, + }, + { + Input: "100.25mW", + Expected: &Attribute{ + Float: helper.PointerOf(float64(100.25)), + Unit: "mW", + }, + }, + } + + for _, c := range cases { + t.Run(c.Input, func(t *testing.T) { + a := ParseAttribute(c.Input) + must.Eq(t, c.Expected, a) + must.NoError(t, a.Validate()) + }) + } +} + +func BenchmarkParse(b *testing.B) { + cases := []string{ + "true", + "false", + "100", + "-100", + "-1.0", + "-100.25", + "1.01", + "100.25", + "foobar", + "foo123bar", + "100MB", + "-100MHz", + "-1.0MB/s", + "-100.25GiB/s", + "1.01TB", + "100.25mW", + } + + for n := 0; n < b.N; n++ { + for _, c := range cases { + ParseAttribute(c) + } + } +} diff --git a/plugin_interface/shared/structs/errors.go b/plugin_interface/shared/structs/errors.go new file mode 100644 index 00000000000..1427c2e93e5 --- /dev/null +++ b/plugin_interface/shared/structs/errors.go @@ -0,0 +1,62 @@ +package structs + +// RecoverableError wraps an error and marks whether it is recoverable and could +// be retried or it is fatal. +type RecoverableError struct { + Err string + Recoverable bool + wrapped error +} + +// NewRecoverableError is used to wrap an error and mark it as recoverable or +// not. 
+func NewRecoverableError(e error, recoverable bool) error { + if e == nil { + return nil + } + + return &RecoverableError{ + Err: e.Error(), + Recoverable: recoverable, + wrapped: e, + } +} + +// WrapRecoverable wraps an existing error in a new RecoverableError with a new +// message. If the error was recoverable before the returned error is as well; +// otherwise it is unrecoverable. +func WrapRecoverable(msg string, err error) error { + return &RecoverableError{Err: msg, Recoverable: IsRecoverable(err)} +} + +func (r *RecoverableError) Error() string { + return r.Err +} + +func (r *RecoverableError) IsRecoverable() bool { + return r.Recoverable +} + +func (r *RecoverableError) IsUnrecoverable() bool { + return !r.Recoverable +} + +func (r *RecoverableError) Unwrap() error { + return r.wrapped +} + +// Recoverable is an interface for errors to implement to indicate whether or +// not they are fatal or recoverable. +type Recoverable interface { + error + IsRecoverable() bool +} + +// IsRecoverable returns true if error is a RecoverableError with +// Recoverable=true. Otherwise false is returned. +func IsRecoverable(e error) bool { + if re, ok := e.(Recoverable); ok { + return re.IsRecoverable() + } + return false +} diff --git a/plugin_interface/shared/structs/msgpack.go b/plugin_interface/shared/structs/msgpack.go new file mode 100644 index 00000000000..09ea7bf3643 --- /dev/null +++ b/plugin_interface/shared/structs/msgpack.go @@ -0,0 +1,44 @@ +package structs + +import ( + "bytes" + "reflect" + + "github.com/hashicorp/go-msgpack/v2/codec" +) + +// TODO: these are just used for plugin_test.go, can we do something else here? 
+ +// msgpackHandle is a shared handle for encoding/decoding of structs +var MsgpackHandle = func() *codec.MsgpackHandle { + h := &codec.MsgpackHandle{} + h.RawToString = true + + // maintain binary format from time prior to upgrading latest ugorji + h.BasicHandle.TimeNotBuiltin = true + + // Sets the default type for decoding a map into a nil interface{}. + // This is necessary in particular because we store the driver configs as a + // nil interface{}. + h.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // only review struct codec tags + h.TypeInfos = codec.NewTypeInfos([]string{"codec"}) + + return h +}() + +// Decode is used to decode a MsgPack encoded object +func Decode(buf []byte, out interface{}) error { + return codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle).Decode(out) +} + +type MessageType uint8 + +// Encode is used to encode a MsgPack object with type prefix +func Encode(t MessageType, msg interface{}) ([]byte, error) { + var buf bytes.Buffer + buf.WriteByte(uint8(t)) + err := codec.NewEncoder(&buf, MsgpackHandle).Encode(msg) + return buf.Bytes(), err +} diff --git a/plugin_interface/shared/structs/plugin_reattach_config.go b/plugin_interface/shared/structs/plugin_reattach_config.go new file mode 100644 index 00000000000..698a0190dd3 --- /dev/null +++ b/plugin_interface/shared/structs/plugin_reattach_config.go @@ -0,0 +1,75 @@ +// Copyright IBM Corp. 
2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +package structs + +import ( + "fmt" + "net" + + plugin "github.com/hashicorp/go-plugin" +) + +// ReattachConfig is a wrapper around plugin.ReattachConfig to better support +// serialization +type ReattachConfig struct { + Protocol string + Network string + Addr string + Pid int +} + +// ReattachConfigToGoPlugin converts a ReattachConfig wrapper struct into a go +// plugin ReattachConfig struct +func ReattachConfigToGoPlugin(rc *ReattachConfig) (*plugin.ReattachConfig, error) { + if rc == nil { + return nil, fmt.Errorf("nil ReattachConfig cannot be converted") + } + + plug := &plugin.ReattachConfig{ + Protocol: plugin.Protocol(rc.Protocol), + Pid: rc.Pid, + } + + switch rc.Network { + case "tcp", "tcp4", "tcp6": + addr, err := net.ResolveTCPAddr(rc.Network, rc.Addr) + if err != nil { + return nil, err + } + plug.Addr = addr + case "udp", "udp4", "udp6": + addr, err := net.ResolveUDPAddr(rc.Network, rc.Addr) + if err != nil { + return nil, err + } + plug.Addr = addr + case "unix", "unixgram", "unixpacket": + addr, err := net.ResolveUnixAddr(rc.Network, rc.Addr) + if err != nil { + return nil, err + } + plug.Addr = addr + default: + return nil, fmt.Errorf("unknown network: %s", rc.Network) + } + + return plug, nil +} + +// ReattachConfigFromGoPlugin converts a go plugin ReattachConfig into a +// ReattachConfig wrapper struct +func ReattachConfigFromGoPlugin(plug *plugin.ReattachConfig) *ReattachConfig { + if plug == nil { + return nil + } + + rc := &ReattachConfig{ + Protocol: string(plug.Protocol), + Network: plug.Addr.Network(), + Addr: plug.Addr.String(), + Pid: plug.Pid, + } + + return rc +} diff --git a/plugin_interface/shared/structs/proto/attribute.pb.go b/plugin_interface/shared/structs/proto/attribute.pb.go new file mode 100644 index 00000000000..cdc95735cd9 --- /dev/null +++ b/plugin_interface/shared/structs/proto/attribute.pb.go @@ -0,0 +1,169 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: plugins/shared/structs/proto/attribute.proto + +package proto + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Attribute is used to describe the value of an attribute, optionally +// specifying units +type Attribute struct { + // Types that are valid to be assigned to Value: + // + // *Attribute_FloatVal + // *Attribute_IntVal + // *Attribute_StringVal + // *Attribute_BoolVal + Value isAttribute_Value `protobuf_oneof:"value"` + // unit gives the unit type: MHz, MB, etc. + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Attribute) Reset() { *m = Attribute{} } +func (m *Attribute) String() string { return proto.CompactTextString(m) } +func (*Attribute) ProtoMessage() {} +func (*Attribute) Descriptor() ([]byte, []int) { + return fileDescriptor_5b30c64b64565493, []int{0} +} + +func (m *Attribute) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Attribute.Unmarshal(m, b) +} +func (m *Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Attribute.Marshal(b, m, deterministic) +} +func (m *Attribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_Attribute.Merge(m, src) +} +func (m *Attribute) XXX_Size() int { + return xxx_messageInfo_Attribute.Size(m) +} +func (m *Attribute) XXX_DiscardUnknown() { + xxx_messageInfo_Attribute.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Attribute proto.InternalMessageInfo + +type isAttribute_Value interface { + isAttribute_Value() +} + +type Attribute_FloatVal struct { + FloatVal float64 `protobuf:"fixed64,1,opt,name=float_val,json=floatVal,proto3,oneof"` +} + +type Attribute_IntVal struct { + IntVal int64 `protobuf:"varint,2,opt,name=int_val,json=intVal,proto3,oneof"` +} + +type Attribute_StringVal struct { + StringVal string `protobuf:"bytes,3,opt,name=string_val,json=stringVal,proto3,oneof"` +} + +type Attribute_BoolVal struct { + BoolVal bool `protobuf:"varint,4,opt,name=bool_val,json=boolVal,proto3,oneof"` +} + +func (*Attribute_FloatVal) isAttribute_Value() {} + +func (*Attribute_IntVal) isAttribute_Value() {} + +func (*Attribute_StringVal) isAttribute_Value() {} + +func (*Attribute_BoolVal) isAttribute_Value() {} + +func (m *Attribute) GetValue() isAttribute_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Attribute) GetFloatVal() float64 { + if x, ok := m.GetValue().(*Attribute_FloatVal); ok { + return x.FloatVal + } + return 0 +} + +func (m *Attribute) GetIntVal() int64 { + if x, ok := m.GetValue().(*Attribute_IntVal); ok { + return x.IntVal + } + return 0 +} + +func (m *Attribute) GetStringVal() string { + if x, ok := m.GetValue().(*Attribute_StringVal); ok { + return x.StringVal + } + return "" +} + +func (m *Attribute) GetBoolVal() bool { + if x, ok := m.GetValue().(*Attribute_BoolVal); ok { + return x.BoolVal + } + return false +} + +func (m *Attribute) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*Attribute) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Attribute_FloatVal)(nil), + (*Attribute_IntVal)(nil), + (*Attribute_StringVal)(nil), + (*Attribute_BoolVal)(nil), + } +} + +func init() { + proto.RegisterType((*Attribute)(nil), "hashicorp.nomad.plugins.shared.structs.Attribute") +} + +func init() { + proto.RegisterFile("plugins/shared/structs/proto/attribute.proto", fileDescriptor_5b30c64b64565493) +} + +var fileDescriptor_5b30c64b64565493 = []byte{ + // 218 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8f, 0xb1, 0x4e, 0xc3, 0x30, + 0x10, 0x40, 0x63, 0xda, 0x34, 0xc9, 0x8d, 0x99, 0x8a, 0x10, 0x22, 0x62, 0x40, 0x19, 0x90, 0x33, + 0xf0, 0x05, 0x74, 0xf2, 0xec, 0x81, 0x81, 0x05, 0x5d, 0xda, 0xd0, 0x58, 0x32, 0x76, 0x64, 0x9f, + 0xfb, 0x3d, 0x7c, 0x2a, 0xf2, 0x25, 0x4c, 0xf6, 0xbd, 0x77, 0x6f, 0x38, 0x78, 0x5d, 0x6c, 0xba, + 0x1a, 0x17, 0x87, 0x38, 0x63, 0x98, 0x2e, 0x43, 0xa4, 0x90, 0xce, 0x14, 0x87, 0x25, 0x78, 0xf2, + 0x03, 0x12, 0x05, 0x33, 0x26, 0x9a, 0x24, 0xcf, 0xed, 0xcb, 0x8c, 0x71, 0x36, 0x67, 0x1f, 0x16, + 0xe9, 0xfc, 0x0f, 0x5e, 0xe4, 0x56, 0xcb, 0xb5, 0x96, 0x5b, 0xfd, 0xfc, 0x2b, 0xa0, 0x79, 0xff, + 0x6f, 0xdb, 0x47, 0x68, 0xbe, 0xad, 0x47, 0xfa, 0xba, 0xa1, 0x3d, 0x8a, 0x4e, 0xf4, 0x42, 0x15, + 0xba, 0x66, 0xf4, 0x81, 0xb6, 0xbd, 0x87, 0xca, 0xb8, 0x55, 0xde, 0x75, 0xa2, 0xdf, 0xa9, 0x42, + 0x1f, 0x8c, 0x63, 0xf5, 0x04, 0x10, 0x29, 0x18, 0x77, 0x65, 0xbb, 0xeb, 0x44, 0xdf, 0xa8, 0x42, + 0x37, 0x2b, 0xcb, 0x0b, 0x0f, 0x50, 0x8f, 0xde, 0x5b, 0xd6, 0xfb, 0x4e, 0xf4, 0xb5, 0x2a, 0x74, + 0x95, 0x49, 0x96, 0x2d, 0xec, 0x93, 0x33, 0x74, 0x2c, 0x73, 0xa7, 0xf9, 0x7f, 0xaa, 0xa0, 0xbc, + 0xa1, 0x4d, 0xd3, 0xa9, 0xfa, 0x2c, 0xf9, 0xa6, 0xf1, 0xc0, 0xcf, 0xdb, 0x5f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x77, 0x2b, 0x7a, 0x7c, 0x0a, 0x01, 0x00, 0x00, +} diff --git a/plugin_interface/shared/structs/proto/attribute.proto b/plugin_interface/shared/structs/proto/attribute.proto 
new file mode 100644 index 00000000000..1479e9f3f69 --- /dev/null +++ b/plugin_interface/shared/structs/proto/attribute.proto @@ -0,0 +1,28 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; +package hashicorp.nomad.plugins.shared.structs; +option go_package = "proto"; + +// Attribute is used to describe the value of an attribute, optionally +// specifying units +message Attribute { + oneof value { + // float_val exposes a floating point value. + double float_val = 1; + + // int_numerator_val exposes a int value. + int64 int_val = 2; + + // string_val exposes a string value. + string string_val = 3; + + // bool_val exposes a boolean statistic. + bool bool_val = 4; + } + + // unit gives the unit type: MHz, MB, etc. + string unit = 5; +} + diff --git a/plugin_interface/shared/structs/proto/recoverable_error.pb.go b/plugin_interface/shared/structs/proto/recoverable_error.pb.go new file mode 100644 index 00000000000..77034c2c71d --- /dev/null +++ b/plugin_interface/shared/structs/proto/recoverable_error.pb.go @@ -0,0 +1,83 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: plugins/shared/structs/proto/recoverable_error.proto + +package proto + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// RecoverableError is used with a grpc Status to indicate if the error is one +// which is recoverable and can be reattempted by the client. 
+type RecoverableError struct { + Recoverable bool `protobuf:"varint,1,opt,name=recoverable,proto3" json:"recoverable,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RecoverableError) Reset() { *m = RecoverableError{} } +func (m *RecoverableError) String() string { return proto.CompactTextString(m) } +func (*RecoverableError) ProtoMessage() {} +func (*RecoverableError) Descriptor() ([]byte, []int) { + return fileDescriptor_82d0e8d3a57dbb3c, []int{0} +} + +func (m *RecoverableError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RecoverableError.Unmarshal(m, b) +} +func (m *RecoverableError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RecoverableError.Marshal(b, m, deterministic) +} +func (m *RecoverableError) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecoverableError.Merge(m, src) +} +func (m *RecoverableError) XXX_Size() int { + return xxx_messageInfo_RecoverableError.Size(m) +} +func (m *RecoverableError) XXX_DiscardUnknown() { + xxx_messageInfo_RecoverableError.DiscardUnknown(m) +} + +var xxx_messageInfo_RecoverableError proto.InternalMessageInfo + +func (m *RecoverableError) GetRecoverable() bool { + if m != nil { + return m.Recoverable + } + return false +} + +func init() { + proto.RegisterType((*RecoverableError)(nil), "hashicorp.nomad.plugins.shared.structs.RecoverableError") +} + +func init() { + proto.RegisterFile("plugins/shared/structs/proto/recoverable_error.proto", fileDescriptor_82d0e8d3a57dbb3c) +} + +var fileDescriptor_82d0e8d3a57dbb3c = []byte{ + // 138 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0x2b, 0xd6, 0x2f, 0xce, 0x48, 0x2c, 0x4a, 0x4d, 0xd1, 0x2f, 0x2e, 0x29, 0x2a, 0x4d, + 0x2e, 0x29, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x4d, 0xce, 0x2f, 0x4b, 0x2d, + 0x4a, 0x4c, 0xca, 
0x49, 0x8d, 0x4f, 0x2d, 0x2a, 0xca, 0x2f, 0xd2, 0x03, 0x8b, 0x0b, 0xa9, 0x65, + 0x24, 0x16, 0x67, 0x64, 0x26, 0xe7, 0x17, 0x15, 0xe8, 0xe5, 0xe5, 0xe7, 0x26, 0xa6, 0xe8, 0x41, + 0x4d, 0xd1, 0x83, 0x98, 0xa2, 0x07, 0x35, 0x45, 0xc9, 0x84, 0x4b, 0x20, 0x08, 0x61, 0x84, 0x2b, + 0xc8, 0x04, 0x21, 0x05, 0x2e, 0x6e, 0x24, 0x63, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x82, 0x90, + 0x85, 0x9c, 0xd8, 0xa3, 0x58, 0xc1, 0xd6, 0x24, 0xb1, 0x81, 0x29, 0x63, 0x40, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xc5, 0x45, 0x79, 0xed, 0xa5, 0x00, 0x00, 0x00, +} diff --git a/plugin_interface/shared/structs/proto/recoverable_error.proto b/plugin_interface/shared/structs/proto/recoverable_error.proto new file mode 100644 index 00000000000..fe15a811304 --- /dev/null +++ b/plugin_interface/shared/structs/proto/recoverable_error.proto @@ -0,0 +1,12 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; +package hashicorp.nomad.plugins.shared.structs; +option go_package = "proto"; + +// RecoverableError is used with a grpc Status to indicate if the error is one +// which is recoverable and can be reattempted by the client. +message RecoverableError { + bool recoverable = 1; +} diff --git a/plugin_interface/shared/structs/proto/stats.pb.go b/plugin_interface/shared/structs/proto/stats.pb.go new file mode 100644 index 00000000000..77af17dee6a --- /dev/null +++ b/plugin_interface/shared/structs/proto/stats.pb.go @@ -0,0 +1,224 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: plugins/shared/structs/proto/stats.proto + +package proto + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// StatObject is a collection of statistics either exposed at the top +// level or via nested StatObjects. +type StatObject struct { + // nested is a mapping of object name to a nested stats object. + Nested map[string]*StatObject `protobuf:"bytes,1,rep,name=nested,proto3" json:"nested,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // attributes is a mapping of statistic name to its value. + Attributes map[string]*StatValue `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatObject) Reset() { *m = StatObject{} } +func (m *StatObject) String() string { return proto.CompactTextString(m) } +func (*StatObject) ProtoMessage() {} +func (*StatObject) Descriptor() ([]byte, []int) { + return fileDescriptor_cbe97f35e2eb2516, []int{0} +} + +func (m *StatObject) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatObject.Unmarshal(m, b) +} +func (m *StatObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatObject.Marshal(b, m, deterministic) +} +func (m *StatObject) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatObject.Merge(m, src) +} +func (m *StatObject) XXX_Size() int { + return xxx_messageInfo_StatObject.Size(m) +} +func (m *StatObject) XXX_DiscardUnknown() { + xxx_messageInfo_StatObject.DiscardUnknown(m) +} + +var xxx_messageInfo_StatObject proto.InternalMessageInfo + +func (m *StatObject) GetNested() map[string]*StatObject { + if m != nil { + return m.Nested + } + return nil +} + +func (m *StatObject) GetAttributes() map[string]*StatValue 
{ + if m != nil { + return m.Attributes + } + return nil +} + +// StatValue exposes the values of a particular statistic. The value may +// be of type double, integer, string or boolean. Numeric types can be +// exposed as a single value or as a fraction. +type StatValue struct { + // float_numerator_val exposes a floating point value. If denominator + // is set it is assumed to be a fractional value, otherwise it is a + // scalar. + FloatNumeratorVal *wrappers.DoubleValue `protobuf:"bytes,1,opt,name=float_numerator_val,json=floatNumeratorVal,proto3" json:"float_numerator_val,omitempty"` + FloatDenominatorVal *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=float_denominator_val,json=floatDenominatorVal,proto3" json:"float_denominator_val,omitempty"` + // int_numerator_val exposes a int value. If denominator + // is set it is assumed to be a fractional value, otherwise it is a + // scalar. + IntNumeratorVal *wrappers.Int64Value `protobuf:"bytes,3,opt,name=int_numerator_val,json=intNumeratorVal,proto3" json:"int_numerator_val,omitempty"` + IntDenominatorVal *wrappers.Int64Value `protobuf:"bytes,4,opt,name=int_denominator_val,json=intDenominatorVal,proto3" json:"int_denominator_val,omitempty"` + // string_val exposes a string value. These are likely annotations. + StringVal *wrappers.StringValue `protobuf:"bytes,5,opt,name=string_val,json=stringVal,proto3" json:"string_val,omitempty"` + // bool_val exposes a boolean statistic. + BoolVal *wrappers.BoolValue `protobuf:"bytes,6,opt,name=bool_val,json=boolVal,proto3" json:"bool_val,omitempty"` + // unit gives the unit type: °F, %, MHz, MB, etc. + Unit string `protobuf:"bytes,7,opt,name=unit,proto3" json:"unit,omitempty"` + // desc provides a human readable description of the statistic. 
+ Desc string `protobuf:"bytes,8,opt,name=desc,proto3" json:"desc,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatValue) Reset() { *m = StatValue{} } +func (m *StatValue) String() string { return proto.CompactTextString(m) } +func (*StatValue) ProtoMessage() {} +func (*StatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_cbe97f35e2eb2516, []int{1} +} + +func (m *StatValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatValue.Unmarshal(m, b) +} +func (m *StatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatValue.Marshal(b, m, deterministic) +} +func (m *StatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatValue.Merge(m, src) +} +func (m *StatValue) XXX_Size() int { + return xxx_messageInfo_StatValue.Size(m) +} +func (m *StatValue) XXX_DiscardUnknown() { + xxx_messageInfo_StatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StatValue proto.InternalMessageInfo + +func (m *StatValue) GetFloatNumeratorVal() *wrappers.DoubleValue { + if m != nil { + return m.FloatNumeratorVal + } + return nil +} + +func (m *StatValue) GetFloatDenominatorVal() *wrappers.DoubleValue { + if m != nil { + return m.FloatDenominatorVal + } + return nil +} + +func (m *StatValue) GetIntNumeratorVal() *wrappers.Int64Value { + if m != nil { + return m.IntNumeratorVal + } + return nil +} + +func (m *StatValue) GetIntDenominatorVal() *wrappers.Int64Value { + if m != nil { + return m.IntDenominatorVal + } + return nil +} + +func (m *StatValue) GetStringVal() *wrappers.StringValue { + if m != nil { + return m.StringVal + } + return nil +} + +func (m *StatValue) GetBoolVal() *wrappers.BoolValue { + if m != nil { + return m.BoolVal + } + return nil +} + +func (m *StatValue) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *StatValue) GetDesc() string { + if m != nil { + return m.Desc + } + 
return "" +} + +func init() { + proto.RegisterType((*StatObject)(nil), "hashicorp.nomad.plugins.shared.structs.StatObject") + proto.RegisterMapType((map[string]*StatValue)(nil), "hashicorp.nomad.plugins.shared.structs.StatObject.AttributesEntry") + proto.RegisterMapType((map[string]*StatObject)(nil), "hashicorp.nomad.plugins.shared.structs.StatObject.NestedEntry") + proto.RegisterType((*StatValue)(nil), "hashicorp.nomad.plugins.shared.structs.StatValue") +} + +func init() { + proto.RegisterFile("plugins/shared/structs/proto/stats.proto", fileDescriptor_cbe97f35e2eb2516) +} + +var fileDescriptor_cbe97f35e2eb2516 = []byte{ + // 444 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0xd2, 0xdf, 0x6a, 0x13, 0x41, + 0x14, 0x06, 0x70, 0x36, 0xdb, 0x24, 0xcd, 0xc9, 0x45, 0xed, 0x14, 0x61, 0x89, 0x22, 0xa1, 0x17, + 0x92, 0xab, 0x59, 0x8c, 0x7f, 0x10, 0x05, 0xc1, 0x50, 0xa9, 0xa2, 0x54, 0x49, 0x21, 0x17, 0xde, + 0x94, 0xd9, 0xec, 0x74, 0x33, 0x3a, 0x3b, 0xb3, 0xcc, 0x9c, 0xa9, 0xf4, 0x91, 0x7c, 0x28, 0xdf, + 0x45, 0x76, 0x66, 0x93, 0xa6, 0x5b, 0xc5, 0xd6, 0xab, 0x9c, 0x4c, 0xf2, 0xfd, 0xbe, 0xb3, 0xcc, + 0xc2, 0xa4, 0x92, 0xae, 0x10, 0xca, 0xa6, 0x76, 0xc5, 0x0c, 0xcf, 0x53, 0x8b, 0xc6, 0x2d, 0xd1, + 0xa6, 0x95, 0xd1, 0xa8, 0x53, 0x8b, 0x0c, 0x2d, 0xf5, 0x33, 0x79, 0xbc, 0x62, 0x76, 0x25, 0x96, + 0xda, 0x54, 0x54, 0xe9, 0x92, 0xe5, 0xb4, 0x49, 0xd2, 0x90, 0xa4, 0x4d, 0x72, 0xf4, 0xa8, 0xd0, + 0xba, 0x90, 0x3c, 0x08, 0x99, 0x3b, 0x4f, 0x7f, 0x18, 0x56, 0x55, 0xdc, 0x34, 0xce, 0xe1, 0xcf, + 0x18, 0xe0, 0x14, 0x19, 0x7e, 0xce, 0xbe, 0xf1, 0x25, 0x92, 0x05, 0xf4, 0x14, 0xb7, 0xc8, 0xf3, + 0x24, 0x1a, 0xc7, 0x93, 0xe1, 0xf4, 0x0d, 0xbd, 0x5d, 0x0f, 0xbd, 0x32, 0xe8, 0x89, 0x07, 0xde, + 0x29, 0x34, 0x97, 0xf3, 0x46, 0x23, 0x19, 0x00, 0x43, 0x34, 0x22, 0x73, 0xc8, 0x6d, 0xd2, 0xf1, + 0xf6, 0xec, 0x3f, 0xec, 0xb7, 0x1b, 0x24, 0xf8, 0x5b, 0xea, 0xa8, 0x84, 0xe1, 0x56, 0x35, 0xb9, + 0x07, 0xf1, 0x77, 0x7e, 0x99, 0x44, 0xe3, 
0x68, 0x32, 0x98, 0xd7, 0x23, 0x79, 0x0f, 0xdd, 0x0b, + 0x26, 0x1d, 0x4f, 0x3a, 0xe3, 0x68, 0x32, 0x9c, 0x4e, 0xef, 0xde, 0x3f, 0x0f, 0xc0, 0xab, 0xce, + 0xcb, 0x68, 0x54, 0xc1, 0x5e, 0x6b, 0x9b, 0x3f, 0x54, 0x1e, 0x5f, 0xaf, 0x7c, 0x72, 0x97, 0xca, + 0x45, 0x1d, 0xdc, 0x6a, 0x3c, 0xfc, 0x15, 0xc3, 0x60, 0xf3, 0x03, 0xf9, 0x04, 0x07, 0xe7, 0x52, + 0x33, 0x3c, 0x53, 0xae, 0xe4, 0x86, 0xa1, 0x36, 0x67, 0x17, 0x4c, 0xfa, 0xf2, 0xe1, 0xf4, 0x21, + 0x0d, 0xf7, 0x4e, 0xd7, 0xf7, 0x4e, 0x8f, 0xb4, 0xcb, 0x24, 0x0f, 0xe6, 0xbe, 0x0f, 0x9e, 0xac, + 0x73, 0x0b, 0x26, 0xc9, 0x17, 0xb8, 0x1f, 0xb4, 0x9c, 0x2b, 0x5d, 0x0a, 0xb5, 0xf1, 0x3a, 0xb7, + 0xf0, 0xc2, 0x22, 0x47, 0x57, 0xc9, 0x5a, 0x3c, 0x86, 0x7d, 0xa1, 0xda, 0xdb, 0xc5, 0x5e, 0x7b, + 0x70, 0x43, 0xfb, 0xa0, 0xf0, 0xc5, 0xb3, 0x80, 0xed, 0x09, 0x75, 0x7d, 0xb5, 0x8f, 0x70, 0x50, + 0x43, 0xed, 0xc5, 0x76, 0xfe, 0x4d, 0xd5, 0x0b, 0xb4, 0xb6, 0x7a, 0x0d, 0x60, 0xd1, 0x08, 0x55, + 0x78, 0xa3, 0xfb, 0x97, 0x87, 0x3b, 0xf5, 0x7f, 0x09, 0xc8, 0xc0, 0xae, 0xbf, 0x90, 0xe7, 0xb0, + 0x9b, 0x69, 0x2d, 0x7d, 0xb4, 0xe7, 0xa3, 0xa3, 0x1b, 0xd1, 0x99, 0xd6, 0x32, 0x04, 0xfb, 0x59, + 0x18, 0x09, 0x81, 0x1d, 0xa7, 0x04, 0x26, 0x7d, 0xff, 0x5e, 0xf8, 0xb9, 0x3e, 0xcb, 0xb9, 0x5d, + 0x26, 0xbb, 0xe1, 0xac, 0x9e, 0x67, 0xfd, 0xaf, 0xdd, 0xc0, 0xf4, 0xfc, 0xc7, 0xd3, 0xdf, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x7d, 0x1d, 0x2d, 0xe3, 0x0f, 0x04, 0x00, 0x00, +} diff --git a/plugin_interface/shared/structs/proto/stats.proto b/plugin_interface/shared/structs/proto/stats.proto new file mode 100644 index 00000000000..cd17e7bdef0 --- /dev/null +++ b/plugin_interface/shared/structs/proto/stats.proto @@ -0,0 +1,47 @@ +// Copyright IBM Corp. 2015, 2025 +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; +package hashicorp.nomad.plugins.shared.structs; +option go_package = "proto"; + +import "google/protobuf/wrappers.proto"; + +// StatObject is a collection of statistics either exposed at the top +// level or via nested StatObjects. 
+message StatObject {
+  // nested is a mapping of object name to a nested stats object.
+  map<string, StatObject> nested = 1;
+
+  // attributes is a mapping of statistic name to its value.
+  map<string, StatValue> attributes = 2;
+}
+
+// StatValue exposes the values of a particular statistic. The value may
+// be of type double, integer, string or boolean. Numeric types can be
+// exposed as a single value or as a fraction.
+message StatValue {
+  // float_numerator_val exposes a floating point value. If denominator
+  // is set it is assumed to be a fractional value, otherwise it is a
+  // scalar.
+  google.protobuf.DoubleValue float_numerator_val = 1;
+  google.protobuf.DoubleValue float_denominator_val = 2;
+
+  // int_numerator_val exposes an int value. If denominator
+  // is set it is assumed to be a fractional value, otherwise it is a
+  // scalar.
+  google.protobuf.Int64Value int_numerator_val = 3;
+  google.protobuf.Int64Value int_denominator_val = 4;
+
+  // string_val exposes a string value. These are likely annotations.
+  google.protobuf.StringValue string_val = 5;
+
+  // bool_val exposes a boolean statistic.
+  google.protobuf.BoolValue bool_val = 6;
+
+  // unit gives the unit type: °F, %, MHz, MB, etc.
+  string unit = 7;
+
+  // desc provides a human readable description of the statistic.
+  string desc = 8;
+}
diff --git a/plugin_interface/shared/structs/stats.go b/plugin_interface/shared/structs/stats.go
new file mode 100644
index 00000000000..b74d7a90c83
--- /dev/null
+++ b/plugin_interface/shared/structs/stats.go
@@ -0,0 +1,41 @@
+// Copyright IBM Corp. 2015, 2025
+// SPDX-License-Identifier: MPL-2.0
+
+package structs
+
+// StatObject is a collection of statistics either exposed at the top
+// level or via nested StatObjects.
+type StatObject struct {
+	// Nested is a mapping of object name to a nested stats object.
+	Nested map[string]*StatObject
+
+	// Attributes is a mapping of statistic name to its value.
+	Attributes map[string]*StatValue
+}
+
+// StatValue exposes the values of a particular statistic. The value may be of
+// type float, integer, string or boolean. Numeric types can be exposed as a
+// single value or as a fraction.
+type StatValue struct {
+	// FloatNumeratorVal exposes a floating point value. If denominator is set
+	// it is assumed to be a fractional value, otherwise it is a scalar.
+	FloatNumeratorVal   *float64 `json:",omitempty"`
+	FloatDenominatorVal *float64 `json:",omitempty"`
+
+	// IntNumeratorVal exposes an int value. If denominator is set it is assumed
+	// to be a fractional value, otherwise it is a scalar.
+	IntNumeratorVal   *int64 `json:",omitempty"`
+	IntDenominatorVal *int64 `json:",omitempty"`
+
+	// StringVal exposes a string value. These are likely annotations.
+	StringVal *string `json:",omitempty"`
+
+	// BoolVal exposes a boolean statistic.
+	BoolVal *bool `json:",omitempty"`
+
+	// Unit gives the unit type: °F, %, MHz, MB, etc.
+	Unit string `json:",omitempty"`
+
+	// Desc provides a human readable description of the statistic.
+	Desc string `json:",omitempty"`
+}
diff --git a/plugin_interface/shared/structs/units.go b/plugin_interface/shared/structs/units.go
new file mode 100644
index 00000000000..10c3b285552
--- /dev/null
+++ b/plugin_interface/shared/structs/units.go
@@ -0,0 +1,263 @@
+// Copyright IBM Corp. 2015, 2025
+// SPDX-License-Identifier: MPL-2.0
+
+package structs
+
+import "sort"
+
+// Names of every unit the plugin framework understands. Each name below has a
+// corresponding *Unit entry in the var block that defines its base unit and
+// multiplier.
+const (
+	// Binary SI Byte Units
+	UnitKiB = "KiB"
+	UnitMiB = "MiB"
+	UnitGiB = "GiB"
+	UnitTiB = "TiB"
+	UnitPiB = "PiB"
+	UnitEiB = "EiB"
+
+	// Decimal SI Byte Units
+	UnitkB = "kB"
+	UnitKB = "KB"
+	UnitMB = "MB"
+	UnitGB = "GB"
+	UnitTB = "TB"
+	UnitPB = "PB"
+	UnitEB = "EB"
+
+	// Binary SI Byte Rates
+	UnitKiBPerS = "KiB/s"
+	UnitMiBPerS = "MiB/s"
+	UnitGiBPerS = "GiB/s"
+	UnitTiBPerS = "TiB/s"
+	UnitPiBPerS = "PiB/s"
+	UnitEiBPerS = "EiB/s"
+
+	// Decimal SI Byte Rates
+	UnitkBPerS = "kB/s"
+	UnitKBPerS = "KB/s"
+	UnitMBPerS = "MB/s"
+	UnitGBPerS = "GB/s"
+	UnitTBPerS = "TB/s"
+	UnitPBPerS = "PB/s"
+	UnitEBPerS = "EB/s"
+
+	// Hertz units
+	UnitMHz = "MHz"
+	UnitGHz = "GHz"
+
+	// Watts units
+	UnitmW = "mW"
+	UnitW  = "W"
+	UnitkW = "kW"
+	UnitMW = "MW"
+	UnitGW = "GW"
+)
+
+var (
+	// numUnits is the number of known units
+	numUnits = len(binarySIBytes) + len(decimalSIBytes) + len(binarySIByteRates) + len(decimalSIByteRates) + len(watts) + len(hertz)
+
+	// UnitIndex is a map of unit name to unit
+	UnitIndex = make(map[string]*Unit, numUnits)
+
+	// lengthSortedUnits is a list of unit names sorted by length with longest
+	// first
+	lengthSortedUnits = make([]string, 0, numUnits)
+
+	// binarySIBytes are powers-of-two byte units (1 KiB = 1024 bytes).
+	binarySIBytes = []*Unit{
+		{
+			Name:       UnitKiB,
+			Base:       UnitByte,
+			Multiplier: 1 << 10,
+		},
+		{
+			Name:       UnitMiB,
+			Base:       UnitByte,
+			Multiplier: 1 << 20,
+		},
+		{
+			Name:       UnitGiB,
+			Base:       UnitByte,
+			Multiplier: 1 << 30,
+		},
+		{
+			Name:       UnitTiB,
+			Base:       UnitByte,
+			Multiplier: 1 << 40,
+		},
+		{
+			Name:       UnitPiB,
+			Base:       UnitByte,
+			Multiplier: 1 << 50,
+		},
+		{
+			Name:       UnitEiB,
+			Base:       UnitByte,
+			Multiplier: 1 << 60,
+		},
+	}
+
+	// decimalSIBytes are powers-of-ten byte units (1 kB = 1000 bytes).
+	decimalSIBytes = []*Unit{
+		{
+			Name:       UnitkB,
+			Base:       UnitByte,
+			Multiplier: Pow(1000, 1),
+		},
+		{
+			Name:       UnitKB, // Alternative name for kB
+			Base:       UnitByte,
+			Multiplier: Pow(1000, 1),
+		},
+		{
+			Name:       UnitMB,
+			Base:       UnitByte,
+			Multiplier: Pow(1000, 2),
+		},
+		{
+			Name:       UnitGB,
+			Base:       UnitByte,
+			Multiplier: Pow(1000, 3),
+		},
+		{
+			Name:       UnitTB,
+			Base:       UnitByte,
+			Multiplier: Pow(1000, 4),
+		},
+		{
+			Name:       UnitPB,
+			Base:       UnitByte,
+			Multiplier: Pow(1000, 5),
+		},
+		{
+			Name:       UnitEB,
+			Base:       UnitByte,
+			Multiplier: Pow(1000, 6),
+		},
+	}
+
+	// binarySIByteRates mirror binarySIBytes with a byte-rate base.
+	binarySIByteRates = []*Unit{
+		{
+			Name:       UnitKiBPerS,
+			Base:       UnitByteRate,
+			Multiplier: 1 << 10,
+		},
+		{
+			Name:       UnitMiBPerS,
+			Base:       UnitByteRate,
+			Multiplier: 1 << 20,
+		},
+		{
+			Name:       UnitGiBPerS,
+			Base:       UnitByteRate,
+			Multiplier: 1 << 30,
+		},
+		{
+			Name:       UnitTiBPerS,
+			Base:       UnitByteRate,
+			Multiplier: 1 << 40,
+		},
+		{
+			Name:       UnitPiBPerS,
+			Base:       UnitByteRate,
+			Multiplier: 1 << 50,
+		},
+		{
+			Name:       UnitEiBPerS,
+			Base:       UnitByteRate,
+			Multiplier: 1 << 60,
+		},
+	}
+
+	// decimalSIByteRates mirror decimalSIBytes with a byte-rate base.
+	decimalSIByteRates = []*Unit{
+		{
+			Name:       UnitkBPerS,
+			Base:       UnitByteRate,
+			Multiplier: Pow(1000, 1),
+		},
+		{
+			Name:       UnitKBPerS,
+			Base:       UnitByteRate,
+			Multiplier: Pow(1000, 1),
+		},
+		{
+			Name:       UnitMBPerS,
+			Base:       UnitByteRate,
+			Multiplier: Pow(1000, 2),
+		},
+		{
+			Name:       UnitGBPerS,
+			Base:       UnitByteRate,
+			Multiplier: Pow(1000, 3),
+		},
+		{
+			Name:       UnitTBPerS,
+			Base:       UnitByteRate,
+			Multiplier: Pow(1000, 4),
+		},
+		{
+			Name:       UnitPBPerS,
+			Base:       UnitByteRate,
+			Multiplier: Pow(1000, 5),
+		},
+		{
+			Name:       UnitEBPerS,
+			Base:       UnitByteRate,
+			Multiplier: Pow(1000, 6),
+		},
+	}
+
+	hertz = []*Unit{
+		{
+			Name:       UnitMHz,
+			Base:       UnitHertz,
+			Multiplier: Pow(1000, 2),
+		},
+		{
+			Name:       UnitGHz,
+			Base:       UnitHertz,
+			Multiplier: Pow(1000, 3),
+		},
+	}
+
+	watts = []*Unit{
+		{
+			Name:       UnitmW,
+			Base:       UnitWatt,
+			Multiplier: Pow(10, 3),
+			// NOTE(review): InverseMultiplier presumably means the value is
+			// divided by Multiplier (mW = W / 1000) — confirm against the
+			// Unit type definition, which is not in this file.
+			InverseMultiplier: true,
+		},
+		{
+			Name:       UnitW,
+			Base:       UnitWatt,
+			Multiplier: 1,
+		},
+		{
+			Name:       UnitkW,
+			Base:       UnitWatt,
+			Multiplier: Pow(10, 3),
+		},
+		{
+			Name:       UnitMW,
+			Base:       UnitWatt,
+			Multiplier: Pow(10, 6),
+		},
+		{
+			Name:       UnitGW,
+			Base:       UnitWatt,
+			Multiplier: Pow(10, 9),
+		},
+	}
+)
+
+func init() {
+	// Build the index
+	for _, units := range
[][]*Unit{binarySIBytes, decimalSIBytes, binarySIByteRates, decimalSIByteRates, watts, hertz} {
+		for _, unit := range units {
+			UnitIndex[unit.Name] = unit
+			lengthSortedUnits = append(lengthSortedUnits, unit.Name)
+		}
+	}
+
+	// NOTE(review): sort.Slice expects a strict "less" function; ">=" is not
+	// strict, so the relative order of equal-length names is undefined.
+	// Consider ">" if callers need a deterministic order. The longest-first
+	// ordering itself is presumably so consumers match the most specific unit
+	// suffix first — confirm at the use site.
+	sort.Slice(lengthSortedUnits, func(i, j int) bool {
+		return len(lengthSortedUnits[i]) >= len(lengthSortedUnits[j])
+	})
+}
diff --git a/plugin_interface/shared/structs/util.go b/plugin_interface/shared/structs/util.go
new file mode 100644
index 00000000000..616679ae4bb
--- /dev/null
+++ b/plugin_interface/shared/structs/util.go
@@ -0,0 +1,257 @@
+// Copyright IBM Corp. 2015, 2025
+// SPDX-License-Identifier: MPL-2.0
+
+package structs
+
+// NOTE(review): the import paths below use "plugin-interface" while the files
+// in this change live under "plugin_interface" — confirm they match the module
+// path declared in plugin_interface/go.mod.
+import (
+	"github.com/golang/protobuf/ptypes/wrappers"
+	"github.com/hashicorp/nomad/plugin-interface/helper"
+	"github.com/hashicorp/nomad/plugin-interface/shared/structs/proto"
+)
+
+// ConvertProtoAttribute converts a proto Attribute into its struct
+// representation, carrying over the unit and whichever value variant is set.
+// Assumes in is non-nil.
+func ConvertProtoAttribute(in *proto.Attribute) *Attribute {
+	out := &Attribute{
+		Unit: in.Unit,
+	}
+
+	switch in.Value.(type) {
+	case *proto.Attribute_BoolVal:
+		out.Bool = helper.PointerOf(in.GetBoolVal())
+	case *proto.Attribute_FloatVal:
+		out.Float = helper.PointerOf(in.GetFloatVal())
+	case *proto.Attribute_IntVal:
+		out.Int = helper.PointerOf(in.GetIntVal())
+	case *proto.Attribute_StringVal:
+		out.String = helper.PointerOf(in.GetStringVal())
+	default:
+		// No value variant set: all value fields stay nil.
+	}
+
+	return out
+}
+
+// ConvertProtoAttributeMap converts a map of proto Attributes into struct
+// Attributes. A nil input yields a nil output.
+func ConvertProtoAttributeMap(in map[string]*proto.Attribute) map[string]*Attribute {
+	if in == nil {
+		return nil
+	}
+
+	out := make(map[string]*Attribute, len(in))
+	for k, a := range in {
+		out[k] = ConvertProtoAttribute(a)
+	}
+
+	return out
+}
+
+// ConvertStructsAttribute converts a struct Attribute into its proto
+// representation. At most one value variant is emitted, preferring Int, then
+// Float, String and Bool, in that order. Assumes in is non-nil.
+func ConvertStructsAttribute(in *Attribute) *proto.Attribute {
+	out := &proto.Attribute{
+		Unit: in.Unit,
+	}
+
+	if in.Int != nil {
+		out.Value = &proto.Attribute_IntVal{
+			IntVal: *in.Int,
+		}
+	} else if in.Float != nil {
+		out.Value = &proto.Attribute_FloatVal{
+			FloatVal: *in.Float,
+		}
+	} else if in.String != nil {
+		out.Value = &proto.Attribute_StringVal{
+			StringVal: *in.String,
+		}
+	} else if in.Bool != nil {
+		out.Value = &proto.Attribute_BoolVal{
+			BoolVal: *in.Bool,
+		}
+	}
+
+	return out
+}
+
+// ConvertStructAttributeMap converts a map of struct Attributes into proto
+// Attributes. A nil input yields a nil output.
+func ConvertStructAttributeMap(in map[string]*Attribute) map[string]*proto.Attribute {
+	if in == nil {
+		return nil
+	}
+
+	out := make(map[string]*proto.Attribute, len(in))
+	for k, a := range in {
+		out[k] = ConvertStructsAttribute(a)
+	}
+
+	return out
+}
+
+// Pow returns a**b via exponentiation by squaring. Exponents <= 0 return 1
+// (the loop only runs while b > 0); overflow wraps silently.
+func Pow(a, b int64) int64 {
+	var p int64 = 1
+	for b > 0 {
+		if b&1 != 0 {
+			p *= a
+		}
+		b >>= 1
+		a *= a
+	}
+	return p
+}
+
+// CopyMapStringAttribute copies a map of string to Attribute. Each value is
+// deep-copied via Attribute.Copy; a nil input yields a nil output.
+func CopyMapStringAttribute(in map[string]*Attribute) map[string]*Attribute {
+	if in == nil {
+		return nil
+	}
+
+	out := make(map[string]*Attribute, len(in))
+	for k, v := range in {
+		out[k] = v.Copy()
+	}
+	return out
+}
+
+// ConvertProtoStatObject converts between a proto and struct StatObject,
+// recursing into nested objects. A nil input yields a nil output.
+func ConvertProtoStatObject(in *proto.StatObject) *StatObject {
+	if in == nil {
+		return nil
+	}
+
+	out := &StatObject{
+		Nested:     make(map[string]*StatObject, len(in.Nested)),
+		Attributes: make(map[string]*StatValue, len(in.Attributes)),
+	}
+
+	for k, v := range in.Nested {
+		out.Nested[k] = ConvertProtoStatObject(v)
+	}
+
+	for k, v := range in.Attributes {
+		out.Attributes[k] = ConvertProtoStatValue(v)
+	}
+
+	return out
+}
+
+// ConvertProtoStatValue converts between a proto and struct StatValue. A nil
+// input yields a nil output; absent wrapper fields become nil pointers.
+func ConvertProtoStatValue(in *proto.StatValue) *StatValue {
+	if in == nil {
+		return nil
+	}
+
+	return &StatValue{
+		FloatNumeratorVal:   unwrapDouble(in.FloatNumeratorVal),
+		FloatDenominatorVal: unwrapDouble(in.FloatDenominatorVal),
+		IntNumeratorVal:     unwrapInt64(in.IntNumeratorVal),
+		IntDenominatorVal:   unwrapInt64(in.IntDenominatorVal),
+		StringVal:           unwrapString(in.StringVal),
+		BoolVal:             unwrapBool(in.BoolVal),
+		Unit:                in.Unit,
+		Desc:                in.Desc,
+	}
+}
+
+// ConvertStructStatObject converts between a struct and proto StatObject,
+// recursing into nested objects. A nil input yields a nil output.
+func ConvertStructStatObject(in *StatObject) *proto.StatObject {
+	if in == nil {
+		return nil
+	}
+
+	out := &proto.StatObject{
+		Nested:     make(map[string]*proto.StatObject, len(in.Nested)),
+		Attributes: make(map[string]*proto.StatValue, len(in.Attributes)),
+	}
+
+	for k, v := range in.Nested {
+		out.Nested[k] = ConvertStructStatObject(v)
+	}
+
+	for k, v := range in.Attributes {
+		out.Attributes[k] = ConvertStructStatValue(v)
+	}
+
+	return out
+}
+
+// ConvertStructStatValue converts between a struct and proto StatValue. A nil
+// input yields a nil output; nil pointers become absent wrapper fields.
+func ConvertStructStatValue(in *StatValue) *proto.StatValue {
+	if in == nil {
+		return nil
+	}
+
+	return &proto.StatValue{
+		FloatNumeratorVal:   wrapDouble(in.FloatNumeratorVal),
+		FloatDenominatorVal: wrapDouble(in.FloatDenominatorVal),
+		IntNumeratorVal:     wrapInt64(in.IntNumeratorVal),
+		IntDenominatorVal:   wrapInt64(in.IntDenominatorVal),
+		StringVal:           wrapString(in.StringVal),
+		BoolVal:             wrapBool(in.BoolVal),
+		Unit:                in.Unit,
+		Desc:                in.Desc,
+	}
+}
+
+// Helper functions for proto wrapping. Each unwrap* returns a pointer to a
+// copy of the wrapped value (so the result does not alias the wrapper), and
+// each wrap* is its inverse. All of them map nil to nil.
+
+func unwrapDouble(w *wrappers.DoubleValue) *float64 {
+	if w == nil {
+		return nil
+	}
+
+	v := w.Value
+	return &v
+}
+
+func wrapDouble(v *float64) *wrappers.DoubleValue {
+	if v == nil {
+		return nil
+	}
+
+	return &wrappers.DoubleValue{Value: *v}
+}
+
+func unwrapInt64(w *wrappers.Int64Value) *int64 {
+	if w == nil {
+		return nil
+	}
+
+	v := w.Value
+	return &v
+}
+
+func wrapInt64(v *int64) *wrappers.Int64Value {
+	if v == nil {
+		return nil
+	}
+
+	return &wrappers.Int64Value{Value: *v}
+}
+
+func unwrapString(w *wrappers.StringValue) *string {
+	if w == nil {
+		return nil
+	}
+
+	v := w.Value
+	return &v
+}
+
+func wrapString(v *string) *wrappers.StringValue {
+	if v == nil {
+		return nil
+	}
+
+	return &wrappers.StringValue{Value: *v}
+}
+
+func unwrapBool(w *wrappers.BoolValue) *bool {
+	if w == nil {
+		return nil
+	}
+
+	v := w.Value
+	return &v
+}
+
+func wrapBool(v *bool) *wrappers.BoolValue {
+	if v == nil {
+		return nil
+	}
+
+	return &wrappers.BoolValue{Value: *v}
+}