209 changes: 184 additions & 25 deletions thirdparty/cli-utils/status/cmdstatus.go
@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"os"
ss "strings"
"time"

"github.com/GoogleContainerTools/kpt/internal/docs/generated/livedocs"
@@ -16,8 +17,11 @@ import (
"github.com/GoogleContainerTools/kpt/pkg/live"
"github.com/GoogleContainerTools/kpt/pkg/status"
statusprinters "github.com/GoogleContainerTools/kpt/thirdparty/cli-utils/status/printers"
"github.com/GoogleContainerTools/kpt/thirdparty/cli-utils/status/printers/list"
"github.com/go-errors/errors"
"github.com/spf13/cobra"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/slice"
@@ -28,6 +32,8 @@ import (
"sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector"
"sigs.k8s.io/cli-utils/pkg/kstatus/polling/event"
kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status"
"sigs.k8s.io/cli-utils/pkg/object"
"sigs.k8s.io/cli-utils/pkg/print/common"
"sigs.k8s.io/cli-utils/pkg/printers"
)

@@ -38,6 +44,11 @@ const (
Forever = "forever"
)

const (
Local = "local"
Remote = "remote"
)

var (
PollUntilOptions = []string{Known, Current, Deleted, Forever}
)
@@ -65,6 +76,10 @@ func NewRunner(ctx context.Context, factory util.Factory) *Runner {
c.Flags().StringVar(&r.output, "output", "events", "Output format.")
c.Flags().DurationVar(&r.timeout, "timeout", 0,
"How long to wait before exiting")
c.Flags().StringVar(&r.invType, "inv-type", Local, "Type of the inventory info, must be local or remote")
c.Flags().StringVar(&r.inventoryNames, "inv-names", "", "Names of targeted inventory: inv1,inv2,...")
c.Flags().StringVar(&r.namespaces, "namespaces", "", "Names of targeted namespaces: ns1,ns2,...")
c.Flags().StringVar(&r.statuses, "statuses", "", "Targeted status: st1,st2...")
return r
}
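
For illustration only (not part of this diff), assuming this runner backs the kpt live status command, the new flags might be combined like this; inventory names, namespaces, and statuses are given as comma-separated lists:

    kpt live status --inv-type=remote --inv-names=inv-a,inv-b --namespaces=ns-1,ns-2 --statuses=current,inprogress
    kpt live status ./my-app --inv-type=local --output=table --timeout=2m

With inv-type=local (the default) the inventory template is read from the package path, so inv-names is rejected; with inv-type=remote any path argument is ignored and the inventories are listed from the cluster.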

@@ -85,6 +100,14 @@ type Runner struct {
timeout time.Duration
output string

invType string
inventoryNames string
inventoryNameSet map[string]bool
namespaces string
namespaceSet map[string]bool
statuses string
statusSet map[string]bool

pollerFactoryFunc func(util.Factory) (poller.Poller, error)
}

@@ -97,19 +120,48 @@ func (r *Runner) preRunE(*cobra.Command, []string) error {
if found := printers.ValidatePrinterType(r.output); !found {
return fmt.Errorf("unknown output type %q", r.output)
}

if r.invType != Local && r.invType != Remote {
return fmt.Errorf("inv-type flag should be either local or remote")
}

if r.invType == Local && r.inventoryNames != "" {
return fmt.Errorf("inv-names flag should only be used when inv-type is set to remote")
}

if r.inventoryNames != "" {
r.inventoryNameSet = make(map[string]bool)
for _, name := range ss.Split(r.inventoryNames, ",") {
r.inventoryNameSet[name] = true
}
}

if r.namespaces != "" {
r.namespaceSet = make(map[string]bool)
for _, ns := range ss.Split(r.namespaces, ",") {
r.namespaceSet[ns] = true
}
}

if r.statuses != "" {
r.statusSet = make(map[string]bool)
for _, st := range ss.Split(r.statuses, ",") {
parsedST := ss.ToLower(st)
r.statusSet[parsedST] = true
}
}
return nil
}
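
As a minimal sketch of the parsing above (the helper and package names here are hypothetical, not part of the change): each comma-separated flag value becomes a lookup set, and statuses are additionally lowercased so the later filtering is case-insensitive.

package main

import (
	"fmt"
	"strings"
)

// toSet mirrors the flag parsing in preRunE: split the value on commas and
// record each entry in a lookup map. An empty value yields an empty set,
// which the later filters treat as "no filtering".
func toSet(csv string, lower bool) map[string]bool {
	set := make(map[string]bool)
	if csv == "" {
		return set
	}
	for _, v := range strings.Split(csv, ",") {
		if lower {
			v = strings.ToLower(v)
		}
		set[v] = true
	}
	return set
}

func main() {
	fmt.Println(toSet("Current,InProgress", true)) // map[current:true inprogress:true]
	fmt.Println(toSet("ns-1,ns-2", false))         // map[ns-1:true ns-2:true]
}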

// runE implements the logic of the command and will delegate to the
// poller to compute status for each of the resources. One of the printer
// implementations takes care of printing the output.
func (r *Runner) runE(c *cobra.Command, args []string) error {
pr := printer.FromContextOrDie(r.ctx)
// loadInvFromDisk loads the inventory information from the local package,
// looks up the corresponding objects in the cluster, and wraps the result
// in a PrintData value that maps each object to its inventory name.
func (r *Runner) loadInvFromDisk(c *cobra.Command, args []string) (*list.PrintData, error) {
if len(args) == 0 {
// default to the current working directory
cwd, err := os.Getwd()
if err != nil {
return err
return nil, err
}
args = append(args, cwd)
}
@@ -119,53 +171,149 @@ func (r *Runner) runE(c *cobra.Command, args []string) error {
if args[0] != "-" {
path, err = argutil.ResolveSymlink(r.ctx, path)
if err != nil {
return err
return nil, err
}
}

_, inv, err := live.Load(r.factory, path, c.InOrStdin())
if err != nil {
return err
return nil, err
}

invInfo, err := live.ToInventoryInfo(inv)
if err != nil {
return err
return nil, err
}

invClient, err := r.invClientFunc(r.factory)
if err != nil {
return err
return nil, err
}

// Based on the inventory template manifest we look up the inventory
// from the live state using the inventory client.
identifiers, err := invClient.GetClusterObjs(invInfo)
if err != nil {
return nil, err
}

printData := list.PrintData{}
// initialize maps in printData
printData.InvNameMap = make(map[object.ObjMetadata]string)
for _, obj := range identifiers {
// check if the object is under one of the targeted namespaces
if _, ok := r.namespaceSet[obj.Namespace]; ok || len(r.namespaceSet) == 0 {
// add to the map for future reference
printData.InvNameMap[obj] = inv.Name
// append to identifiers
printData.Identifiers = append(printData.Identifiers, obj)
}
}
return &printData, nil
}
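
The namespace check above (and the same check in listInvFromCluster below) follows one convention: an empty or nil filter set accepts everything, while a non-empty set is a membership test. A minimal sketch of that predicate, with a hypothetical helper name:

// matches reports whether key passes the filter: an empty or nil set
// accepts every key, otherwise the key must be present in the set.
func matches(set map[string]bool, key string) bool {
	return len(set) == 0 || set[key]
}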

// listInvFromCluster retrieves the inventory objects from the cluster and
// wraps them in a PrintData value that maps each object to its inventory name.
// The lookup follows the same approach as GetClusterObjs in the inventory package.
func (r *Runner) listInvFromCluster() (*list.PrintData, error) {
// Create an empty PrintData object
printData := list.PrintData{}
// Create a dynamic client
dc, err := r.factory.DynamicClient()
if err != nil {
return nil, err
}

// Create a REST mapper
mapper, err := r.factory.ToRESTMapper()
if err != nil {
return nil, err
}

// Look up the REST mapping for the ResourceGroup GVK
mapping, err := mapper.RESTMapping(live.ResourceGroupGVK.GroupKind(), live.ResourceGroupGVK.Version)
if err != nil {
return nil, err
}

// Retrieve the list of inventory objects from the cluster
clusterInvs, err := dc.Resource(mapping.Resource).List(r.ctx, metav1.ListOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return nil, err
}
if apierrors.IsNotFound(err) {
return &printData, nil
}

// Wrap the results and initialize the inventory-name map in printData
printData.InvNameMap = make(map[object.ObjMetadata]string)

for _, inv := range clusterInvs.Items {
invName := inv.GetName()
// Skip this inventory if targeted inventory names were given and this name is not among them
if _, ok := r.inventoryNameSet[invName]; !ok && len(r.inventoryNameSet) != 0 {
continue
}

// Get wrapped object
wrappedInvObj := live.WrapInventoryObj(&inv)
wrappedInvObjSlice, err := wrappedInvObj.Load()
if err != nil {
return nil, err
}

// Filter objects
for _, obj := range wrappedInvObjSlice {
// check if the object is under one of the targeted namespaces
if _, ok := r.namespaceSet[obj.Namespace]; ok || len(r.namespaceSet) == 0 {
// add to the map for future reference
printData.InvNameMap[obj] = invName
// append to identifiers
printData.Identifiers = append(printData.Identifiers, obj)
}
}
}
return &printData, nil
}
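
A hedged sketch of how a printer might consume the PrintData built by either loader (this consumer is hypothetical and not part of the PR; it only uses the fields populated above): iterate the identifiers, look up each object's inventory name, and drop resources whose status falls outside StatusSet.

// printFiltered is a hypothetical consumer of list.PrintData, shown only to
// illustrate the intended use of Identifiers, InvNameMap, and StatusSet.
func printFiltered(pd *list.PrintData, statusOf func(object.ObjMetadata) string) {
	for _, id := range pd.Identifiers {
		st := ss.ToLower(statusOf(id))
		if len(pd.StatusSet) != 0 && !pd.StatusSet[st] {
			continue // excluded by the --statuses filter
		}
		fmt.Printf("%s/%s (%s): inventory %s\n", id.Namespace, id.Name, id.GroupKind.Kind, pd.InvNameMap[id])
	}
}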

// runE implements the logic of the command and will delegate to the
// poller to compute status for each of the resources. One of the printer
// implementations takes care of printing the output.
func (r *Runner) runE(c *cobra.Command, args []string) error {
pr := printer.FromContextOrDie(r.ctx)

var printData *list.PrintData
var err error
if r.invType == Remote {
if len(args) != 0 {
fmt.Printf("%c[%dm", common.ESC, common.YELLOW)
fmt.Println("Warning: Path is assigned while list flag is enabled, ignore the path")
fmt.Printf("%c[%dm", common.ESC, common.RESET)
}
printData, err = r.listInvFromCluster()
} else {
printData, err = r.loadInvFromDisk(c, args)
}
if err != nil {
return err
}

// Exit here if the inventory is empty.
if len(identifiers) == 0 {
if len(printData.Identifiers) == 0 {
pr.Printf("no resources found in the inventory\n")
return nil
}

statusPoller, err := r.pollerFactoryFunc(r.factory)
if err != nil {
return err
// If the statuses flag is set, include the status filter in printData
if len(r.statusSet) != 0 {
printData.StatusSet = r.statusSet
}

// Fetch a printer implementation based on the desired output format as
// specified in the output flag.
printer, err := statusprinters.CreatePrinter(r.output, genericclioptions.IOStreams{
Out: pr.OutStream(),
ErrOut: pr.ErrStream(),
})
statusPoller, err := r.pollerFactoryFunc(r.factory)
if err != nil {
return errors.WrapPrefix(err, "error creating printer", 1)
return err
}

// If the user has specified a timeout, we create a context with timeout,
// otherwise we create a context with cancel.
var ctx context.Context
@@ -192,19 +340,30 @@ func (r *Runner) runE(c *cobra.Command, args []string) error {
default:
return fmt.Errorf("unknown value for pollUntil: %q", r.pollUntil)
}
// Fetch a printer implementation based on the desired output format as
// specified in the output flag.

statusPrinter, err := statusprinters.CreatePrinter(r.output, genericclioptions.IOStreams{
Out: pr.OutStream(),
ErrOut: pr.ErrStream(),
}, printData)

if err != nil {
return errors.WrapPrefix(err, "error creating printer", 1)
}

eventChannel := statusPoller.Poll(ctx, identifiers, polling.PollOptions{
eventChannel := statusPoller.Poll(ctx, printData.Identifiers, polling.PollOptions{
PollInterval: r.period,
})

return printer.Print(eventChannel, identifiers, cancelFunc)
return statusPrinter.Print(eventChannel, printData.Identifiers, cancelFunc)
}
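
The timeout handling that the earlier comment refers to is collapsed in this diff view; as a generic sketch of that standard pattern (a hypothetical helper, not the exact code in this file):

// withOptionalTimeout returns a context that expires after d when d > 0,
// and a plain cancelable context otherwise.
func withOptionalTimeout(parent context.Context, d time.Duration) (context.Context, context.CancelFunc) {
	if d != 0 {
		return context.WithTimeout(parent, d)
	}
	return context.WithCancel(parent)
}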

// desiredStatusNotifierFunc returns an Observer function for the
// ResourceStatusCollector that will cancel the context (using the cancelFunc)
// when all resources have reached the desired status.
func desiredStatusNotifierFunc(cancelFunc context.CancelFunc,
desired kstatus.Status) collector.ObserverFunc {
desired kstatus.Status) collector.ObserverFunc {
return func(rsc *collector.ResourceStatusCollector, _ event.Event) {
var rss []*event.ResourceStatus
for _, rs := range rsc.ResourceStatuses {