Initial k8s support in libplayground (#216)
* Initial k8s support in libplayground
* Make tasks never give up + custom event for k8s cluster status
@@ -76,39 +76,30 @@ func (s *scheduler) processInstance(ctx context.Context, si *scheduledInstance)
 	defer s.unscheduleInstance(si.instance)
 	for {
 		select {
 		case <-si.ticker.C:
 			// First check if the instance still exists
 			_, err := s.storage.InstanceGet(si.instance.Name)
 			if err != nil {
 				if storage.NotFound(err) {
 					// Instance doesn't exist anymore. Unschedule.
 					log.Printf("Instance %s doesn't exist in storage.\n", si.instance.Name)
 					return
 				}
 				log.Printf("Error retrieving instance %s from storage. Got: %v\n", si.instance.Name, err)
 				continue
 			}
-			failed := false
 			for _, task := range s.tasks {
 				err := task.Run(ctx, si.instance)
 				if err != nil {
-					failed = true
 					log.Printf("Error running task %s on instance %s. Got: %v\n", task.Name(), si.instance.Name, err)
-					// Since one task failed, we just assume something might be wrong with the instance, so we don't try to process the rest of the tasks.
-					si.fails++
-					if si.fails > 5 {
-						log.Printf("Instance %s has failed to execute tasks too many times. Giving up.\n", si.instance.Name)
-						return
-					}
-					break
 				}
 			}
-			if !failed {
-				si.fails = 0
-			}
 		case <-ctx.Done():
 			log.Printf("Processing tasks for instance %s has been canceled.\n", si.instance.Name)
 			return
 		}
 	}
 }
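
For reference, the loop above only requires each scheduled task to expose a name and a Run method, which is exactly what the new k8s tasks below implement. A minimal sketch of that contract, inferred from the calls to task.Name() and task.Run() (the actual interface is defined elsewhere in the scheduler package and may differ):

package scheduler // sketch only; the real declaration lives elsewhere in this package

import (
	"context"

	"github.com/play-with-docker/play-with-docker/pwd/types"
)

// Task is the contract the processInstance loop relies on (inferred, not authoritative).
type Task interface {
	// Name identifies the task in the scheduler's log messages.
	Name() string
	// Run executes the task once against a single instance. With this commit,
	// a returned error is only logged; it no longer unschedules the instance.
	Run(ctx context.Context, instance *types.Instance) error
}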

scheduler/task/check_k8s_cluster_exposed_ports.go (new file, 77 lines)
@@ -0,0 +1,77 @@
package task

import (
	"context"
	"log"

	"github.com/play-with-docker/play-with-docker/event"
	"github.com/play-with-docker/play-with-docker/k8s"
	"github.com/play-with-docker/play-with-docker/pwd/types"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type checkK8sClusterExposedPortsTask struct {
	event   event.EventApi
	factory k8s.FactoryApi
}

var CheckK8sClusterExposedPortsEvent event.EventType

func init() {
	CheckK8sClusterExposedPortsEvent = event.EventType("instance k8s cluster ports")
}

func (t *checkK8sClusterExposedPortsTask) Name() string {
	return "CheckK8sClusterPorts"
}

func NewCheckK8sClusterExposedPorts(e event.EventApi, f k8s.FactoryApi) *checkK8sClusterExposedPortsTask {
	return &checkK8sClusterExposedPortsTask{event: e, factory: f}
}

func (c checkK8sClusterExposedPortsTask) Run(ctx context.Context, i *types.Instance) error {
	kc, err := c.factory.GetKubeletForInstance(i)
	if err != nil {
		return err
	}

	// Only the manager node queries the cluster; workers have nothing to report.
	if isManager, err := kc.IsManager(); err != nil {
		log.Println(err)
		return err
	} else if !isManager {
		return nil
	}

	k8sClient, err := c.factory.GetForInstance(i)
	if err != nil {
		log.Println(err)
		return err
	}

	// Collect every NodePort exposed by services in any namespace.
	list, err := k8sClient.CoreV1().Services("").List(meta_v1.ListOptions{})
	if err != nil {
		return err
	}
	exposedPorts := []int{}
	for _, svc := range list.Items {
		for _, p := range svc.Spec.Ports {
			if p.NodePort > 0 {
				exposedPorts = append(exposedPorts, int(p.NodePort))
			}
		}
	}

	// Collect the names of all nodes so the ports can be mapped to instances.
	nodeList, err := k8sClient.CoreV1().Nodes().List(meta_v1.ListOptions{})
	if err != nil {
		return err
	}
	instances := []string{}
	for _, node := range nodeList.Items {
		instances = append(instances, node.Name)
	}

	// Emit the shared ClusterPorts payload on the existing swarm ports event type.
	c.event.Emit(CheckSwarmPortsEvent, i.SessionId, ClusterPorts{Manager: i.Name, Instances: instances, Ports: exposedPorts})
	return nil
}
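
The task above uses the k8s package's factory in two ways: a kubelet-level client to decide whether the instance is the cluster manager, and a client-go clientset to list services and nodes. A rough sketch of that contract, inferred from the calls in Run (the real k8s.FactoryApi may differ, and KubeletApi is an assumed name):

package k8s // sketch only; inferred from usage, not the actual definition

import (
	"github.com/play-with-docker/play-with-docker/pwd/types"
	"k8s.io/client-go/kubernetes"
)

// FactoryApi as the scheduler tasks appear to use it (assumption).
type FactoryApi interface {
	// GetKubeletForInstance returns a per-instance client that can report
	// whether the instance is acting as the cluster manager.
	GetKubeletForInstance(i *types.Instance) (KubeletApi, error)
	// GetForInstance returns a client-go clientset for the cluster the
	// instance belongs to.
	GetForInstance(i *types.Instance) (kubernetes.Interface, error)
}

// KubeletApi is an assumed name for the per-instance kubelet client.
type KubeletApi interface {
	IsManager() (bool, error)
}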

scheduler/task/check_k8s_cluster_status_task.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package task

import (
	"context"
	"log"

	"github.com/play-with-docker/play-with-docker/event"
	"github.com/play-with-docker/play-with-docker/k8s"
	"github.com/play-with-docker/play-with-docker/pwd/types"
)

type checkK8sClusterStatusTask struct {
	event   event.EventApi
	factory k8s.FactoryApi
}

var CheckK8sStatusEvent event.EventType

func init() {
	CheckK8sStatusEvent = event.EventType("instance k8s status")
}

func NewCheckK8sClusterStatus(e event.EventApi, f k8s.FactoryApi) *checkK8sClusterStatusTask {
	return &checkK8sClusterStatusTask{event: e, factory: f}
}

func (c *checkK8sClusterStatusTask) Name() string {
	return "CheckK8sClusterStatus"
}

func (c checkK8sClusterStatusTask) Run(ctx context.Context, i *types.Instance) error {
	status := ClusterStatus{Instance: i.Name}

	kc, err := c.factory.GetKubeletForInstance(i)
	if err != nil {
		log.Println(err)
		// Emit an empty status so listeners still get an update for this instance.
		c.event.Emit(CheckK8sStatusEvent, i.SessionId, status)
		return err
	}

	if isManager, err := kc.IsManager(); err != nil {
		c.event.Emit(CheckK8sStatusEvent, i.SessionId, status)
		return err
	} else if !isManager {
		// Not a manager node; report it as a worker.
		status.IsWorker = true
	} else {
		status.IsManager = true
	}

	c.event.Emit(CheckK8sStatusEvent, i.SessionId, status)

	return nil
}
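
Registration of these tasks is not part of this diff. As a hypothetical sketch of how the two new constructors could be collected for the scheduler (the helper, its name, and the local interface are illustrative assumptions, not the repository's wiring code):

package example // hypothetical wiring sketch

import (
	"context"

	"github.com/play-with-docker/play-with-docker/event"
	"github.com/play-with-docker/play-with-docker/k8s"
	"github.com/play-with-docker/play-with-docker/pwd/types"
	"github.com/play-with-docker/play-with-docker/scheduler/task"
)

// runnableTask is declared locally only because the scheduler's own task type
// is not shown in this diff; both new tasks satisfy it.
type runnableTask interface {
	Name() string
	Run(ctx context.Context, i *types.Instance) error
}

// newK8sTasks is a hypothetical helper, not a function in the repository.
func newK8sTasks(e event.EventApi, f k8s.FactoryApi) []runnableTask {
	return []runnableTask{
		task.NewCheckK8sClusterStatus(e, f),
		task.NewCheckK8sClusterExposedPorts(e, f),
	}
}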
@@ -9,12 +9,6 @@ import (
 	"github.com/play-with-docker/play-with-docker/pwd/types"
 )
 
-type DockerSwarmPorts struct {
-	Manager   string   `json:"manager"`
-	Instances []string `json:"instances"`
-	Ports     []int    `json:"ports"`
-}
-
 type checkSwarmPorts struct {
 	event   event.EventApi
 	factory docker.FactoryApi
@@ -57,7 +51,7 @@ func (t *checkSwarmPorts) Run(ctx context.Context, instance *types.Instance) error {
 		ports[i] = int(port)
 	}
 
-	t.event.Emit(CheckSwarmPortsEvent, instance.SessionId, DockerSwarmPorts{Manager: instance.Name, Instances: hosts, Ports: ports})
+	t.event.Emit(CheckSwarmPortsEvent, instance.SessionId, ClusterPorts{Manager: instance.Name, Instances: hosts, Ports: ports})
 	return nil
 }
@@ -10,12 +10,6 @@ import (
 	"github.com/play-with-docker/play-with-docker/pwd/types"
 )
 
-type DockerSwarmStatus struct {
-	IsManager bool   `json:"is_manager"`
-	IsWorker  bool   `json:"is_worker"`
-	Instance  string `json:"instance"`
-}
-
 type checkSwarmStatus struct {
 	event   event.EventApi
 	factory docker.FactoryApi
@@ -53,8 +47,8 @@ func NewCheckSwarmStatus(e event.EventApi, f docker.FactoryApi) *checkSwarmStatus {
 	return &checkSwarmStatus{event: e, factory: f}
 }
 
-func getDockerSwarmStatus(ctx context.Context, client docker.DockerApi) (DockerSwarmStatus, error) {
-	status := DockerSwarmStatus{}
+func getDockerSwarmStatus(ctx context.Context, client docker.DockerApi) (ClusterStatus, error) {
+	status := ClusterStatus{}
 	info, err := client.GetDaemonInfo()
 	if err != nil {
 		return status, err
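
The hunk is cut off after the error check, so the rest of getDockerSwarmStatus is not shown. For illustration only, here is a hypothetical helper showing how Docker daemon info plausibly maps onto the shared ClusterStatus type, assuming GetDaemonInfo returns Docker's types.Info; this is not the repository's actual code:

package task // illustrative sketch, not part of the diff

import (
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
)

// clusterStatusFromInfo is a hypothetical helper: it maps swarm daemon info
// onto ClusterStatus the way the swarm status task is expected to.
func clusterStatusFromInfo(info types.Info, instance string) ClusterStatus {
	status := ClusterStatus{Instance: instance}
	if info.Swarm.LocalNodeState != swarm.LocalNodeStateActive {
		// Not part of a swarm: neither manager nor worker.
		return status
	}
	if info.Swarm.ControlAvailable {
		status.IsManager = true
	} else {
		status.IsWorker = true
	}
	return status
}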
scheduler/task/types.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package task

type ClusterStatus struct {
	IsManager bool   `json:"is_manager"`
	IsWorker  bool   `json:"is_worker"`
	Instance  string `json:"instance"`
}

type ClusterPorts struct {
	Manager   string   `json:"manager"`
	Instances []string `json:"instances"`
	Ports     []int    `json:"ports"`
}
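
These shared types replace the per-backend DockerSwarmStatus and DockerSwarmPorts structs removed above, and they keep the same JSON field names, so existing consumers of the emitted events are unaffected. A small standalone illustration of the wire format (the structs are duplicated locally only so the example runs on its own):

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the scheduler/task types above, for a self-contained example.
type ClusterStatus struct {
	IsManager bool   `json:"is_manager"`
	IsWorker  bool   `json:"is_worker"`
	Instance  string `json:"instance"`
}

type ClusterPorts struct {
	Manager   string   `json:"manager"`
	Instances []string `json:"instances"`
	Ports     []int    `json:"ports"`
}

func main() {
	s, _ := json.Marshal(ClusterStatus{IsManager: true, Instance: "node1"})
	p, _ := json.Marshal(ClusterPorts{Manager: "node1", Instances: []string{"node1", "node2"}, Ports: []int{30080, 30443}})
	fmt.Println(string(s)) // {"is_manager":true,"is_worker":false,"instance":"node1"}
	fmt.Println(string(p)) // {"manager":"node1","instances":["node1","node2"],"ports":[30080,30443]}
}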