Refactor GetForInstance so it doesn't depend on storage

This commit is contained in:
Marcos Lilljedahl
2017-08-11 18:16:14 -03:00
parent 1263f36bc8
commit fb1e50deaf
12 changed files with 52 additions and 42 deletions

View File

@@ -1,6 +1,8 @@
package docker
import "github.com/play-with-docker/play-with-docker/pwd/types"
type FactoryApi interface {
GetForSession(sessionId string) (DockerApi, error)
GetForInstance(sessionId, instanceName string) (DockerApi, error)
GetForInstance(instance *types.Instance) (DockerApi, error)
}

View File

@@ -1,6 +1,9 @@
package docker
import "github.com/stretchr/testify/mock"
import (
"github.com/play-with-docker/play-with-docker/pwd/types"
"github.com/stretchr/testify/mock"
)
type FactoryMock struct {
mock.Mock
@@ -11,7 +14,7 @@ func (m *FactoryMock) GetForSession(sessionId string) (DockerApi, error) {
return args.Get(0).(DockerApi), args.Error(1)
}
func (m *FactoryMock) GetForInstance(sessionId, instanceName string) (DockerApi, error) {
args := m.Called(sessionId, instanceName)
// GetForInstance is the testify-mock implementation of FactoryApi.GetForInstance.
// It records the call (with the instance pointer as the matched argument) and
// returns whatever DockerApi/error pair the test configured via On(...).Return(...).
// NOTE(review): args.Get(0).(DockerApi) panics if a test configures a nil (or
// non-DockerApi) first return value — confirm all tests return a valid mock.
func (m *FactoryMock) GetForInstance(instance *types.Instance) (DockerApi, error) {
args := m.Called(instance)
return args.Get(0).(DockerApi), args.Error(1)
}

View File

@@ -14,6 +14,7 @@ import (
"github.com/docker/docker/api"
"github.com/docker/docker/client"
"github.com/docker/go-connections/tlsconfig"
"github.com/play-with-docker/play-with-docker/pwd/types"
"github.com/play-with-docker/play-with-docker/router"
"github.com/play-with-docker/play-with-docker/storage"
)
@@ -52,8 +53,8 @@ func (f *localCachedFactory) GetForSession(sessionId string) (DockerApi, error)
return f.sessionClient, nil
}
func (f *localCachedFactory) GetForInstance(sessionId, instanceName string) (DockerApi, error) {
key := sessionId + instanceName
func (f *localCachedFactory) GetForInstance(instance *types.Instance) (DockerApi, error) {
key := instance.SessionId + instance.Name
f.irw.Lock()
c, found := f.instanceClients[key]
@@ -71,10 +72,6 @@ func (f *localCachedFactory) GetForInstance(sessionId, instanceName string) (Doc
return c.client, nil
}
instance, err := f.storage.InstanceGet(sessionId, instanceName)
if err != nil {
return nil, err
}
// Need to create client to the DinD docker daemon
// We check if the client needs to use TLS
var tlsConfig *tls.Config

View File

@@ -50,6 +50,7 @@ func (d *windows) InstanceNew(session *types.Session, conf types.InstanceConfig)
conf.ImageName = config.GetSSHImage()
winfo, err := d.getWindowsInstanceInfo(session.Id)
if err != nil {
return nil, err
}
@@ -82,20 +83,21 @@ func (d *windows) InstanceNew(session *types.Session, conf types.InstanceConfig)
dockerClient, err := d.factory.GetForSession(session.Id)
if err != nil {
d.releaseInstance(session.Id, winfo.id)
return nil, err
}
_, err = dockerClient.CreateContainer(opts)
if err != nil {
d.releaseInstance(session.Id, winfo.id)
return nil, err
}
instance := &types.Instance{}
instance.Image = opts.Image
instance.IP = winfo.privateIP
instance.IP = winfo.publicIP
instance.SessionId = session.Id
instance.Name = containerName
instance.WindowsId = winfo.id
instance.Hostname = conf.Hostname
instance.Cert = conf.Cert
instance.Key = conf.Key
instance.Type = conf.Type
@@ -108,6 +110,18 @@ func (d *windows) InstanceNew(session *types.Session, conf types.InstanceConfig)
// For now this condition holds true. In the future we might need more complex logic.
instance.IsDockerHost = opts.Privileged
if cli, err := d.factory.GetForInstance(instance); err != nil {
d.InstanceDelete(session, instance)
return nil, err
} else {
info, err := cli.GetDaemonInfo()
if err != nil {
d.InstanceDelete(session, instance)
return nil, err
}
instance.Hostname = info.Name
}
return instance, nil
}
@@ -122,14 +136,12 @@ func (d *windows) InstanceDelete(session *types.Session, instance *types.Instanc
return err
}
err = d.storage.InstanceDeleteWindows(session.Id, instance.WindowsId)
if err != nil {
return err
}
// TODO trigger deletion in AWS
return d.releaseInstance(session.Id, instance.WindowsId)
}
return nil
// releaseInstance frees the Windows instance assigned to the given session by
// deleting its assignment record from storage, making the instance available
// to be picked again. NOTE(review): this only updates storage — actual
// cloud-side teardown is not performed here; confirm it happens elsewhere.
func (d *windows) releaseInstance(sessionId, instanceId string) error {
return d.storage.InstanceDeleteWindows(sessionId, instanceId)
}
func (d *windows) InstanceResizeTerminal(instance *types.Instance, rows, cols uint) error {
@@ -198,6 +210,7 @@ func (d *windows) getWindowsInstanceInfo(sessionId string) (*instanceInfo, error
})
if err != nil {
// TODO retry x times and free the instance that was picked?
d.releaseInstance(sessionId, avInstanceId)
return nil, err
}
@@ -217,7 +230,6 @@ func (d *windows) getWindowsInstanceInfo(sessionId string) (*instanceInfo, error
// select free instance and lock it into db.
// additionally check if ASG needs to be resized
func (d *windows) pickFreeInstance(sessionId string, availInstances, assignedInstances []string) string {
for _, av := range availInstances {
found := false
for _, as := range assignedInstances {
@@ -225,11 +237,9 @@ func (d *windows) pickFreeInstance(sessionId string, availInstances, assignedIns
found = true
break
}
}
if !found {
fmt.Println("ABOUT TO PERSIST", av)
err := d.storage.InstanceCreateWindows(&types.WindowsInstance{SessionId: sessionId, ID: av})
if err != nil {
// TODO either storage error or instance is already assigned (race condition)
@@ -237,8 +247,6 @@ func (d *windows) pickFreeInstance(sessionId string, availInstances, assignedIns
return av
}
}
// all available instances are assigned
return ""
}

View File

@@ -259,7 +259,7 @@ func (p *pwd) SessionSetup(session *types.Session, conf SessionSetupConf) error
if err != nil {
return err
}
dockerClient, err := p.dockerFactory.GetForInstance(session.Id, i.Name)
dockerClient, err := p.dockerFactory.GetForInstance(i)
if err != nil {
return err
}
@@ -293,7 +293,7 @@ func (p *pwd) SessionSetup(session *types.Session, conf SessionSetupConf) error
if firstSwarmManager != nil {
if c.IsSwarmManager {
dockerClient, err := p.dockerFactory.GetForInstance(session.Id, i.Name)
dockerClient, err := p.dockerFactory.GetForInstance(i)
if err != nil {
log.Println(err)
return
@@ -307,7 +307,7 @@ func (p *pwd) SessionSetup(session *types.Session, conf SessionSetupConf) error
}
}
if c.IsSwarmWorker {
dockerClient, err := p.dockerFactory.GetForInstance(session.Id, i.Name)
dockerClient, err := p.dockerFactory.GetForInstance(i)
if err != nil {
log.Println(err)
return

View File

@@ -84,22 +84,22 @@ func TestSessionSetup(t *testing.T) {
_s.On("InstanceCount").Return(0, nil)
_d.On("CreateContainer", docker.CreateContainerOpts{Image: "franela/dind", SessionId: "aaaabbbbcccc", PwdIpAddress: "10.0.0.1", ContainerName: "aaaabbbb_manager1", Hostname: "manager1", Privileged: true, HostFQDN: "localhost"}).Return("10.0.0.2", nil)
_f.On("GetForInstance", "aaaabbbbcccc", "aaaabbbb_manager1").Return(_d, nil)
_f.On("GetForInstance", mock.AnythingOfType("*types.Instance")).Return(_d, nil)
_d.On("SwarmInit").Return(&docker.SwarmTokens{Manager: "managerToken", Worker: "workerToken"}, nil)
_e.M.On("Emit", event.INSTANCE_NEW, "aaaabbbbcccc", []interface{}{"aaaabbbb_manager1", "10.0.0.2", "manager1", "ip10-0-0-2-aaaabbbbcccc"}).Return()
_d.On("CreateContainer", docker.CreateContainerOpts{Image: "franela/dind", SessionId: "aaaabbbbcccc", PwdIpAddress: "10.0.0.1", ContainerName: "aaaabbbb_manager2", Hostname: "manager2", Privileged: true}).Return("10.0.0.3", nil)
_f.On("GetForInstance", "aaaabbbbcccc", "aaaabbbb_manager2").Return(_d, nil)
_f.On("GetForInstance", mock.AnythingOfType("*types.Instance")).Return(_d, nil)
_d.On("SwarmJoin", "10.0.0.2:2377", "managerToken").Return(nil)
_e.M.On("Emit", event.INSTANCE_NEW, "aaaabbbbcccc", []interface{}{"aaaabbbb_manager2", "10.0.0.3", "manager2", "ip10-0-0-3-aaaabbbbcccc"}).Return()
_d.On("CreateContainer", docker.CreateContainerOpts{Image: "franela/dind:overlay2-dev", SessionId: "aaaabbbbcccc", PwdIpAddress: "10.0.0.1", ContainerName: "aaaabbbb_manager3", Hostname: "manager3", Privileged: true}).Return("10.0.0.4", nil)
_f.On("GetForInstance", "aaaabbbbcccc", "aaaabbbb_manager3").Return(_d, nil)
_f.On("GetForInstance", mock.AnythingOfType("*types.Instance")).Return(_d, nil)
_d.On("SwarmJoin", "10.0.0.2:2377", "managerToken").Return(nil)
_e.M.On("Emit", event.INSTANCE_NEW, "aaaabbbbcccc", []interface{}{"aaaabbbb_manager3", "10.0.0.4", "manager3", "ip10-0-0-4-aaaabbbbcccc"}).Return()
_d.On("CreateContainer", docker.CreateContainerOpts{Image: "franela/dind", SessionId: "aaaabbbbcccc", PwdIpAddress: "10.0.0.1", ContainerName: "aaaabbbb_worker1", Hostname: "worker1", Privileged: true}).Return("10.0.0.5", nil)
_f.On("GetForInstance", "aaaabbbbcccc", "aaaabbbb_worker1").Return(_d, nil)
_f.On("GetForInstance", mock.AnythingOfType("*types.Instance")).Return(_d, nil)
_d.On("SwarmJoin", "10.0.0.2:2377", "workerToken").Return(nil)
_e.M.On("Emit", event.INSTANCE_NEW, "aaaabbbbcccc", []interface{}{"aaaabbbb_worker1", "10.0.0.5", "worker1", "ip10-0-0-5-aaaabbbbcccc"}).Return()

View File

@@ -30,7 +30,7 @@ func (t *checkPorts) Name() string {
}
func (t *checkPorts) Run(ctx context.Context, instance *types.Instance) error {
dockerClient, err := t.factory.GetForInstance(instance.SessionId, instance.Name)
dockerClient, err := t.factory.GetForInstance(instance)
if err != nil {
log.Println(err)
return err

View File

@@ -26,16 +26,16 @@ func TestCheckPorts_Run(t *testing.T) {
e := &event.Mock{}
f := &docker.FactoryMock{}
d.On("GetPorts").Return([]uint16{8080, 9090}, nil)
f.On("GetForInstance", "aaaabbbbcccc", "aaaabbbb_node1").Return(d, nil)
e.M.On("Emit", CheckPortsEvent, "aaaabbbbcccc", []interface{}{DockerPorts{Instance: "aaaabbbb_node1", Ports: []int{8080, 9090}}}).Return()
i := &types.Instance{
IP: "10.0.0.1",
Name: "aaaabbbb_node1",
SessionId: "aaaabbbbcccc",
}
d.On("GetPorts").Return([]uint16{8080, 9090}, nil)
f.On("GetForInstance", i).Return(d, nil)
e.M.On("Emit", CheckPortsEvent, "aaaabbbbcccc", []interface{}{DockerPorts{Instance: "aaaabbbb_node1", Ports: []int{8080, 9090}}}).Return()
task := NewCheckPorts(e, f)
ctx := context.Background()

View File

@@ -32,7 +32,7 @@ func (t *checkSwarmPorts) Name() string {
}
func (t *checkSwarmPorts) Run(ctx context.Context, instance *types.Instance) error {
dockerClient, err := t.factory.GetForInstance(instance.SessionId, instance.Name)
dockerClient, err := t.factory.GetForInstance(instance)
if err != nil {
log.Println(err)
return err

View File

@@ -40,7 +40,7 @@ func TestCheckSwarmPorts_RunWhenManager(t *testing.T) {
},
}
f.On("GetForInstance", "aaaabbbbcccc", "aaaabbbb_node1").Return(d, nil)
f.On("GetForInstance", i).Return(d, nil)
d.On("GetDaemonInfo").Return(info, nil)
d.On("GetSwarmPorts").Return([]string{"node1", "node2"}, []uint16{8080, 9090}, nil)
e.M.On("Emit", CheckSwarmPortsEvent, "aaaabbbbcccc", []interface{}{DockerSwarmPorts{Manager: i.Name, Instances: []string{i.Name, "aaaabbbb_node2"}, Ports: []int{8080, 9090}}}).Return()

View File

@@ -32,7 +32,7 @@ func (t *checkSwarmStatus) Name() string {
}
func (t *checkSwarmStatus) Run(ctx context.Context, instance *types.Instance) error {
dockerClient, err := t.factory.GetForInstance(instance.SessionId, instance.Name)
dockerClient, err := t.factory.GetForInstance(instance)
if err != nil {
log.Println(err)
return err

View File

@@ -39,7 +39,7 @@ func TestCheckSwarmStatus_RunWhenInactive(t *testing.T) {
},
}
f.On("GetForInstance", "aaabbbccc", "node1").Return(d, nil)
f.On("GetForInstance", i).Return(d, nil)
d.On("GetDaemonInfo").Return(infoInactive, nil)
e.M.On("Emit", CheckSwarmStatusEvent, "aaabbbccc", []interface{}{DockerSwarmStatus{IsManager: false, IsWorker: false, Instance: "node1"}}).Return()
@@ -70,7 +70,7 @@ func TestCheckSwarmStatus_RunWhenLocked(t *testing.T) {
},
}
f.On("GetForInstance", "aaabbbccc", "node1").Return(d, nil)
f.On("GetForInstance", i).Return(d, nil)
d.On("GetDaemonInfo").Return(infoLocked, nil)
e.M.On("Emit", CheckSwarmStatusEvent, "aaabbbccc", []interface{}{DockerSwarmStatus{IsManager: false, IsWorker: false, Instance: "node1"}}).Return()
@@ -102,7 +102,7 @@ func TestCheckSwarmStatus_RunWhenManager(t *testing.T) {
},
}
f.On("GetForInstance", "aaabbbccc", "node1").Return(d, nil)
f.On("GetForInstance", i).Return(d, nil)
d.On("GetDaemonInfo").Return(infoLocked, nil)
e.M.On("Emit", CheckSwarmStatusEvent, "aaabbbccc", []interface{}{DockerSwarmStatus{IsManager: true, IsWorker: false, Instance: "node1"}}).Return()
@@ -134,7 +134,7 @@ func TestCheckSwarmStatus_RunWhenWorker(t *testing.T) {
},
}
f.On("GetForInstance", "aaabbbccc", "node1").Return(d, nil)
f.On("GetForInstance", i).Return(d, nil)
d.On("GetDaemonInfo").Return(infoLocked, nil)
e.M.On("Emit", CheckSwarmStatusEvent, "aaabbbccc", []interface{}{DockerSwarmStatus{IsManager: false, IsWorker: true, Instance: "node1"}}).Return()