Parallel implementation of the session setup; makes each setup around 3 times faster
Files changed: pwd/session.go, pwd/session_test.go
pwd/session.go
@@ -225,18 +225,17 @@ func (p *pwd) SessionSetup(session *Session, conf SessionSetupConf) error {
 	var tokens *docker.SwarmTokens = nil
 	var firstSwarmManager *Instance = nil
 
-	// First create all instances and record who is a swarm manager and who is a swarm worker
+	// first look for a swarm manager and create it
 	for _, conf := range conf.Instances {
-		instanceConf := InstanceConfig{
-			ImageName: conf.Image,
-			Hostname:  conf.Hostname,
-		}
-		i, err := p.InstanceNew(session, instanceConf)
-		if err != nil {
-			return err
-		}
-		if conf.IsSwarmManager || conf.IsSwarmWorker {
-			// check if we have connection to the daemon, if not, create it
+		if conf.IsSwarmManager {
+			instanceConf := InstanceConfig{
+				ImageName: conf.Image,
+				Hostname:  conf.Hostname,
+			}
+			i, err := p.InstanceNew(session, instanceConf)
+			if err != nil {
+				return err
+			}
 			if i.docker == nil {
 				dock, err := p.docker.New(i.IP, i.Cert, i.Key)
 				if err != nil {
@@ -244,34 +243,69 @@ func (p *pwd) SessionSetup(session *Session, conf SessionSetupConf) error {
 				}
 				i.docker = dock
 			}
-		}
-		if conf.IsSwarmManager {
-			// this is a swarm manager
-			// if no swarm cluster has been initiated, then initiate it!
-			if firstSwarmManager == nil {
-				tkns, err := i.docker.SwarmInit()
-				if err != nil {
-					return err
-				}
-				tokens = tkns
-				firstSwarmManager = i
-			} else {
-				// cluster has already been initiated, join as manager
-				err := i.docker.SwarmJoin(fmt.Sprintf("%s:2377", firstSwarmManager.IP), tokens.Manager)
-				if err != nil {
-					return err
-				}
-			}
-		}
-		if conf.IsSwarmWorker {
-			// this is a swarm worker
-			err := i.docker.SwarmJoin(fmt.Sprintf("%s:2377", firstSwarmManager.IP), tokens.Worker)
+			tkns, err := i.docker.SwarmInit()
 			if err != nil {
 				return err
 			}
+			tokens = tkns
+			firstSwarmManager = i
+			break
 		}
 	}
 
+	// now create the rest in parallel
+
+	wg := sync.WaitGroup{}
+	for _, c := range conf.Instances {
+		if firstSwarmManager != nil && c.Hostname != firstSwarmManager.Hostname {
+			wg.Add(1)
+			go func(c SessionSetupInstanceConf) {
+				defer wg.Done()
+				instanceConf := InstanceConfig{
+					ImageName: c.Image,
+					Hostname:  c.Hostname,
+				}
+				i, err := p.InstanceNew(session, instanceConf)
+				if err != nil {
+					log.Println(err)
+					return
+				}
+				if c.IsSwarmManager || c.IsSwarmWorker {
+					// check if we have connection to the daemon, if not, create it
+					if i.docker == nil {
+						dock, err := p.docker.New(i.IP, i.Cert, i.Key)
+						if err != nil {
+							log.Println(err)
+							return
+						}
+						i.docker = dock
+					}
+				}
+
+				if firstSwarmManager != nil {
+					if c.IsSwarmManager {
+						// this is a swarm manager
+						// cluster has already been initiated, join as manager
+						err := i.docker.SwarmJoin(fmt.Sprintf("%s:2377", firstSwarmManager.IP), tokens.Manager)
+						if err != nil {
+							log.Println(err)
+							return
+						}
+					}
+					if c.IsSwarmWorker {
+						// this is a swarm worker
+						err := i.docker.SwarmJoin(fmt.Sprintf("%s:2377", firstSwarmManager.IP), tokens.Worker)
+						if err != nil {
+							log.Println(err)
+							return
+						}
+					}
+				}
+			}(c)
+		}
+	}
+	wg.Wait()
+
 	return nil
 }
 
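The heart of the change is the split into two phases: the first swarm manager is still created serially, because its SwarmInit() produces the join tokens every other node needs, and only then are the remaining instances fanned out to goroutines and awaited with a sync.WaitGroup. A minimal, self-contained sketch of that fan-out pattern (plain hostnames stand in for the real SessionSetupInstanceConf values, and a print stands in for InstanceNew/SwarmJoin):

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		// hostnames stands in for conf.Instances; "manager1" plays the
		// role of the first swarm manager that was already set up serially.
		hostnames := []string{"manager1", "manager2", "manager3", "worker1", "other"}
		first := "manager1"

		wg := sync.WaitGroup{}
		for _, h := range hostnames {
			if h == first {
				continue // already created in the serial phase
			}
			wg.Add(1)
			// Pass the loop variable as an argument so each goroutine gets
			// its own copy; before Go 1.22 the loop variable was shared
			// across iterations, which is why the commit writes
			// `go func(c SessionSetupInstanceConf) { ... }(c)`.
			go func(h string) {
				defer wg.Done()
				fmt.Println("setting up", h) // stands in for InstanceNew + SwarmJoin
			}(h)
		}
		// Block until every setup goroutine has finished before returning.
		wg.Wait()
	}

One trade-off is visible in the diff: a goroutine has no way to make SessionSetup return an error, so the parallel path downgrades failures to log.Println(err) followed by an early return for that instance, where the old serial code propagated them with return err.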
pwd/session_test.go
@@ -84,8 +84,6 @@ func TestSessionNew(t *testing.T) {
 }
 
 func TestSessionSetup(t *testing.T) {
-	ips := []string{"10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.4", "10.0.0.5"}
-	nextIp := 0
 	swarmInitOnMaster1 := false
 	manager2JoinedHasManager := false
 	manager3JoinedHasManager := false
@@ -93,9 +91,20 @@ func TestSessionSetup(t *testing.T) {
 
 	dock := &mockDocker{}
 	dock.createContainer = func(opts docker.CreateContainerOpts) (string, error) {
-		ip := ips[nextIp]
-		nextIp++
-		return ip, nil
+		if opts.Hostname == "manager1" {
+			return "10.0.0.1", nil
+		} else if opts.Hostname == "manager2" {
+			return "10.0.0.2", nil
+		} else if opts.Hostname == "manager3" {
+			return "10.0.0.3", nil
+		} else if opts.Hostname == "worker1" {
+			return "10.0.0.4", nil
+		} else if opts.Hostname == "other" {
+			return "10.0.0.5", nil
+		} else {
+			assert.Fail(t, "Should not have reached here")
+		}
+		return "", nil
 	}
 	dock.new = func(ip string, cert, key []byte) (docker.DockerApi, error) {
 		if ip == "10.0.0.1" {
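The test change follows directly from the parallelism: with instances now created concurrently, the order of createContainer calls is nondeterministic, so the old shared nextIp counter would hand out IPs in whatever order the goroutines happened to run. Keying the fake IPs off the hostname makes the stub order-independent. A map-based equivalent of the updated stub (a hypothetical refactor for illustration, not what the commit ships):

	package main

	import "fmt"

	// ipByHostname keys each fake IP off the hostname so concurrent
	// callers get a deterministic answer regardless of call order.
	var ipByHostname = map[string]string{
		"manager1": "10.0.0.1",
		"manager2": "10.0.0.2",
		"manager3": "10.0.0.3",
		"worker1":  "10.0.0.4",
		"other":    "10.0.0.5",
	}

	// fakeCreateContainer is a hypothetical stand-in for the test's
	// dock.createContainer closure.
	func fakeCreateContainer(hostname string) (string, error) {
		ip, ok := ipByHostname[hostname]
		if !ok {
			return "", fmt.Errorf("unexpected hostname %q", hostname)
		}
		return ip, nil
	}

	func main() {
		ip, err := fakeCreateContainer("manager2")
		fmt.Println(ip, err) // prints: 10.0.0.2 <nil>
	}

Since the map is populated once and only ever read afterwards, concurrent lookups from the setup goroutines are safe without any locking.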