More fixes
@@ -54,7 +54,7 @@ func (f *localCachedFactory) GetForInstance(sessionId, instanceName string) (Doc
        return c, nil
    }

-   instance, err := f.storage.InstanceFind(sessionId, instanceName)
+   instance, err := f.storage.InstanceGet(sessionId, instanceName)
    if err != nil {
        return nil, err
    }
@@ -82,7 +82,7 @@ func (f *localCachedFactory) GetForInstance(sessionId, instanceName string) (Doc
    cli := &http.Client{
        Transport: transport,
    }
-   dc, err := client.NewClient(fmt.Sprintf("http://%s:443", instance.Session.Host), api.DefaultVersion, cli, map[string]string{"Host": router.EncodeHost(instance.SessionId, instance.IP, router.HostOpts{EncodedPort: 2375})})
+   dc, err := client.NewClient("http://192.168.1.5:443", api.DefaultVersion, cli, map[string]string{"X-Forwarded-Host": router.EncodeHost(instance.SessionId, instance.IP, router.HostOpts{EncodedPort: 2375})})
    if err != nil {
        return nil, fmt.Errorf("Could not connect to DinD docker daemon", err)
    }
@@ -90,12 +90,14 @@ func (f *localCachedFactory) GetForInstance(sessionId, instanceName string) (Doc
    if err != nil {
        return nil, err
    }
-   f.instanceClients[sessionId+instance.Name] = NewDocker(dc)
+   dockerClient := NewDocker(dc)
+   f.instanceClients[sessionId+instance.Name] = dockerClient

-   return f.instanceClients[instance.Name], nil
+   return dockerClient, nil
}

func (f *localCachedFactory) check(c *client.Client) error {
    ok := false
    for i := 0; i < 5; i++ {
        _, err := c.Ping(context.Background())
        if err != nil {
@@ -107,8 +109,12 @@ func (f *localCachedFactory) check(c *client.Client) error {
            }
            return err
        }
        ok = true
        break
    }
    if !ok {
        return fmt.Errorf("Connection to docker daemon was not established.")
    }
    return nil
}

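The hunks above fix the instance-client cache in the factory: the lookup now goes through InstanceGet, the Docker client is addressed through an X-Forwarded-Host header, and the constructed client is stored and returned under one consistent key instead of being written under sessionId+name but read back under name alone. A minimal sketch of that last pattern, using simplified stand-in types rather than the project's real factory:

    package main

    import "fmt"

    type dockerClient struct{ host string }

    type factory struct {
        clients map[string]*dockerClient
    }

    // getForInstance builds the client once, caches it under a single
    // composite key, and returns the same value it stored.
    func (f *factory) getForInstance(sessionId, name string) *dockerClient {
        key := sessionId + name
        if c, ok := f.clients[key]; ok {
            return c
        }
        c := &dockerClient{host: name}
        f.clients[key] = c // write and read use the same key
        return c
    }

    func main() {
        f := &factory{clients: map[string]*dockerClient{}}
        // The second call hits the cache and returns the same pointer.
        fmt.Println(f.getForInstance("s1", "node1") == f.getForInstance("s1", "node1")) // true
    }
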
@@ -10,6 +10,7 @@ import (
    "github.com/play-with-docker/play-with-docker/event"
    "github.com/play-with-docker/play-with-docker/pwd"
    "github.com/play-with-docker/play-with-docker/scheduler"
+   "github.com/play-with-docker/play-with-docker/scheduler/task"
    "github.com/play-with-docker/play-with-docker/storage"
)

@@ -18,7 +19,6 @@ var e event.EventApi
var ws *socketio.Server

func Bootstrap() {

    s, err := storage.NewFileStorage(config.SessionsFile)
    e = event.NewLocalBroker()

@@ -29,7 +29,17 @@ func Bootstrap() {
    }
    core = pwd.NewPWD(f, e, s)

-   scheduler.NewScheduler(s, e, core)
+   sch, err := scheduler.NewScheduler(s, e, core)
+   if err != nil {
+       log.Fatal("Error initializing the scheduler: ", err)
+   }

+   sch.AddTask(task.NewCheckPorts(e, f))
+   sch.AddTask(task.NewCheckSwarmPorts(e, f))
+   sch.AddTask(task.NewCheckSwarmStatus(e, f))
+   sch.AddTask(task.NewCollectStats(e, f))

+   sch.Start()
}

func RegisterEvents(s *socketio.Server) {

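Bootstrap now keeps the value returned by scheduler.NewScheduler, aborts on a constructor error, registers the periodic tasks, and starts the scheduler. A rough sketch of the same fail-fast shape with placeholder types (newScheduler and its argument are invented for illustration, not the project's API):

    package main

    import (
        "errors"
        "log"
    )

    type scheduler struct{ tasks []string }

    // newScheduler mirrors the constructor-returns-error shape used in Bootstrap.
    func newScheduler(ready bool) (*scheduler, error) {
        if !ready {
            return nil, errors.New("storage not available")
        }
        return &scheduler{}, nil
    }

    func (s *scheduler) AddTask(name string) { s.tasks = append(s.tasks, name) }

    func (s *scheduler) Start() { log.Println("scheduler started with", len(s.tasks), "tasks") }

    func main() {
        sch, err := newScheduler(true)
        if err != nil {
            log.Fatal("Error initializing the scheduler: ", err)
        }
        sch.AddTask("CheckPorts")
        sch.AddTask("CollectStats")
        sch.Start()
    }
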
@@ -63,7 +63,7 @@ func WS(so socketio.Socket) {
                log.Println(err)
                return
            }
-           ws.Emit("instance terminal out", instanceName, b)
+           ws.Emit("instance terminal out", instanceName, string(b))
        }
    }(instance.Name, conn, ws)
}

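The Emit change sends the terminal output as string(b) rather than the raw []byte, presumably because a byte slice would otherwise be JSON-encoded as base64 on its way to the browser. A small standard-library illustration of that difference:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        b := []byte("hello from the terminal")

        asBytes, _ := json.Marshal(b)          // a []byte marshals to a base64 string
        asString, _ := json.Marshal(string(b)) // string(b) stays readable text

        fmt.Println(string(asBytes))
        fmt.Println(string(asString))
    }
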
@@ -111,7 +111,7 @@ func (p *pwd) InstanceGet(session *types.Session, name string) *types.Instance {

func (p *pwd) InstanceFind(sessionId, ip string) *types.Instance {
    defer observeAction("InstanceFind", time.Now())
-   i, err := p.storage.InstanceFind(sessionId, ip)
+   i, err := p.storage.InstanceFindByIP(sessionId, ip)
    if err != nil {
        return nil
    }
@@ -211,7 +211,8 @@ func (p *pwd) InstanceNew(session *types.Session, conf InstanceConfig) (*types.I
    instance.ServerKey = conf.ServerKey
    instance.CACert = conf.CACert
    instance.Session = session
-   instance.Proxy = router.EncodeHost(session.Id, ip, router.HostOpts{})
+   instance.ProxyHost = router.EncodeHost(session.Id, ip, router.HostOpts{})
+   instance.SessionHost = session.Host
    // For now this condition holds through. In the future we might need a more complex logic.
    instance.IsDockerHost = opts.Privileged

@@ -69,7 +69,7 @@ func TestInstanceNew(t *testing.T) {
        IsDockerHost: true,
        SessionId: session.Id,
        Session: session,
-       Proxy: router.EncodeHost(session.Id, "10.0.0.1", router.HostOpts{}),
+       ProxyHost: router.EncodeHost(session.Id, "10.0.0.1", router.HostOpts{}),
    }
    expectedContainerOpts := docker.CreateContainerOpts{
        Image: expectedInstance.Image,
@@ -132,7 +132,7 @@ func TestInstanceNew_WithNotAllowedImage(t *testing.T) {
        SessionId: session.Id,
        IsDockerHost: false,
        Session: session,
-       Proxy: router.EncodeHost(session.Id, "10.0.0.1", router.HostOpts{}),
+       ProxyHost: router.EncodeHost(session.Id, "10.0.0.1", router.HostOpts{}),
    }
    expectedContainerOpts := docker.CreateContainerOpts{
        Image: expectedInstance.Image,
@@ -193,7 +193,7 @@ func TestInstanceNew_WithCustomHostname(t *testing.T) {
        IsDockerHost: false,
        Session: session,
        SessionId: session.Id,
-       Proxy: router.EncodeHost(session.Id, "10.0.0.1", router.HostOpts{}),
+       ProxyHost: router.EncodeHost(session.Id, "10.0.0.1", router.HostOpts{}),
    }
    expectedContainerOpts := docker.CreateContainerOpts{
        Image: expectedInstance.Image,

@@ -1,9 +1,6 @@
package types

-import (
-   "context"
-   "sync"
-)
+import "context"

type Instance struct {
    Image string `json:"image" bson:"image"`
@@ -17,8 +14,8 @@ type Instance struct {
    Key []byte `json:"key" bson:"key"`
    IsDockerHost bool `json:"is_docker_host" bson:"is_docker_host"`
    SessionId string `json:"session_id" bson:"session_id"`
-   Proxy string `json:"proxy" bson:"proxy"`
+   ProxyHost string `json:"proxy_host" bson:"proxy_host"`
+   SessionHost string `json:"session_host" bson:"session_host"`
    Session *Session `json:"-" bson:"-"`
    ctx context.Context `json:"-" bson:"-"`
-   rw sync.Mutex
}

@@ -382,7 +382,11 @@ func (r *proxyRouter) handleConnection(c net.Conn) {
        // It is not http neither. So just close the connection.
        return
    }
-   dstHost, err := r.director(req.Host)
+   host := req.Header.Get("X-Forwarded-Host")
+   if host == "" {
+       host = req.Host
+   }
+   dstHost, err := r.director(host)
    if err != nil {
        log.Printf("Error directing request: %v\n", err)
        return

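The proxy now routes on X-Forwarded-Host when the header is present and only falls back to the request's Host, which is what allows the factory above to dial a fixed endpoint while still carrying the encoded instance host. A standalone sketch of the fallback using only the standard library (the header value set in main is an illustrative placeholder, not the real encoded-host format):

    package main

    import (
        "fmt"
        "net/http"
    )

    // routingHost returns the host the proxy should route on: the
    // X-Forwarded-Host header when present, otherwise the request Host.
    func routingHost(req *http.Request) string {
        if h := req.Header.Get("X-Forwarded-Host"); h != "" {
            return h
        }
        return req.Host
    }

    func main() {
        req, _ := http.NewRequest("GET", "http://192.168.1.5:443/", nil)
        fmt.Println(routingHost(req)) // falls back to req.Host: 192.168.1.5:443

        req.Header.Set("X-Forwarded-Host", "ip10-0-0-1-2375.example.com") // illustrative value only
        fmt.Println(routingHost(req)) // the header takes precedence
    }
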
@@ -123,8 +123,9 @@ func (s *scheduler) Start() {
}

func (s *scheduler) register(session *types.Session) *scheduledSession {
-   s.scheduledSessions[session.Id] = &scheduledSession{session: session}
-   return s.scheduledSessions[session.Id]
+   ss := &scheduledSession{session: session}
+   s.scheduledSessions[session.Id] = ss
+   return ss
}

func (s *scheduler) cron(ctx context.Context, session *scheduledSession) {

@@ -48,7 +48,22 @@ func (store *storage) SessionPut(s *types.Session) error {
    return store.save()
}

-func (store *storage) InstanceFind(sessionId, ip string) (*types.Instance, error) {
+func (store *storage) InstanceGet(sessionId, name string) (*types.Instance, error) {
+   store.rw.Lock()
+   defer store.rw.Unlock()

+   s := store.db[sessionId]
+   if s == nil {
+       return nil, fmt.Errorf("%s", notFound)
+   }
+   i := s.Instances[name]
+   if i == nil {
+       return nil, fmt.Errorf("%s", notFound)
+   }
+   return i, nil
+}

+func (store *storage) InstanceFindByIP(sessionId, ip string) (*types.Instance, error) {
    store.rw.Lock()
    defer store.rw.Unlock()

@@ -116,19 +131,6 @@ func (store *storage) InstanceCount() (int, error) {
    return ins, nil
}

-func (store *storage) ClientCount() (int, error) {
-   store.rw.Lock()
-   defer store.rw.Unlock()

-   var cli int

-   for _, s := range store.db {
-       cli += len(s.Clients)
-   }

-   return cli, nil
-}

func (store *storage) SessionDelete(sessionId string) error {
    store.rw.Lock()
    defer store.rw.Unlock()

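The old InstanceFind is split in two: InstanceGet is a direct lookup by instance name, while InstanceFindByIP scans a session's instances for a matching IP. A compact sketch of the two access patterns over the same in-memory layout, with simplified types standing in for the project's storage:

    package main

    import (
        "errors"
        "fmt"
    )

    type instance struct {
        Name string
        IP   string
    }

    type session struct {
        Instances map[string]*instance
    }

    var errNotFound = errors.New("not found")

    // instanceGet is a direct map lookup by name.
    func instanceGet(s *session, name string) (*instance, error) {
        if i, ok := s.Instances[name]; ok {
            return i, nil
        }
        return nil, errNotFound
    }

    // instanceFindByIP scans the session's instances for a matching IP.
    func instanceFindByIP(s *session, ip string) (*instance, error) {
        for _, i := range s.Instances {
            if i.IP == ip {
                return i, nil
            }
        }
        return nil, errNotFound
    }

    func main() {
        s := &session{Instances: map[string]*instance{
            "i1": {Name: "i1", IP: "10.0.0.1"},
        }}
        byName, _ := instanceGet(s, "i1")
        byIP, _ := instanceFindByIP(s, "10.0.0.1")
        fmt.Println(byName == byIP) // true: both resolve the same instance
    }
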
@@ -102,7 +102,7 @@ func TestSessionGetAll(t *testing.T) {
    assert.Equal(t, s2, loadedSessions[s2.Id])
}

-func TestInstanceFind(t *testing.T) {
+func TestInstanceFindByIP(t *testing.T) {
    tmpfile, err := ioutil.TempFile("", "pwd")
    if err != nil {
        log.Fatal(err)
@@ -124,27 +124,50 @@ func TestInstanceFind(t *testing.T) {
    err = storage.SessionPut(s2)
    assert.Nil(t, err)

-   foundInstance, err := storage.InstanceFind("session1", "10.0.0.1")
+   foundInstance, err := storage.InstanceFindByIP("session1", "10.0.0.1")
    assert.Nil(t, err)
    assert.Equal(t, i1, foundInstance)

-   foundInstance, err = storage.InstanceFind("session2", "10.1.0.1")
+   foundInstance, err = storage.InstanceFindByIP("session2", "10.1.0.1")
    assert.Nil(t, err)
    assert.Equal(t, i2, foundInstance)

-   foundInstance, err = storage.InstanceFind("session3", "10.1.0.1")
+   foundInstance, err = storage.InstanceFindByIP("session3", "10.1.0.1")
    assert.True(t, NotFound(err))
    assert.Nil(t, foundInstance)

-   foundInstance, err = storage.InstanceFind("session1", "10.1.0.1")
+   foundInstance, err = storage.InstanceFindByIP("session1", "10.1.0.1")
    assert.True(t, NotFound(err))
    assert.Nil(t, foundInstance)

-   foundInstance, err = storage.InstanceFind("session1", "192.168.0.1")
+   foundInstance, err = storage.InstanceFindByIP("session1", "192.168.0.1")
    assert.True(t, NotFound(err))
    assert.Nil(t, foundInstance)
}

+func TestInstanceGet(t *testing.T) {
+   tmpfile, err := ioutil.TempFile("", "pwd")
+   if err != nil {
+       log.Fatal(err)
+   }
+   tmpfile.Close()
+   os.Remove(tmpfile.Name())
+   defer os.Remove(tmpfile.Name())

+   storage, err := NewFileStorage(tmpfile.Name())

+   assert.Nil(t, err)

+   i1 := &types.Instance{Name: "i1", IP: "10.0.0.1"}
+   s1 := &types.Session{Id: "session1", Instances: map[string]*types.Instance{"i1": i1}}
+   err = storage.SessionPut(s1)
+   assert.Nil(t, err)

+   foundInstance, err := storage.InstanceGet("session1", "i1")
+   assert.Nil(t, err)
+   assert.Equal(t, i1, foundInstance)
+}

func TestInstanceCreate(t *testing.T) {
    tmpfile, err := ioutil.TempFile("", "pwd")
    if err != nil {

@@ -34,7 +34,12 @@ func (m *Mock) SessionGetAll() (map[string]*types.Session, error) {
    return args.Get(0).(map[string]*types.Session), args.Error(1)
}

-func (m *Mock) InstanceFind(sessionId, ip string) (*types.Instance, error) {
+func (m *Mock) InstanceGet(sessionId, name string) (*types.Instance, error) {
+   args := m.Called(sessionId, name)
+   return args.Get(0).(*types.Instance), args.Error(1)
+}

+func (m *Mock) InstanceFindByIP(sessionId, ip string) (*types.Instance, error) {
    args := m.Called(sessionId, ip)
    return args.Get(0).(*types.Instance), args.Error(1)
}

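With the mock exposing both InstanceGet and InstanceFindByIP, tests stub whichever lookup the code under test performs. A small, self-contained example using testify's mock package (the Instance type and the expectation values here are simplified for illustration):

    package main

    import (
        "fmt"

        "github.com/stretchr/testify/mock"
    )

    type Instance struct{ Name, IP string }

    type Mock struct{ mock.Mock }

    func (m *Mock) InstanceGet(sessionId, name string) (*Instance, error) {
        args := m.Called(sessionId, name)
        return args.Get(0).(*Instance), args.Error(1)
    }

    func main() {
        m := &Mock{}
        i := &Instance{Name: "i1", IP: "10.0.0.1"}
        // Stub the name-based lookup the way a factory test would.
        m.On("InstanceGet", "session1", "i1").Return(i, nil)

        got, err := m.InstanceGet("session1", "i1")
        fmt.Println(got.Name, err) // i1 <nil>
    }
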
@@ -15,9 +15,9 @@ type StorageApi interface {
    SessionDelete(string) error
    SessionGetAll() (map[string]*types.Session, error)

-   InstanceFind(session, ip string) (*types.Instance, error)
+   InstanceGet(sessionId, name string) (*types.Instance, error)
+   InstanceFindByIP(session, ip string) (*types.Instance, error)
    InstanceCreate(sessionId string, instance *types.Instance) error
    InstanceDelete(sessionId, instanceName string) error

    InstanceCount() (int, error)
}

@@ -212,11 +212,33 @@
    $scope.connected = true;
});

-socket.on('instance stats', function(name, mem, cpu, isManager, ports) {
-   $scope.idx[name].mem = mem;
-   $scope.idx[name].cpu = cpu;
-   $scope.idx[name].isManager = isManager;
-   $scope.idx[name].ports = ports;
+socket.on('instance stats', function(stats) {
+   $scope.idx[stats.instance].mem = stats.mem;
+   $scope.idx[stats.instance].cpu = stats.cpu;
    $scope.$apply();
});

+socket.on('instance docker swarm status', function(status) {
+   if (status.is_manager) {
+       $scope.idx[status.instance].isManager = true
+   } else if (status.is_worker) {
+       $scope.idx[status.instance].isManager = false
+   } else {
+       $scope.idx[status.instance].isManager = null
+   }
+   $scope.$apply();
+});

+socket.on('instance docker ports', function(status) {
+   $scope.idx[status.instance].ports = status.ports;
+   $scope.$apply();
+});

+socket.on('instance docker swarm ports', function(status) {
+   for(var i in status.instances) {
+       var instance = status.instances[i];
+       $scope.idx[instance].swarmPorts = status.ports;
+   }
+   $scope.$apply();
+});

@@ -90,6 +90,11 @@
        <strong><a href="{{getProxyUrl(instance, $chip)}}" title="{{getProxyUrl(instance, $chip)}}" target="_blank">{{$chip}}</a></strong>
    </md-chip-template>
</md-chips>
+<md-chips ng-model="instance.swarmPorts" name="port" readonly="true" md-removable="false">
+   <md-chip-template>
+       <strong><a href="{{getProxyUrl(instance, $chip)}}" title="{{getProxyUrl(instance, $chip)}}" target="_blank">{{$chip}}</a></strong>
+   </md-chip-template>
+</md-chips>
</div>
<div layout-gt-sm="row">
    <md-input-container class="md-block" flex-gt-sm>
