author     Leonardo Bishop <me@leonardobishop.net>   2026-01-07 23:39:53 +0000
committer  Leonardo Bishop <me@leonardobishop.net>   2026-01-07 23:39:53 +0000
commit     03cd6bdfbd473dba3f3dc50a1b15e389aac5bc70 (patch)
tree       5fea2b1840e298aaab953add749fb9226bd4a710 /pkg
Initial commit
Diffstat (limited to 'pkg')
-rw-r--r--   pkg/deployer/client.go      114
-rw-r--r--   pkg/deployer/config.go      124
-rw-r--r--   pkg/deployer/constants.go     17
-rw-r--r--   pkg/deployer/deploy.go       245
-rw-r--r--   pkg/deployer/instance.go     163
-rw-r--r--   pkg/janitor/janitor.go        26
-rw-r--r--   pkg/registry/list.go          62
-rw-r--r--   pkg/session/session.go        85
8 files changed, 836 insertions, 0 deletions
diff --git a/pkg/deployer/client.go b/pkg/deployer/client.go
new file mode 100644
index 0000000..b240857
--- /dev/null
+++ b/pkg/deployer/client.go
@@ -0,0 +1,114 @@
+package deployer
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "sync"
+
+ "github.com/google/uuid"
+ "github.com/moby/moby/api/types/registry"
+ "github.com/moby/moby/client"
+)
+
+type DockerDeployer struct {
+ client *client.Client
+ deployJobs map[string]*DeployJob
+ deployJobsMutex sync.RWMutex
+
+ containerCreateMutex sync.Mutex
+
+ instancerDomain string
+
+ registryURL string
+ registryUsername string
+ registryPassword string
+
+ proxyContainerName string
+ imagePrefix string
+}
+
+type DeployJob struct {
+ DeployChan chan DeployStatus
+
+ deployKey string
+ challenge string
+ team string
+}
+
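+// New builds a DockerDeployer using a Docker client configured from the
+// environment, together with the given registry, domain and proxy settings.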
+func New(registryURL, registryUsername, registryPassword, instancerDomain, imagePrefix, proxyContainerName string) (DockerDeployer, error) {
+ c, err := client.New(client.FromEnv)
+ if err != nil {
+ return DockerDeployer{}, fmt.Errorf("docker client error: %w", err)
+ }
+
+ return DockerDeployer{
+ client: c,
+ deployJobs: make(map[string]*DeployJob),
+
+ registryURL: registryURL,
+ registryUsername: registryUsername,
+ registryPassword: registryPassword,
+ instancerDomain: instancerDomain,
+ imagePrefix: imagePrefix,
+ proxyContainerName: proxyContainerName,
+ }, nil
+}
+
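+// GetJob returns the deployment job for the given deploy key, or nil if none exists.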
+func (d *DockerDeployer) GetJob(deployKey string) *DeployJob {
+ d.deployJobsMutex.RLock()
+ defer d.deployJobsMutex.RUnlock()
+
+ return d.deployJobs[deployKey]
+}
+
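+// registryLogin serialises the configured registry credentials into the
+// base64-encoded JSON auth string passed to image pulls.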
+func (d *DockerDeployer) registryLogin() string {
+ options := registry.AuthConfig{
+ ServerAddress: d.registryURL,
+ Username: d.registryUsername,
+ Password: d.registryPassword,
+ }
+ encodedJSON, err := json.Marshal(options)
+ if err != nil {
+ panic(err)
+ }
+
+ return base64.StdEncoding.EncodeToString(encodedJSON)
+}
+
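+// createDeployJob registers a new deployment under a freshly generated UUID
+// and returns the key together with the job and its status channel.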
+func (d *DockerDeployer) createDeployJob(challenge, team string) (string, *DeployJob) {
+ d.deployJobsMutex.Lock()
+ defer d.deployJobsMutex.Unlock()
+
+ deploymentKey := uuid.New().String()
+ deploymentChan := make(chan DeployStatus)
+
+ job := &DeployJob{
+ DeployChan: deploymentChan,
+
+ deployKey: deploymentKey,
+ challenge: challenge,
+ team: team,
+ }
+ d.deployJobs[deploymentKey] = job
+
+ return deploymentKey, job
+}
+
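+// writeDeployChannel forwards a status update to the job's channel and, on a
+// terminal status (success or error), emits a final "done" message and closes
+// the channel.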
+func (d *DockerDeployer) writeDeployChannel(deploymentKey string, status DeployStatus) {
+ d.deployJobsMutex.RLock()
+ defer d.deployJobsMutex.RUnlock()
+
+ job := d.deployJobs[deploymentKey]
+ if job == nil || job.DeployChan == nil {
+ return
+ }
+
+ job.DeployChan <- status
+
+ if status.Status == statusSuccess || status.Status == statusError {
+ job.DeployChan <- DeployStatus{Status: "done", Message: "done"}
+ close(job.DeployChan)
+ //TODO cleanup
+ }
+}
diff --git a/pkg/deployer/config.go b/pkg/deployer/config.go
new file mode 100644
index 0000000..f8c2795
--- /dev/null
+++ b/pkg/deployer/config.go
@@ -0,0 +1,124 @@
+package deployer
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type ImageConfig struct {
+ Port int
+
+ ProxyEnable bool
+ ProxyKind string
+
+ Limits struct {
+ Memory int64
+ CPU int64
+ }
+
+ Security struct {
+ ReadOnlyFS bool
+ SecurityOpt []string
+ CapAdd []string
+ CapDrop []string
+ }
+}
+
+func defaultImageConfig() ImageConfig {
+ cfg := ImageConfig{
+ Port: 0,
+ ProxyEnable: true,
+ ProxyKind: "http",
+ }
+
+ cfg.Limits.Memory = 1024 * 1024 * 1024
+ cfg.Limits.CPU = 1000000000
+
+ cfg.Security.ReadOnlyFS = true
+ cfg.Security.SecurityOpt = []string{"no-new-privileges"}
+ cfg.Security.CapAdd = []string{}
+ cfg.Security.CapDrop = []string{"ALL"}
+
+ return cfg
+}
+
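+// extractImageConfig reads the instancer labels from an image and overlays
+// them on the defaults; a port label is mandatory.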
+func extractImageConfig(imageLabels map[string]string) (ImageConfig, error) {
+ var prefix = Namespace + "."
+
+ cfg := defaultImageConfig()
+
+ for k, v := range imageLabels {
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+
+ key := strings.TrimPrefix(k, prefix)
+
+ switch key {
+ case "port":
+ p, err := strconv.Atoi(v)
+ if err != nil {
+ return cfg, fmt.Errorf("invalid port: %q", v)
+ }
+ cfg.Port = p
+
+ case "proxy.enable":
+ cfg.ProxyEnable = v == "true"
+
+ case "proxy.kind":
+ if v != "http" && v != "tcp" {
+ return cfg, fmt.Errorf("invalid proxy.kind: %q", v)
+ }
+ cfg.ProxyKind = v
+
+ case "limits.memory":
+ cfg.Limits.Memory = parseMemory(v)
+
+ case "limits.cpu":
+ cfg.Limits.CPU = parseCPU(v)
+
+ case "security.read-only-fs":
+ cfg.Security.ReadOnlyFS = v == "true"
+
+ case "security.security-opt":
+ cfg.Security.SecurityOpt = strings.Split(v, ",")
+
+ case "security.cap-add":
+ cfg.Security.CapAdd = strings.Split(v, ",")
+
+ case "security.cap-drop":
+ cfg.Security.CapDrop = strings.Split(v, ",")
+ }
+ }
+
+ if cfg.Port == 0 {
+ return cfg, fmt.Errorf("no port given")
+ }
+
+ return cfg, nil
+}
+
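+// parseMemory converts values such as "512M" or "1G" into bytes; plain
+// numbers are treated as bytes.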
+func parseMemory(mem string) int64 {
+ mem = strings.ToUpper(mem)
+
+ switch {
+ case strings.HasSuffix(mem, "G"):
+ v, _ := strconv.ParseInt(strings.TrimSuffix(mem, "G"), 10, 64)
+ return v * 1024 * 1024 * 1024
+ case strings.HasSuffix(mem, "M"):
+ v, _ := strconv.ParseInt(strings.TrimSuffix(mem, "M"), 10, 64)
+ return v * 1024 * 1024
+ default:
+ v, _ := strconv.ParseInt(mem, 10, 64)
+ return v
+ }
+}
+
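+// parseCPU converts a fractional CPU count such as "0.5" into NanoCPUs,
+// falling back to one full CPU on parse errors.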
+func parseCPU(cpu string) int64 {
+ f, err := strconv.ParseFloat(cpu, 64)
+ if err != nil {
+ return 1000000000
+ }
+ return int64(f * 1000000000)
+}
diff --git a/pkg/deployer/constants.go b/pkg/deployer/constants.go
new file mode 100644
index 0000000..9a14b8d
--- /dev/null
+++ b/pkg/deployer/constants.go
@@ -0,0 +1,17 @@
+package deployer
+
+var (
+ Namespace = "net.leonardobishop.instancer"
+)
+
+var (
+ ContainerLabelManaged = Namespace + ".managed"
+ ContainerLabelCreatedAt = Namespace + ".created-at"
+ ContainerLabelExpiresAt = Namespace + ".expires-at"
+ ContainerLabelChallenge = Namespace + ".challenge"
+ ContainerLabelForTeam = Namespace + ".for-team"
+ ContainerLabelRequestedBy = Namespace + ".requested-by"
+ ContainerLabelDeployKey = Namespace + ".deploy-key"
+ ContainerLabelAddress = Namespace + ".address"
+ ContainerLabelAddressFormat = Namespace + ".address-format"
+)
diff --git a/pkg/deployer/deploy.go b/pkg/deployer/deploy.go
new file mode 100644
index 0000000..3e21ce4
--- /dev/null
+++ b/pkg/deployer/deploy.go
@@ -0,0 +1,245 @@
+package deployer
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "maps"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/moby/moby/api/types/container"
+ "github.com/moby/moby/api/types/network"
+ "github.com/moby/moby/client"
+ "tailscale.com/util/truncate"
+)
+
+type DeployStatus struct {
+ Status string
+ Message string
+}
+
+const (
+ statusProgress = "progress"
+ statusError = "error"
+ statusSuccess = "success"
+)
+
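+// StartDeploy creates a deployment job for the given challenge and team,
+// runs the deployment asynchronously and returns the deploy key; progress
+// is reported on the job's channel.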
+func (d *DockerDeployer) StartDeploy(challenge, team string) string {
+ deploymentKey, job := d.createDeployJob(challenge, team)
+
+ go func() {
+ err := d.doDeployment(deploymentKey, job, context.Background())
+ if err != nil {
+ d.writeDeployChannel(deploymentKey, DeployStatus{Status: statusError, Message: `<div class="alert alert-danger mb-0" role="alert">Aborted: ` + err.Error() + `</div>`})
+ }
+ }()
+
+ return deploymentKey
+}
+
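+// doDeployment runs the full deployment pipeline: it checks for an existing
+// instance, pulls the image, reads its label configuration, creates a
+// dedicated network and container, and finally starts the container.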
+func (d *DockerDeployer) doDeployment(deployKey string, job *DeployJob, ctx context.Context) error {
+ log := slog.With(
+ "deployment", deployKey,
+ "challenge", job.challenge,
+ "team", job.team,
+ )
+
+ var unlockOnce sync.Once
+ // if !d.containerCreateMutex.TryLock() {
+ // d.writeDeployChannel(deployKey, DeployStatus{statusProgress, "Challenge deployment queued"})
+ // d.containerCreateMutex.Lock()
+ // }
+ d.containerCreateMutex.Lock()
+ defer unlockOnce.Do(d.containerCreateMutex.Unlock)
+
+ log.Info("starting challenge deployment")
+ imageName := fmt.Sprintf("%s%s:latest", d.imagePrefix, job.challenge)
+
+ if err := d.containerExistsForTeam(ctx, log, job); err != nil {
+ return err
+ }
+
+ d.writeDeployChannel(deployKey, DeployStatus{statusProgress, "Pulling image"})
+ if err := d.pullImage(ctx, log, imageName); err != nil {
+ return err
+ }
+
+ d.writeDeployChannel(deployKey, DeployStatus{statusProgress, "Reading challenge configuration"})
+ imageCfg, err := d.readImageMetadata(ctx, log, imageName)
+ if err != nil {
+ return err
+ }
+
+ proxyRouteName := strings.ReplaceAll(deployKey, "-", "")
+ url := truncate.String(job.challenge, 30) + "-" + proxyRouteName + "." + d.instancerDomain
+ expiry := time.Now().Add(90 * time.Minute)
+
+ d.writeDeployChannel(deployKey, DeployStatus{statusProgress, "Configuring network"})
+ networkID, err := d.setupNetwork(ctx, log, job, expiry, proxyRouteName, imageCfg.ProxyEnable)
+ if err != nil {
+ return err
+ }
+
+ log.Info("challenge network created", "networkID", networkID)
+
+ d.writeDeployChannel(deployKey, DeployStatus{statusProgress, "Creating container"})
+ containerID, err := d.createContainer(ctx, log, job, expiry, imageName, proxyRouteName, networkID, url, imageCfg)
+ if err != nil {
+ return err
+ }
+
+ unlockOnce.Do(d.containerCreateMutex.Unlock)
+
+ log.Info("challenge container created", "containerID", containerID)
+
+ d.writeDeployChannel(deployKey, DeployStatus{Status: statusProgress, Message: "Starting container"})
+ if _, err := d.client.ContainerStart(ctx, containerID, client.ContainerStartOptions{}); err != nil {
+ log.Error("could not start challenge container", "cause", err)
+ return fmt.Errorf("error starting container")
+ }
+
+ d.writeDeployChannel(deployKey, DeployStatus{Status: statusSuccess, Message: `<div class="alert alert-success mb-0" role="alert">Challenge created at <a class="alert-link" target="_blank" href="https://` + url + `">https://` + url + `</a></div>`})
+ return nil
+}
+
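+// containerExistsForTeam returns an error if the team already has a container
+// (running or stopped) for this challenge.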
+func (d *DockerDeployer) containerExistsForTeam(ctx context.Context, log *slog.Logger, job *DeployJob) error {
+ filters := client.Filters{}
+ filters.Add("label", ContainerLabelChallenge+"="+job.challenge)
+ filters.Add("label", ContainerLabelForTeam+"="+job.team)
+
+ containers, err := d.client.ContainerList(ctx, client.ContainerListOptions{
+ All: true,
+ Filters: filters,
+ })
+ if err != nil {
+ log.Error("could not pull list challenges", "cause", err)
+ return fmt.Errorf("error checking uniqueness")
+ }
+
+ if len(containers.Items) > 0 {
+ return fmt.Errorf("instance of challenge already deployed for team")
+ }
+ return nil
+}
+
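+// pullImage pulls the challenge image from the configured registry using the
+// encoded credentials and waits for the pull to complete.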
+func (d *DockerDeployer) pullImage(ctx context.Context, log *slog.Logger, imageName string) error {
+ resp, err := d.client.ImagePull(ctx, imageName, client.ImagePullOptions{
+ RegistryAuth: d.registryLogin(),
+ })
+ if err != nil {
+ log.Error("could not pull challenge image", "cause", err)
+ return fmt.Errorf("error pulling challenge image")
+ }
+ defer resp.Close()
+ resp.Wait(ctx)
+
+ return nil
+}
+
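+// readImageMetadata inspects the pulled image and parses its instancer labels
+// into an ImageConfig.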
+func (d *DockerDeployer) readImageMetadata(ctx context.Context, log *slog.Logger, imageName string) (*ImageConfig, error) {
+ inspect, err := d.client.ImageInspect(ctx, imageName)
+ if err != nil {
+ log.Error("could not inspect image", "cause", err)
+ return nil, fmt.Errorf("error loading challenge configuration")
+ }
+
+ imageCfg, err := extractImageConfig(inspect.Config.Labels)
+ if err != nil {
+ log.Error("invalid challenge configuration", "cause", err)
+ return nil, fmt.Errorf("error loading challenge configuration")
+ }
+
+ return &imageCfg, nil
+}
+
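+// setupNetwork creates a labelled bridge network for the instance and, when
+// proxying is enabled, attaches the reverse proxy container to it.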
+func (d *DockerDeployer) setupNetwork(ctx context.Context, log *slog.Logger, job *DeployJob, expiry time.Time, routeName string, proxy bool) (string, error) {
+ resp, err := d.client.NetworkCreate(ctx, "n-"+routeName, client.NetworkCreateOptions{
+ Driver: "bridge",
+ Labels: map[string]string{
+ ContainerLabelManaged: "yes",
+ ContainerLabelCreatedAt: strconv.FormatInt(time.Now().Unix(), 10),
+ ContainerLabelExpiresAt: strconv.FormatInt(expiry.Unix(), 10),
+ ContainerLabelDeployKey: job.deployKey,
+ ContainerLabelChallenge: job.challenge,
+ ContainerLabelForTeam: job.team,
+ },
+ })
+ if err != nil {
+ log.Error("could not create challenge network", "cause", err)
+ return "", fmt.Errorf("could not create challenge network")
+ }
+
+ if proxy {
+ if _, err := d.client.NetworkConnect(ctx, resp.ID, client.NetworkConnectOptions{
+ Container: d.proxyContainerName,
+ }); err != nil {
+ log.Error("could not connect proxy to challenge network", "cause", err)
+ return "", fmt.Errorf("could not connect proxy to challenge network")
+ }
+ }
+
+ return resp.ID, nil
+}
+
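+// createContainer creates the challenge container with expiry and ownership
+// labels, Traefik routing labels when proxying is enabled, and the resource
+// and security settings taken from the image configuration.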
+func (d *DockerDeployer) createContainer(ctx context.Context, log *slog.Logger, job *DeployJob, expiry time.Time, imageName, routeName, networkID, url string, imageCfg *ImageConfig) (string, error) {
+ now := time.Now()
+
+ labels := map[string]string{
+ ContainerLabelManaged: "yes",
+ ContainerLabelCreatedAt: strconv.FormatInt(now.Unix(), 10),
+ ContainerLabelExpiresAt: strconv.FormatInt(expiry.Unix(), 10),
+ ContainerLabelDeployKey: job.deployKey,
+ ContainerLabelChallenge: job.challenge,
+ ContainerLabelForTeam: job.team,
+ ContainerLabelAddress: url,
+ }
+
+ if imageCfg.ProxyEnable {
+ //TODO do tcp
+ maps.Copy(labels, map[string]string{
+ ContainerLabelAddressFormat: "https",
+
+ "traefik.enable": "true",
+ "traefik.docker.network": "n-" + routeName,
+
+ "traefik.http.routers." + routeName + ".entrypoints": "websecure",
+ "traefik.http.routers." + routeName + ".rule": "Host(`" + url + "`)",
+ "traefik.http.routers." + routeName + ".tls": "true",
+ "traefik.http.routers." + routeName + ".service": routeName,
+
+ "traefik.http.services." + routeName + ".loadbalancer.server.port": strconv.Itoa(imageCfg.Port),
+ })
+ }
+
+ resp, err := d.client.ContainerCreate(ctx, client.ContainerCreateOptions{
+ Image: imageName,
+ Config: &container.Config{
+ Labels: labels,
+ },
+ Name: "c-" + routeName,
+ HostConfig: &container.HostConfig{
+ Resources: container.Resources{
+ Memory: imageCfg.Limits.Memory,
+ NanoCPUs: imageCfg.Limits.CPU,
+ },
+ ReadonlyRootfs: imageCfg.Security.ReadOnlyFS,
+ SecurityOpt: imageCfg.Security.SecurityOpt,
+ CapAdd: imageCfg.Security.CapAdd,
+ CapDrop: imageCfg.Security.CapDrop,
+ },
+ NetworkingConfig: &network.NetworkingConfig{
+ EndpointsConfig: map[string]*network.EndpointSettings{
+ networkID: {},
+ },
+ },
+ })
+ if err != nil {
+ log.Error("could not create challenge container", "cause", err)
+ return "", fmt.Errorf("error creating container")
+ }
+
+ return resp.ID, nil
+}
diff --git a/pkg/deployer/instance.go b/pkg/deployer/instance.go
new file mode 100644
index 0000000..99171f4
--- /dev/null
+++ b/pkg/deployer/instance.go
@@ -0,0 +1,163 @@
+package deployer
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "strconv"
+ "time"
+
+ "github.com/moby/moby/client"
+)
+
+type Instance struct {
+ ChallengeName string
+ DeployKey string
+ Address string
+ AddressFormat string
+ ExpiresAt time.Time
+}
+
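+// GetTeamInstances lists all containers deployed for the given team and maps
+// their labels onto Instance values.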
+func (d *DockerDeployer) GetTeamInstances(ctx context.Context, team string) ([]Instance, error) {
+ filters := client.Filters{}
+ filters.Add("label", ContainerLabelForTeam+"="+team)
+
+ containers, err := d.client.ContainerList(ctx, client.ContainerListOptions{
+ All: true,
+ Filters: filters,
+ })
+ if err != nil {
+ return []Instance{}, err
+ }
+
+ var instances []Instance
+ for _, c := range containers.Items {
+ expiresAt, err := strconv.Atoi(c.Labels[ContainerLabelExpiresAt])
+ if err != nil {
+ slog.Error("container has invalid expiry", "container", c.ID, "expiry", c.Labels[ContainerLabelExpiresAt])
+ continue
+ }
+ instances = append(instances, Instance{
+ ChallengeName: c.Labels[ContainerLabelChallenge],
+ DeployKey: c.Labels[ContainerLabelDeployKey],
+ Address: c.Labels[ContainerLabelAddress],
+ AddressFormat: c.Labels[ContainerLabelAddressFormat],
+ ExpiresAt: time.Unix(int64(expiresAt), 0),
+ })
+ }
+ return instances, nil
+}
+
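+// StopInstance force-removes the team's containers for the given deploy key
+// and cleans up the associated networks.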
+func (d *DockerDeployer) StopInstance(ctx context.Context, deployKey, team string) error {
+ if deployKey == "" || team == "" {
+ return fmt.Errorf("deploy key/team is invalid")
+ }
+
+ filters := client.Filters{}
+ filters.Add("label", ContainerLabelForTeam+"="+team)
+ filters.Add("label", ContainerLabelDeployKey+"="+deployKey)
+
+ containers, err := d.client.ContainerList(ctx, client.ContainerListOptions{
+ All: true,
+ Filters: filters,
+ })
+ if err != nil {
+ return fmt.Errorf("docker error")
+ }
+
+ if len(containers.Items) == 0 {
+ return fmt.Errorf("no such instance")
+ }
+
+ for _, c := range containers.Items {
+ _, err := d.client.ContainerRemove(ctx, c.ID, client.ContainerRemoveOptions{
+ Force: true,
+ })
+ if err != nil {
+ return fmt.Errorf("docker error")
+ }
+ slog.Info("container removed early", "container", c.ID)
+ }
+
+ networks, err := d.client.NetworkList(ctx, client.NetworkListOptions{
+ Filters: filters,
+ })
+ if err != nil {
+ return fmt.Errorf("docker error")
+ }
+ for _, n := range networks.Items {
+ if err = d.forceRemoveNetwork(ctx, n.ID); err != nil {
+ slog.Warn("failed to remove network", "network", n.ID)
+ continue
+ }
+ slog.Info("network removed early", "network", n.ID)
+ }
+
+ return nil
+}
+
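+// RemoveExpiredResources removes all managed containers and networks whose
+// expiry label lies in the past.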
+func (d *DockerDeployer) RemoveExpiredResources(ctx context.Context) error {
+ filters := client.Filters{}
+ filters.Add("label", ContainerLabelManaged+"=yes")
+
+ containers, err := d.client.ContainerList(ctx, client.ContainerListOptions{
+ All: true,
+ Filters: filters,
+ })
+ if err != nil {
+ return err
+ }
+ for _, c := range containers.Items {
+ expiry, err := strconv.ParseInt(c.Labels[ContainerLabelExpiresAt], 10, 64)
+ if err != nil {
+ slog.Warn("invalid timestamp on container label", "container", c.ID, "timestamp", c.Labels[ContainerLabelExpiresAt])
+ continue
+ }
+ if expiry > time.Now().Unix() {
+ continue
+ }
+
+ _, err = d.client.ContainerRemove(ctx, c.ID, client.ContainerRemoveOptions{
+ Force: true,
+ })
+ if err != nil {
+ return err
+ }
+ slog.Info("expired container removed", "container", c.ID)
+ }
+
+ networks, err := d.client.NetworkList(ctx, client.NetworkListOptions{
+ Filters: filters,
+ })
+ if err != nil {
+ return err
+ }
+ for _, n := range networks.Items {
+ expiry, err := strconv.ParseInt(n.Labels[ContainerLabelExpiresAt], 10, 64)
+ if err != nil {
+ slog.Warn("invalid timestamp on network label", "network", n.ID, "timestamp", n.Labels[ContainerLabelExpiresAt])
+ continue
+ }
+ if expiry > time.Now().Unix() {
+ continue
+ }
+
+ if err = d.forceRemoveNetwork(ctx, n.ID); err != nil {
+ return err
+ }
+ slog.Info("expired network removed", "network", n.ID)
+ }
+
+ return nil
+}
+
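+// forceRemoveNetwork disconnects the proxy container from the network (best
+// effort) before removing the network itself.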
+func (d *DockerDeployer) forceRemoveNetwork(ctx context.Context, networkID string) error {
+ _, _ = d.client.NetworkDisconnect(ctx, networkID, client.NetworkDisconnectOptions{
+ Container: d.proxyContainerName,
+ Force: true,
+ })
+
+ _, err := d.client.NetworkRemove(ctx, networkID, client.NetworkRemoveOptions{})
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/pkg/janitor/janitor.go b/pkg/janitor/janitor.go
new file mode 100644
index 0000000..b640eed
--- /dev/null
+++ b/pkg/janitor/janitor.go
@@ -0,0 +1,26 @@
+package janitor
+
+import (
+ "context"
+ "log/slog"
+ "time"
+
+ "git.leonardobishop.net/instancer/pkg/deployer"
+)
+
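+// StartJanitor removes expired resources once a minute until the context is
+// cancelled.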
+func StartJanitor(ctx context.Context, deployer *deployer.DockerDeployer) {
+ ticker := time.NewTicker(1 * time.Minute)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ err := deployer.RemoveExpiredResources(ctx)
+ if err != nil {
+ slog.Error("error occurred when removing expired resources", "cause", err)
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
diff --git a/pkg/registry/list.go b/pkg/registry/list.go
new file mode 100644
index 0000000..0c6073f
--- /dev/null
+++ b/pkg/registry/list.go
@@ -0,0 +1,62 @@
+package registry
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+type RegistryClient struct {
+ URL string
+ Username string
+ Password string
+}
+
+type CatalogResponse struct {
+ Repositories []string `json:"repositories"`
+}
+
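+// ListRepositories walks the registry's paginated /v2/_catalog endpoint and
+// returns every repository name.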
+func (c *RegistryClient) ListRepositories() ([]string, error) {
+ repos := []string{}
+ last := ""
+ pageSize := 100
+
+ for {
+ url := fmt.Sprintf("%s/v2/_catalog?n=%d", c.URL, pageSize)
+ if last != "" {
+ url += "&last=" + last
+ }
+
+ req, _ := http.NewRequest("GET", url, nil)
+ if c.Username != "" {
+ req.SetBasicAuth(c.Username, c.Password)
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ body, _ := io.ReadAll(resp.Body)
+ return nil, fmt.Errorf("registry returned %d: %s", resp.StatusCode, body)
+ }
+
+ var catalog CatalogResponse
+ if err := json.NewDecoder(resp.Body).Decode(&catalog); err != nil {
+ return nil, err
+ }
+
+ repos = append(repos, catalog.Repositories...)
+
+ if len(catalog.Repositories) < pageSize {
+ break
+ }
+
+ last = catalog.Repositories[len(catalog.Repositories)-1]
+ }
+
+ return repos, nil
+}
diff --git a/pkg/session/session.go b/pkg/session/session.go
new file mode 100644
index 0000000..87306ac
--- /dev/null
+++ b/pkg/session/session.go
@@ -0,0 +1,85 @@
+package session
+
+import (
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "sync"
+)
+
+type UserSession struct {
+ Token string
+ Team string
+}
+
+// implementation adapted from
+// https://git.leonardobishop.net/confplanner/tree/pkg/session/memory.go
+
+// TODO add expiry
+type MemoryStore struct {
+ sessions map[string]*UserSession
+ lock sync.RWMutex
+}
+
+func NewMemoryStore() *MemoryStore {
+ return &MemoryStore{
+ sessions: make(map[string]*UserSession),
+ }
+}
+
+func (s *MemoryStore) GetByToken(token string) *UserSession {
+ if token == "" {
+ return nil
+ }
+
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+
+ return s.sessions[token]
+}
+
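+// Create generates a unique session token for the team and stores the session.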
+func (s *MemoryStore) Create(team string) (*UserSession, error) {
+
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ var token string
+ for {
+ token = generateSessionToken()
+
+ _, tokenExists := s.sessions[token]
+
+ if !tokenExists {
+ break
+ }
+ }
+
+ session := &UserSession{
+ Token: token,
+ Team: team,
+ }
+ s.sessions[token] = session
+
+ return session, nil
+}
+
+func (s *MemoryStore) Destroy(token string) error {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ session := s.sessions[token]
+ if session == nil {
+ return fmt.Errorf("session does not exist")
+ }
+
+ delete(s.sessions, token)
+ return nil
+}
+
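+// generateSessionToken returns 100 random bytes encoded as base64, or an
+// empty string if the random source fails.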
+func generateSessionToken() string {
+ b := make([]byte, 100)
+ if _, err := rand.Read(b); err != nil {
+ return ""
+ }
+ return base64.StdEncoding.EncodeToString(b)
+}