Kubeconfigs as file references (#466)
Signed-off-by: Jussi Nummelin <jussi.nummelin@gmail.com>
Co-authored-by: Lauri Nevala <lauri.nevala@gmail.com>
Co-authored-by: Jari Kolehmainen <jari.kolehmainen@gmail.com>
This commit is contained in:
parent c15aa7972c
commit 55687b7d35
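The change in one sentence: instead of persisting each cluster's kubeconfig as an embedded YAML string, clusters now carry a reference to a kubeconfig file on disk plus a context name, and API access goes through a generated, credential-free "proxy" kubeconfig. A sketch of the shape change, reconstructed from the ClusterBaseInfo hunk further down (the surrounding types are simplified for illustration):

    // before this commit: kubeconfig embedded in the stored cluster entry
    interface ClusterBaseInfoOld {
      id: string;
      kubeConfig: string;     // full kubeconfig YAML as a string
      workspace?: string;
    }

    // after this commit: a file reference plus an explicit context
    interface ClusterBaseInfoNew {
      id: string;
      kubeConfigPath: string; // path to a kubeconfig file on disk
      contextName: string;    // which context in that file to use
      workspace?: string;
    }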
@@ -6,6 +6,7 @@ import * as version260Beta2 from "../migrations/cluster-store/2.6.0-beta.2"
 import * as version260Beta3 from "../migrations/cluster-store/2.6.0-beta.3"
 import * as version270Beta0 from "../migrations/cluster-store/2.7.0-beta.0"
 import * as version270Beta1 from "../migrations/cluster-store/2.7.0-beta.1"
+import * as version360Beta1 from "../migrations/cluster-store/3.6.0-beta.1"
 import { getAppVersion } from "./utils/app-version";

 export class ClusterStore {
@@ -25,7 +26,8 @@ export class ClusterStore {
       "2.6.0-beta.2": version260Beta2.migration,
       "2.6.0-beta.3": version260Beta3.migration,
       "2.7.0-beta.0": version270Beta0.migration,
-      "2.7.0-beta.1": version270Beta1.migration
+      "2.7.0-beta.1": version270Beta1.migration,
+      "3.6.0-beta.1": version360Beta1.migration
       }
     })
   }
@@ -72,7 +74,8 @@ export class ClusterStore {
     const index = clusters.findIndex((cl) => cl.id === cluster.id)
     const storable = {
       id: cluster.id,
-      kubeConfig: cluster.kubeConfig,
+      kubeConfigPath: cluster.kubeConfigPath,
+      contextName: cluster.contextName,
       preferences: cluster.preferences,
       workspace: cluster.workspace
     }
@@ -95,7 +98,8 @@ export class ClusterStore {
   public reloadCluster(cluster: ClusterBaseInfo): void {
     const storedCluster = this.getCluster(cluster.id);
     if (storedCluster) {
-      cluster.kubeConfig = storedCluster.kubeConfig
+      cluster.kubeConfigPath = storedCluster.kubeConfigPath
+      cluster.contextName = storedCluster.contextName
       cluster.preferences = storedCluster.preferences
       cluster.workspace = storedCluster.workspace
     }
@@ -113,4 +117,4 @@ export class ClusterStore {
   }
 }

-export const clusterStore = ClusterStore.getInstance();
+export const clusterStore: ClusterStore = ClusterStore.getInstance();
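The migrations map that gains the "3.6.0-beta.1" entry above is the usual versioned-migration pattern: keys are app versions, values mutate the persisted config, and the store replays every migration between the last persisted version and the running version. A minimal sketch, assuming an electron-store-style migrations option (the store library and option name are an assumption; only the version key and the migration(store) signature come from the diff):

    import Store from "electron-store"

    const store = new Store({
      migrations: {
        // runs once, when a config last written by < 3.6.0-beta.1 is opened
        "3.6.0-beta.1": (store: any) => {
          // illustrative body: the real migration (shown later in this diff)
          // moves each cluster.kubeConfig out to a file and records
          // kubeConfigPath + contextName instead
        },
      },
    })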
@@ -1,8 +1,19 @@
 import mockFs from "mock-fs"
 import yaml from "js-yaml"
+import * as fs from "fs"
 import { ClusterStore } from "./cluster-store";
 import { Cluster } from "../main/cluster";

+jest.mock("electron", () => {
+  return {
+    app: {
+      getVersion: () => '99.99.99',
+      getPath: () => 'tmp',
+      getLocale: () => 'en'
+    }
+  }
+})
+
 // Console.log needs to be called before fs-mocks, see https://github.com/tschaub/mock-fs/issues/234
 console.log("");

@@ -24,7 +35,8 @@ describe("for an empty config", () => {
   it("allows to store and retrieve a cluster", async () => {
     const cluster = new Cluster({
       id: 'foo',
-      kubeConfig: 'kubeconfig string',
+      kubeConfigPath: 'kubeconfig',
+      contextName: "foo",
       preferences: {
         terminalCWD: '/tmp',
         icon: 'path to icon'
@@ -33,7 +45,8 @@ describe("for an empty config", () => {
     const clusterStore = ClusterStore.getInstance()
     clusterStore.storeCluster(cluster);
     const storedCluster = clusterStore.getCluster(cluster.id);
-    expect(storedCluster.kubeConfig).toBe(cluster.kubeConfig)
+    expect(storedCluster.kubeConfigPath).toBe(cluster.kubeConfigPath)
+    expect(storedCluster.contextName).toBe(cluster.contextName)
     expect(storedCluster.preferences.icon).toBe(cluster.preferences.icon)
     expect(storedCluster.preferences.terminalCWD).toBe(cluster.preferences.terminalCWD)
     expect(storedCluster.id).toBe(cluster.id)
@@ -42,7 +55,8 @@ describe("for an empty config", () => {
   it("allows to delete a cluster", async () => {
     const cluster = new Cluster({
       id: 'foofoo',
-      kubeConfig: 'kubeconfig string',
+      kubeConfigPath: 'kubeconfig',
+      contextName: "foo",
       preferences: {
         terminalCWD: '/tmp'
       }
@@ -74,12 +88,12 @@ describe("for a config with existing clusters", () => {
       clusters: [
         {
           id: 'cluster1',
-          kubeConfig: 'foo',
+          kubeConfigPath: 'foo',
           preferences: { terminalCWD: '/foo' }
         },
         {
           id: 'cluster2',
-          kubeConfig: 'foo2',
+          kubeConfigPath: 'foo2',
           preferences: { terminalCWD: '/foo2' }
         }
       ]
@@ -96,12 +110,12 @@ describe("for a config with existing clusters", () => {
   it("allows to retrieve a cluster", async () => {
     const clusterStore = ClusterStore.getInstance()
     const storedCluster = clusterStore.getCluster('cluster1')
-    expect(storedCluster.kubeConfig).toBe('foo')
+    expect(storedCluster.kubeConfigPath).toBe('foo')
     expect(storedCluster.preferences.terminalCWD).toBe('/foo')
     expect(storedCluster.id).toBe('cluster1')

     const storedCluster2 = clusterStore.getCluster('cluster2')
-    expect(storedCluster2.kubeConfig).toBe('foo2')
+    expect(storedCluster2.kubeConfigPath).toBe('foo2')
     expect(storedCluster2.preferences.terminalCWD).toBe('/foo2')
     expect(storedCluster2.id).toBe('cluster2')
   })
@@ -122,7 +136,8 @@ describe("for a config with existing clusters", () => {
   it("allows to reload a cluster in-place", async () => {
     const cluster = new Cluster({
       id: 'cluster1',
-      kubeConfig: 'kubeconfig string',
+      kubeConfigPath: 'kubeconfig string',
+      contextName: "foo",
       preferences: {
         terminalCWD: '/tmp'
       }
@@ -131,7 +146,7 @@ describe("for a config with existing clusters", () => {
     const clusterStore = ClusterStore.getInstance()
     clusterStore.reloadCluster(cluster)

-    expect(cluster.kubeConfig).toBe('foo')
+    expect(cluster.kubeConfigPath).toBe('foo')
     expect(cluster.preferences.terminalCWD).toBe('/foo')
     expect(cluster.id).toBe('cluster1')
   })
@@ -142,11 +157,11 @@ describe("for a config with existing clusters", () => {

     expect(storedClusters[0].id).toBe('cluster1')
     expect(storedClusters[0].preferences.terminalCWD).toBe('/foo')
-    expect(storedClusters[0].kubeConfig).toBe('foo')
+    expect(storedClusters[0].kubeConfigPath).toBe('foo')

     expect(storedClusters[1].id).toBe('cluster2')
     expect(storedClusters[1].preferences.terminalCWD).toBe('/foo2')
-    expect(storedClusters[1].kubeConfig).toBe('foo2')
+    expect(storedClusters[1].kubeConfigPath).toBe('foo2')
   })

   it("allows storing the clusters in a different order", async () => {
@@ -187,7 +202,7 @@ describe("for a pre 2.0 config with an existing cluster", () => {
   it("migrates to modern format with kubeconfig under a key", async () => {
     const clusterStore = ClusterStore.getInstance()
     const storedCluster = clusterStore.store.get('clusters')[0]
-    expect(storedCluster.kubeConfig).toBe('kubeconfig content')
+    expect(storedCluster.kubeConfigPath).toBe(`tmp/kubeconfigs/${storedCluster.id}`)
   })
 })

@@ -254,9 +269,10 @@ describe("for a pre 2.6.0 config with a cluster that has arrays in auth config",
   it("replaces array format access token and expiry into string", async () => {
     const clusterStore = ClusterStore.getInstance()
     const storedClusterData = clusterStore.store.get('clusters')[0]
-    const kc = yaml.safeLoad(storedClusterData.kubeConfig)
+    const kc = yaml.safeLoad(fs.readFileSync(storedClusterData.kubeConfigPath).toString())
     expect(kc.users[0].user['auth-provider'].config['access-token']).toBe("should be string")
     expect(kc.users[0].user['auth-provider'].config['expiry']).toBe("should be string")
+    expect(storedClusterData.contextName).toBe("minikube")
   })
 })

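Two details of this test setup are worth calling out: electron is stubbed with jest.mock so app.getPath() returns a relative tmp directory, and mock-fs virtualizes the filesystem so the store's reads and writes never touch the real disk (hence the console.log workaround noted above). A standalone sketch of the same harness, with an illustrative seeded config file:

    import mockFs from "mock-fs"
    import * as fs from "fs"

    jest.mock("electron", () => ({
      app: {
        getVersion: () => "99.99.99",
        getPath: () => "tmp", // everything under ./tmp exists only in memory
        getLocale: () => "en",
      },
    }))

    console.log(""); // work around mock-fs issue #234, as in the test above

    describe("with a virtual config file", () => {
      beforeEach(() => {
        mockFs({ tmp: { "config.json": JSON.stringify({ clusters: [] }) } })
      })
      afterEach(() => mockFs.restore())

      it("reads the seeded file", () => {
        const config = JSON.parse(fs.readFileSync("tmp/config.json", "utf8"))
        expect(config.clusters).toEqual([])
      })
    })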
src/common/utils/kubeconfig.ts (new file, 16 lines)
@@ -0,0 +1,16 @@
+import { app, remote } from "electron"
+import { ensureDirSync, writeFileSync } from "fs-extra"
+import * as path from "path"
+
+// Writes kubeconfigs to "embedded" store, i.e. .../Lens/kubeconfigs/
+export function writeEmbeddedKubeConfig(clusterId: string, kubeConfig: string): string {
+  // This can be called from main & renderer
+  const a = (app || remote.app)
+  const kubeConfigBase = path.join(a.getPath("userData"), "kubeconfigs")
+  ensureDirSync(kubeConfigBase)
+
+  const kubeConfigFile = path.join(kubeConfigBase, clusterId)
+  writeFileSync(kubeConfigFile, kubeConfig)
+
+  return kubeConfigFile
+}
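This helper is the single place where kubeconfig text becomes a file: the 3.6.0-beta.1 migration calls it for every previously embedded config, and the add-cluster UI calls it when the user pastes a custom config (both call sites appear later in this diff). Usage amounts to (id and YAML illustrative):

    import { writeEmbeddedKubeConfig } from "../../common/utils/kubeconfig"

    // persist the raw YAML under <userData>/kubeconfigs/<clusterId> ...
    const kubeConfigPath = writeEmbeddedKubeConfig(
      "27b2c3d4",                       // cluster id (illustrative)
      "apiVersion: v1\nkind: Config\n"  // kubeconfig text (illustrative)
    )
    // ...and keep only the returned path on the stored cluster entry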
@@ -53,7 +53,7 @@ export class MetricsFeature extends Feature {

   async install(cluster: Cluster): Promise<boolean> {
     // Check if there are storageclasses
-    const storageClient = cluster.contextHandler.kc.makeApiClient(k8s.StorageV1Api)
+    const storageClient = cluster.proxyKubeconfig().makeApiClient(k8s.StorageV1Api)
     const scs = await storageClient.listStorageClass();
     scs.body.items.forEach(sc => {
       if(sc.metadata.annotations &&
@@ -93,9 +93,9 @@ export class MetricsFeature extends Feature {

   async uninstall(cluster: Cluster): Promise<boolean> {
     return new Promise<boolean>(async (resolve, reject) => {
-      const rbacClient = cluster.contextHandler.kc.makeApiClient(RbacAuthorizationV1Api)
+      const rbacClient = cluster.proxyKubeconfig().makeApiClient(RbacAuthorizationV1Api)
       try {
-        await this.deleteNamespace(cluster.contextHandler.kc, "lens-metrics")
+        await this.deleteNamespace(cluster.proxyKubeconfig(), "lens-metrics")
         await rbacClient.deleteClusterRole("lens-prometheus");
         await rbacClient.deleteClusterRoleBinding("lens-prometheus");
         resolve(true);

@@ -37,7 +37,7 @@ export class UserModeFeature extends Feature {

   async uninstall(cluster: Cluster): Promise<boolean> {
     return new Promise<boolean>(async (resolve, reject) => {
-      const rbacClient = cluster.contextHandler.kc.makeApiClient(RbacAuthorizationV1Api)
+      const rbacClient = cluster.proxyKubeconfig().makeApiClient(RbacAuthorizationV1Api)
       try {
         await rbacClient.deleteClusterRole("lens-user");
         await rbacClient.deleteClusterRoleBinding("lens-user");

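The substitution above repeats across this whole commit: every `cluster.contextHandler.kc.makeApiClient(...)` becomes `cluster.proxyKubeconfig().makeApiClient(...)`. The ContextHandler no longer exposes a live KubeConfig; the proxy kubeconfig is loaded from disk on each use instead. Reduced to a free function, the helper (defined on Cluster later in this diff) is roughly:

    import { KubeConfig } from "@kubernetes/client-node"

    // Load the generated proxy kubeconfig fresh from disk, so API clients
    // go through the local kubectl proxy instead of hitting the cluster
    // with the user's real credentials.
    function proxyKubeconfig(proxyKubeconfigPath: string): KubeConfig {
      const kc = new KubeConfig()
      kc.loadFromFile(proxyKubeconfigPath)
      return kc
    }

    // typical call site, mirroring the feature code above:
    // const rbacClient = proxyKubeconfig(path).makeApiClient(RbacAuthorizationV1Api)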
@@ -44,12 +44,12 @@ export class ClusterManager {
     this.clusters = new Map()
     clusters.forEach((clusterInfo) => {
       try {
-        const kc = this.loadKubeConfig(clusterInfo.kubeConfig)
-        logger.debug(`Starting to load target definitions for ${ kc.currentContext }`)
+        const kc = this.loadKubeConfig(clusterInfo.kubeConfigPath)
         const cluster = new Cluster({
           id: clusterInfo.id,
           port: this.port,
-          kubeConfig: clusterInfo.kubeConfig,
+          kubeConfigPath: clusterInfo.kubeConfigPath,
+          contextName: clusterInfo.contextName,
           preferences: clusterInfo.preferences,
           workspace: clusterInfo.workspace
         })
@@ -77,33 +77,31 @@ export class ClusterManager {
     clusters.map(cluster => cluster.stopServer())
   }

-  protected loadKubeConfig(config: string): KubeConfig {
+  protected loadKubeConfig(configPath: string): KubeConfig {
     const kc = new KubeConfig();
-    kc.loadFromString(config);
+    kc.loadFromFile(configPath)
     return kc;
   }

   protected async addNewCluster(clusterData: ClusterBaseInfo): Promise<Cluster> {
     return new Promise(async (resolve, reject) => {
       try {
-        const configs: KubeConfig[] = k8s.loadAndSplitConfig(clusterData.kubeConfig)
-        if(configs.length == 0) {
-          reject("No cluster contexts defined")
-        }
-        configs.forEach(c => {
-          k8s.validateConfig(c)
-          const cluster = new Cluster({
-            id: uuid(),
-            port: this.port,
-            kubeConfig: k8s.dumpConfigYaml(c),
-            preferences: clusterData.preferences,
-            workspace: clusterData.workspace
-          })
-          cluster.init(c)
-          cluster.save()
-          this.clusters.set(cluster.id, cluster)
-          resolve(cluster)
-        });
+        const kc = this.loadKubeConfig(clusterData.kubeConfigPath)
+        k8s.validateConfig(kc)
+        kc.setCurrentContext(clusterData.contextName)
+        const cluster = new Cluster({
+          id: uuid(),
+          port: this.port,
+          kubeConfigPath: clusterData.kubeConfigPath,
+          contextName: clusterData.contextName,
+          preferences: clusterData.preferences,
+          workspace: clusterData.workspace
+        })
+        cluster.init(kc)
+        cluster.save()
+        this.clusters.set(cluster.id, cluster)
+        resolve(cluster)
+
       } catch(error) {
         logger.error(error)
         reject(error)

@@ -6,9 +6,9 @@ import logger from "./logger"
 import { AuthorizationV1Api, CoreV1Api, KubeConfig, V1ResourceAttributes } from "@kubernetes/client-node"
 import * as fm from "./feature-manager";
 import { Kubectl } from "./kubectl";
-import { KubeconfigManager } from "./kubeconfig-manager"
 import { PromiseIpc } from "electron-promise-ipc"
 import request from "request-promise-native"
+import { KubeconfigManager } from "./kubeconfig-manager"
 import { apiPrefix } from "../common/vars";

 enum ClusterStatus {
@@ -19,7 +19,8 @@ enum ClusterStatus {

 export interface ClusterBaseInfo {
   id: string;
-  kubeConfig: string;
+  kubeConfigPath: string;
+  contextName: string;
   preferences?: ClusterPreferences;
   port?: number;
   workspace?: string;
@@ -73,7 +74,7 @@ export class Cluster implements ClusterInfo {
   public isAdmin: boolean;
   public features: FeatureStatusMap;
   public kubeCtl: Kubectl
-  public kubeConfig: string;
+  public kubeConfigPath: string;
   public eventCount: number;
   public preferences: ClusterPreferences;

@@ -85,18 +86,25 @@ export class Cluster implements ClusterInfo {
   constructor(clusterInfo: ClusterBaseInfo) {
     if (clusterInfo) Object.assign(this, clusterInfo)
     if (!this.preferences) this.preferences = {}
-    this.kubeconfigManager = new KubeconfigManager(this.kubeConfig)
   }

-  public kubeconfigPath() {
+  public proxyKubeconfigPath() {
     return this.kubeconfigManager.getPath()
   }

+  public proxyKubeconfig() {
+    const kc = new KubeConfig()
+    kc.loadFromFile(this.proxyKubeconfigPath())
+    return kc
+  }
+
   public async init(kc: KubeConfig) {
-    this.contextHandler = new ContextHandler(kc, this)
-    this.contextName = kc.currentContext
-    this.url = this.contextHandler.url
     this.apiUrl = kc.getCurrentCluster().server
+    this.contextHandler = new ContextHandler(kc, this)
+    await this.contextHandler.init() // So we get the proxy port reserved
+    this.kubeconfigManager = new KubeconfigManager(this)
+
+    this.url = this.contextHandler.url
   }

   public stopServer() {
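The reordered init() above encodes a real dependency chain: the ContextHandler must reserve the proxy port before the KubeconfigManager can write the proxy kubeconfig that embeds it (see kubeconfig-manager.ts later in this diff). A reduced sketch of that ordering; the two classes are stand-ins that keep only the parts this step needs:

    import { KubeConfig } from "@kubernetes/client-node"

    class ContextHandlerSketch {
      proxyPort?: number
      async init() {
        this.proxyPort = 9191 // the real code reserves a free local port here
      }
    }

    class KubeconfigManagerSketch {
      constructor(handler: ContextHandlerSketch) {
        // the generated kubeconfig's server URL embeds handler.proxyPort,
        // so constructing this before init() would bake in "undefined"
        if (handler.proxyPort === undefined) throw new Error("init() the handler first")
      }
    }

    async function initSketch(kc: KubeConfig) {
      const apiUrl = kc.getCurrentCluster()!.server // read before anything else
      const contextHandler = new ContextHandlerSketch()
      await contextHandler.init()                   // proxy port now reserved
      const kubeconfigManager = new KubeconfigManagerSketch(contextHandler)
      return { apiUrl, contextHandler, kubeconfigManager }
    }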
@@ -129,7 +137,7 @@ export class Cluster implements ClusterInfo {

     if (this.accessible) {
       this.distribution = this.detectKubernetesDistribution(this.version)
-      this.features = await fm.getFeatures(this.contextHandler)
+      this.features = await fm.getFeatures(this)
       this.isAdmin = await this.isClusterAdmin()
       this.nodes = await this.getNodeCount()
       this.kubeCtl = new Kubectl(this.version)
@@ -138,16 +146,6 @@ export class Cluster implements ClusterInfo {
     this.eventCount = await this.getEventCount();
   }

-  public updateKubeconfig(kubeconfig: string) {
-    const storedCluster = clusterStore.getCluster(this.id)
-    if (!storedCluster) {
-      return
-    }
-
-    this.kubeConfig = kubeconfig
-    this.save()
-  }
-
   public getPrometheusApiPrefix() {
     if (!this.preferences.prometheus?.prefix) {
       return ""
@@ -164,7 +162,7 @@ export class Cluster implements ClusterInfo {
       id: this.id,
       workspace: this.workspace,
       url: this.url,
-      contextName: this.contextHandler.kc.currentContext,
+      contextName: this.contextName,
       apiUrl: this.apiUrl,
       online: this.online,
       accessible: this.accessible,
@@ -175,7 +173,7 @@ export class Cluster implements ClusterInfo {
       isAdmin: this.isAdmin,
       features: this.features,
       kubeCtl: this.kubeCtl,
-      kubeConfig: this.kubeConfig,
+      kubeConfigPath: this.kubeConfigPath,
       preferences: this.preferences
     }
   }
@@ -224,7 +222,7 @@ export class Cluster implements ClusterInfo {
   }

   public async canI(resourceAttributes: V1ResourceAttributes): Promise<boolean> {
-    const authApi = this.contextHandler.kc.makeApiClient(AuthorizationV1Api)
+    const authApi = this.proxyKubeconfig().makeApiClient(AuthorizationV1Api)
     try {
       const accessReview = await authApi.createSelfSubjectAccessReview({
         apiVersion: "authorization.k8s.io/v1",
@@ -286,7 +284,7 @@ export class Cluster implements ClusterInfo {
     if (!this.isAdmin) {
       return 0;
     }
-    const client = this.contextHandler.kc.makeApiClient(CoreV1Api);
+    const client = this.proxyKubeconfig().makeApiClient(CoreV1Api);
     try {
       const response = await client.listEventForAllNamespaces(false, null, null, null, 1000);
       const uniqEventSources = new Set();

@@ -7,27 +7,25 @@ import { getFreePort } from "./port"
 import { KubeAuthProxy } from "./kube-auth-proxy"
 import { Cluster, ClusterPreferences } from "./cluster"
 import { prometheusProviders } from "../common/prometheus-providers"
-import { PrometheusProvider, PrometheusService } from "./prometheus/provider-registry"
+import { PrometheusService, PrometheusProvider } from "./prometheus/provider-registry"

 export class ContextHandler {
   public contextName: string
   public id: string
   public url: string
-  public kc: KubeConfig
-  public clusterUrl: url.UrlWithStringQuery
-  public proxyServer: KubeAuthProxy
-  public proxyPort: number
-  public certData: string
-  public authCertData: string
+  public cluster: Cluster

   protected apiTarget: ServerOptions
   protected proxyTarget: ServerOptions
+  protected clusterUrl: url.UrlWithStringQuery
+  protected proxyServer: KubeAuthProxy

   protected clientCert: string
   protected clientKey: string
   protected secureApiConnection = true
   protected defaultNamespace: string
+  protected proxyPort: number
   protected kubernetesApi: string
   protected prometheusProvider: string
   protected prometheusPath: string
@@ -35,39 +33,21 @@ export class ContextHandler {

   constructor(kc: KubeConfig, cluster: Cluster) {
     this.id = cluster.id
-    this.kc = new KubeConfig()
-    this.kc.users = [
-      {
-        name: kc.getCurrentUser().name,
-        token: this.id
-      }
-    ]
-    this.kc.contexts = [
-      {
-        name: kc.currentContext,
-        cluster: kc.getCurrentCluster().name,
-        user: kc.getCurrentUser().name,
-        namespace: kc.getContextObject(kc.currentContext).namespace
-      }
-    ]
-    this.kc.setCurrentContext(kc.currentContext)
-
-    this.clusterUrl = url.parse(kc.getCurrentCluster().server)
-    this.contextName = kc.currentContext;
-    this.defaultNamespace = kc.getContextObject(kc.currentContext).namespace
+    this.cluster = cluster
+    this.clusterUrl = url.parse(cluster.apiUrl)
+    this.contextName = cluster.contextName;
+    this.defaultNamespace = kc.getContextObject(cluster.contextName).namespace
     this.url = `http://${this.id}.localhost:${cluster.port}/`
     this.kubernetesApi = `http://127.0.0.1:${cluster.port}/${this.id}`
-    this.kc.clusters = [
-      {
-        name: kc.getCurrentCluster().name,
-        server: this.kubernetesApi,
-        skipTLSVerify: true
-      }
-    ]
-
     this.setClusterPreferences(cluster.preferences)
   }

+  public async init() {
+    await this.resolveProxyPort()
+  }
+
   public setClusterPreferences(clusterPreferences?: ClusterPreferences) {
     this.prometheusProvider = clusterPreferences.prometheusProvider?.type

@@ -103,7 +83,7 @@ export class ContextHandler {
   public async getPrometheusService(): Promise<PrometheusService> {
     const providers = this.prometheusProvider ? prometheusProviders.filter((p, _) => p.id == this.prometheusProvider) : prometheusProviders
     const prometheusPromises: Promise<PrometheusService>[] = providers.map(async (provider: PrometheusProvider): Promise<PrometheusService> => {
-      const apiClient = this.kc.makeApiClient(CoreV1Api)
+      const apiClient = this.cluster.proxyKubeconfig().makeApiClient(CoreV1Api)
       return await provider.getPrometheusService(apiClient)
     })
     const resolvedPrometheusServices = await Promise.all(prometheusPromises)
@@ -174,7 +154,7 @@ export class ContextHandler {

   public async withTemporaryKubeconfig(callback: (kubeconfig: string) => Promise<any>) {
     try {
-      await callback(this.cluster.kubeconfigPath())
+      await callback(this.cluster.proxyKubeconfigPath())
     } catch (error) {
       throw(error)
     }

@@ -1,4 +1,4 @@
-import { ContextHandler } from "./context-handler"
+import { KubeConfig } from "@kubernetes/client-node"
 import logger from "./logger";
 import { Cluster } from "./cluster";
 import { Feature, FeatureStatusMap } from "./feature"
@@ -10,17 +10,19 @@ const ALL_FEATURES: any = {
   'user-mode': new UserModeFeature(null),
 }

-export async function getFeatures(clusterContext: ContextHandler): Promise<FeatureStatusMap> {
+export async function getFeatures(cluster: Cluster): Promise<FeatureStatusMap> {
   return new Promise<FeatureStatusMap>(async (resolve, reject) => {
     const result: FeatureStatusMap = {};
-    logger.debug(`features for ${clusterContext.contextName}`);
+    logger.debug(`features for ${cluster.contextName}`);
     for (const key in ALL_FEATURES) {
       logger.debug(`feature ${key}`);
       if (ALL_FEATURES.hasOwnProperty(key)) {
         logger.debug("getting feature status...");
         const feature = ALL_FEATURES[key] as Feature;
+        const kc = new KubeConfig()
+        kc.loadFromFile(cluster.proxyKubeconfigPath())

-        const status = await feature.featureStatus(clusterContext.kc);
+        const status = await feature.featureStatus(kc);
         result[feature.name] = status

       } else {

@@ -54,7 +54,7 @@ export class HelmReleaseManager {
     await fs.promises.writeFile(fileName, yaml.safeDump(values))

     try {
-      const { stdout, stderr } = await promiseExec(`"${helm}" upgrade ${name} ${chart} --version ${version} -f ${fileName} --namespace ${namespace} --kubeconfig ${cluster.kubeconfigPath()}`).catch((error) => { throw(error.stderr)})
+      const { stdout, stderr } = await promiseExec(`"${helm}" upgrade ${name} ${chart} --version ${version} -f ${fileName} --namespace ${namespace} --kubeconfig ${cluster.proxyKubeconfigPath()}`).catch((error) => { throw(error.stderr)})
       return {
         log: stdout,
         release: this.getRelease(name, namespace, cluster)
@@ -66,7 +66,7 @@ export class HelmReleaseManager {

   public async getRelease(name: string, namespace: string, cluster: Cluster) {
     const helm = await helmCli.binaryPath()
-    const {stdout, stderr} = await promiseExec(`"${helm}" status ${name} --output json --namespace ${namespace} --kubeconfig ${cluster.kubeconfigPath()}`).catch((error) => { throw(error.stderr)})
+    const {stdout, stderr} = await promiseExec(`"${helm}" status ${name} --output json --namespace ${namespace} --kubeconfig ${cluster.proxyKubeconfigPath()}`).catch((error) => { throw(error.stderr)})
     const release = JSON.parse(stdout)
     release.resources = await this.getResources(name, namespace, cluster)
     return release
@@ -100,7 +100,7 @@ export class HelmReleaseManager {
   protected async getResources(name: string, namespace: string, cluster: Cluster) {
     const helm = await helmCli.binaryPath()
     const kubectl = await cluster.kubeCtl.kubectlPath()
-    const pathToKubeconfig = cluster.kubeconfigPath()
+    const pathToKubeconfig = cluster.proxyKubeconfigPath()
     const { stdout } = await promiseExec(`"${helm}" get manifest ${name} --namespace ${namespace} --kubeconfig ${pathToKubeconfig} | "${kubectl}" get -n ${namespace} --kubeconfig ${pathToKubeconfig} -f - -o=json`).catch((error) => {
       return { stdout: JSON.stringify({items: []})}
     })

@@ -6,7 +6,7 @@ import { releaseManager } from "./helm-release-manager";

 class HelmService {
   public async installChart(cluster: Cluster, data: {chart: string; values: {}; name: string; namespace: string; version: string}) {
-    const installResult = await releaseManager.installChart(data.chart, data.values, data.name, data.namespace, data.version, cluster.kubeconfigPath())
+    const installResult = await releaseManager.installChart(data.chart, data.values, data.name, data.namespace, data.version, cluster.proxyKubeconfigPath())
     return installResult
   }

@@ -48,7 +48,7 @@ class HelmService {

   public async listReleases(cluster: Cluster, namespace: string = null) {
     await repoManager.init()
-    const releases = await releaseManager.listReleases(cluster.kubeconfigPath(), namespace)
+    const releases = await releaseManager.listReleases(cluster.proxyKubeconfigPath(), namespace)
     return releases
   }

@@ -60,19 +60,19 @@ class HelmService {

   public async getReleaseValues(cluster: Cluster, releaseName: string, namespace: string) {
     logger.debug("Fetch release values")
-    const values = await releaseManager.getValues(releaseName, namespace, cluster.kubeconfigPath())
+    const values = await releaseManager.getValues(releaseName, namespace, cluster.proxyKubeconfigPath())
     return values
   }

   public async getReleaseHistory(cluster: Cluster, releaseName: string, namespace: string) {
     logger.debug("Fetch release history")
-    const history = await releaseManager.getHistory(releaseName, namespace, cluster.kubeconfigPath())
+    const history = await releaseManager.getHistory(releaseName, namespace, cluster.proxyKubeconfigPath())
     return(history)
   }

   public async deleteRelease(cluster: Cluster, releaseName: string, namespace: string) {
     logger.debug("Delete release")
-    const release = await releaseManager.deleteRelease(releaseName, namespace, cluster.kubeconfigPath())
+    const release = await releaseManager.deleteRelease(releaseName, namespace, cluster.proxyKubeconfigPath())
     return release
   }

@@ -84,7 +84,7 @@ class HelmService {

   public async rollback(cluster: Cluster, releaseName: string, namespace: string, revision: number) {
     logger.debug("Rollback release")
-    const output = await releaseManager.rollback(releaseName, namespace, revision, cluster.kubeconfigPath())
+    const output = await releaseManager.rollback(releaseName, namespace, revision, cluster.proxyKubeconfigPath())
     return({ message: output })
   }

@@ -70,13 +70,13 @@ export function splitConfig(kubeConfig: k8s.KubeConfig): k8s.KubeConfig[] {
 }

 /**
- * Loads KubeConfig from a yaml string and breaks it into several configs. Each context
+ * Loads KubeConfig from a yaml and breaks it into several configs. Each context per KubeConfig object
  *
- * @param configString yaml string of kube config
+ * @param configPath path to kube config yaml file
  */
-export function loadAndSplitConfig(configString: string): k8s.KubeConfig[] {
+export function loadAndSplitConfig(configPath: string): k8s.KubeConfig[] {
   const allConfigs = new k8s.KubeConfig();
-  allConfigs.loadFromString(configString);
+  allConfigs.loadFromFile(configPath);
   return splitConfig(allConfigs);
 }

@@ -3,10 +3,8 @@ import logger from "./logger"
 import * as tcpPortUsed from "tcp-port-used"
 import { Kubectl, bundledKubectl } from "./kubectl"
 import { Cluster } from "./cluster"
-import { readFileSync, watch } from "fs"
 import { PromiseIpc } from "electron-promise-ipc"
 import { findMainWebContents } from "./webcontents"
+import * as url from "url"

 export class KubeAuthProxy {
   public lastError: string
@@ -31,26 +29,18 @@ export class KubeAuthProxy {
       return;
     }
     const proxyBin = await this.kubectl.kubectlPath()
-    const configWatcher = watch(this.cluster.kubeconfigPath(), (eventType: string, filename: string) => {
-      if (eventType === "change") {
-        const kc = readFileSync(this.cluster.kubeconfigPath()).toString()
-        if (kc.trim().length > 0) { // Prevent updating empty configs back to store
-          this.cluster.updateKubeconfig(kc)
-        } else {
-          logger.warn(`kubeconfig watch on ${this.cluster.kubeconfigPath()} resulted into empty config, ignoring...`)
-        }
-      }
-    })
+    const clusterUrl = url.parse(this.cluster.apiUrl)
     let args = [
       "proxy",
-      "--port", this.port.toString(),
-      "--kubeconfig", this.cluster.kubeconfigPath(),
-      "--accept-hosts", clusterUrl.hostname,
+      "-p", this.port.toString(),
+      "--kubeconfig", this.cluster.kubeConfigPath,
+      "--context", this.cluster.contextName,
+      "--accept-hosts", ".*",
       "--reject-paths", "^[^/]"
     ]
     if (process.env.DEBUG_PROXY === "true") {
       args = args.concat(["-v", "9"])
     }
     logger.debug(`spawning kubectl proxy with args: ${args}`)
     this.proxyProcess = spawn(proxyBin, args, {
       env: this.env
     })
@@ -60,7 +50,6 @@ export class KubeAuthProxy {
         logger.debug("failed to send IPC log message: " + err.message)
       })
       this.proxyProcess = null
-      configWatcher.close()
     })
     this.proxyProcess.stdout.on('data', (data) => {
       let logItem = data.toString()

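The new spawn arguments delegate all authentication to kubectl itself: `-p` sets the listen port, `--kubeconfig` and `--context` point at the user's real kubeconfig file (which is why the watcher that synced a temp copy back to the store could be deleted above), `--accept-hosts ".*"` accepts any Host header, and `--reject-paths "^[^/]"` only rejects paths that do not start with a slash. All of these are standard kubectl proxy flags. Assembled, the child process looks like (binary path, port, and file path illustrative):

    import { spawn } from "child_process"

    // kubectl proxy holds the real credentials; everything behind it can
    // use the credential-free proxy kubeconfig generated in this commit.
    const args = [
      "proxy",
      "-p", "9191",
      "--kubeconfig", "/home/user/.kube/config", // cluster.kubeConfigPath
      "--context", "minikube",                   // cluster.contextName
      "--accept-hosts", ".*",
      "--reject-paths", "^[^/]",
    ]
    const proxyProcess = spawn("/path/to/kubectl", args)
    proxyProcess.stdout.on("data", (data) => console.log(data.toString()))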
@@ -2,14 +2,17 @@ import { app } from "electron"
 import fs from "fs"
 import { ensureDir, randomFileName} from "./file-helpers"
 import logger from "./logger"
+import { Cluster } from "./cluster"
+import * as k8s from "./k8s"
+import { KubeConfig } from "@kubernetes/client-node"

 export class KubeconfigManager {
   protected configDir = app.getPath("temp")
-  protected kubeconfig: string
   protected tempFile: string
+  protected cluster: Cluster

-  constructor(kubeconfig: string) {
-    this.kubeconfig = kubeconfig
+  constructor(cluster: Cluster) {
+    this.cluster = cluster
     this.tempFile = this.createTemporaryKubeconfig()
   }

@@ -17,11 +20,38 @@ export class KubeconfigManager {
     return this.tempFile
   }

+  /**
+   * Creates new "temporary" kubeconfig that point to the kubectl-proxy.
+   * This way any user of the config does not need to know anything about the auth etc. details.
+   */
   protected createTemporaryKubeconfig(): string {
     ensureDir(this.configDir)
     const path = `${this.configDir}/${randomFileName("kubeconfig")}`
     logger.debug('Creating temporary kubeconfig: ' + path)
-    fs.writeFileSync(path, this.kubeconfig)
+    const originalKc = new KubeConfig()
+    originalKc.loadFromFile(this.cluster.kubeConfigPath)
+    const kc = {
+      clusters: [
+        {
+          name: this.cluster.contextName,
+          server: `http://127.0.0.1:${this.cluster.contextHandler.proxyPort}`
+        }
+      ],
+      users: [
+        {
+          name: "proxy"
+        }
+      ],
+      contexts: [
+        {
+          name: this.cluster.contextName,
+          cluster: this.cluster.contextName,
+          namespace: originalKc.getContextObject(this.cluster.contextName).namespace,
+          user: "proxy"
+        }
+      ],
+      currentContext: this.cluster.contextName
+    } as KubeConfig
+    fs.writeFileSync(path, k8s.dumpConfigYaml(kc))
     return path
   }

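Serialized, the object above yields a kubeconfig whose only server is the local kubectl proxy and whose user carries no credentials at all; authentication is the proxy process's problem, so helm, kubectl, and API clients can consume this file without knowing anything about the cluster's auth. Roughly what lands in the temp file (port and names illustrative; key spelling follows standard kubeconfig YAML, which dumpConfigYaml is assumed to produce):

    const generatedProxyKubeconfig = `
    apiVersion: v1
    kind: Config
    clusters:
    - name: minikube
      cluster:
        server: http://127.0.0.1:9191
    users:
    - name: proxy
    contexts:
    - name: minikube
      context:
        cluster: minikube
        user: proxy
        namespace: default
    current-context: minikube
    `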
@@ -17,7 +17,7 @@ export class NodeShellSession extends ShellSession {
     super(socket, pathToKubeconfig, cluster)
     this.nodeName = nodeName
     this.podId = `node-shell-${uuid()}`
-    this.kc = cluster.contextHandler.kc
+    this.kc = cluster.proxyKubeconfig()
   }

   public async open() {

@@ -6,7 +6,7 @@ class ResourceApplierApi extends LensApi {
   public async applyResource(request: LensApiRequest) {
     const { response, cluster, payload } = request
     try {
-      const resource = await resourceApplier.apply(cluster, cluster.kubeconfigPath(), payload)
+      const resource = await resourceApplier.apply(cluster, cluster.proxyKubeconfigPath(), payload)
       this.respondJson(response, [resource], 200)
     } catch(error) {
       this.respondText(response, error, 422)

@@ -45,7 +45,7 @@ const apiResources = [
 ]

 async function getAllowedNamespaces(cluster: Cluster) {
-  const api = cluster.contextHandler.kc.makeApiClient(CoreV1Api)
+  const api = cluster.proxyKubeconfig().makeApiClient(CoreV1Api)
   try {
     const namespaceList = await api.listNamespace()
     const nsAccessStatuses = await Promise.all(
@@ -58,9 +58,8 @@ async function getAllowedNamespaces(cluster: Cluster) {
     return namespaceList.body.items
       .filter((ns, i) => nsAccessStatuses[i])
       .map(ns => ns.metadata.name)
-  } catch (error) {
-    const kc = cluster.contextHandler.kc
-    const ctx = kc.getContextObject(kc.currentContext)
+  } catch(error) {
+    const ctx = cluster.proxyKubeconfig().getContextObject(cluster.contextName)
     if (ctx.namespace) {
       return [ctx.namespace]
     }

@@ -12,7 +12,7 @@ function generateKubeConfig(username: string, secret: V1Secret, cluster: Cluster
     {
       'name': cluster.contextName,
       'cluster': {
-        'server': cluster.contextHandler.kc.getCurrentCluster().server,
+        'server': cluster.apiUrl,
         'certificate-authority-data': secret.data["ca.crt"]
       }
     }
@@ -44,7 +44,7 @@ class KubeconfigRoute extends LensApi {
   public async routeServiceAccountRoute(request: LensApiRequest) {
     const { params, response, cluster} = request

-    const client = cluster.contextHandler.kc.makeApiClient(CoreV1Api);
+    const client = cluster.proxyKubeconfig().makeApiClient(CoreV1Api);
     const secretList = await client.listNamespacedSecret(params.namespace)
     const secret = secretList.body.items.find(secret => {
       const { annotations } = secret.metadata;

@@ -87,7 +87,7 @@ class PortForwardRoute extends LensApi {
       namespace: params.namespace,
       name: params.service,
       port: params.port,
-      kubeConfig: cluster.kubeconfigPath()
+      kubeConfig: cluster.proxyKubeconfigPath()
     })
     const started = await portForward.start()
     if (!started) {

@@ -38,7 +38,17 @@ class ApiWatcher {
       clearInterval(this.processor)
     }
     logger.debug("Stopping watcher for api: " + this.apiUrl)
-    this.watchRequest.abort()
+    try {
+      this.watchRequest.abort()
+      this.sendEvent({
+        type: "STREAM_END",
+        url: this.apiUrl,
+        status: 410,
+      })
+      logger.debug("watch aborted")
+    } catch (error) {
+      logger.error("Watch abort errored:" + error)
+    }
   }

   private watchHandler(phase: string, obj: any) {
@@ -50,12 +60,7 @@ class ApiWatcher {

   private doneHandler(error: Error) {
     if (error) logger.warn("watch ended: " + error.toString())
-
-    this.sendEvent({
-      type: "STREAM_END",
-      url: this.apiUrl,
-      status: 410,
-    })
     this.watchRequest.abort()
   }

   private sendEvent(evt: any) {
@@ -82,9 +87,10 @@ class WatchRoute extends LensApi {
     response.setHeader("Content-Type", "text/event-stream")
     response.setHeader("Cache-Control", "no-cache")
     response.setHeader("Connection", "keep-alive")
+    logger.debug("watch using kubeconfig:" + JSON.stringify(cluster.proxyKubeconfig(), null, 2))

     apis.forEach(apiUrl => {
-      const watcher = new ApiWatcher(apiUrl, cluster.contextHandler.kc, response)
+      const watcher = new ApiWatcher(apiUrl, cluster.proxyKubeconfig(), response)
       watcher.start()
       watchers.push(watcher)
     })

src/migrations/cluster-store/3.6.0-beta.1.ts (new file, 39 lines)
@@ -0,0 +1,39 @@
+// move embedded kubeconfig into separate file and add reference to it to cluster settings
+import { app } from "electron"
+import { ensureDirSync } from "fs-extra"
+import * as path from "path"
+import { KubeConfig } from "@kubernetes/client-node";
+import { writeEmbeddedKubeConfig } from "../../common/utils/kubeconfig"
+
+export function migration(store: any) {
+  console.log("CLUSTER STORE, MIGRATION: 3.6.0-beta.1");
+  const clusters: any[] = []
+
+  const kubeConfigBase = path.join(app.getPath("userData"), "kubeconfigs")
+  ensureDirSync(kubeConfigBase)
+  const storedClusters = store.get("clusters") as any[]
+  if (!storedClusters) return
+
+  console.log("num clusters to migrate: ", storedClusters.length)
+  for (const cluster of storedClusters ) {
+    try {
+      // take the embedded kubeconfig and dump it into a file
+      const kubeConfigFile = writeEmbeddedKubeConfig(cluster.id, cluster.kubeConfig)
+      cluster.kubeConfigPath = kubeConfigFile
+
+      const kc = new KubeConfig()
+      kc.loadFromFile(cluster.kubeConfigPath)
+      cluster.contextName = kc.getCurrentContext()
+
+      delete cluster.kubeConfig
+      clusters.push(cluster)
+    } catch(error) {
+      console.error("failed to migrate kubeconfig for cluster:", cluster.id)
+    }
+  }
+
+  // "overwrite" the cluster configs
+  if (clusters.length > 0) {
+    store.set("clusters", clusters)
+  }
+}
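Read as a data transformation, the migration gives each persisted cluster entry a kubeConfigPath and contextName and drops the embedded kubeConfig blob; an entry whose config cannot be written or parsed is logged and not pushed, so it disappears from the clusters list. Before and after for a single entry (all values illustrative):

    // stored under the "clusters" key before the migration runs
    const before = {
      id: "some-cluster-id",
      kubeConfig: "apiVersion: v1\nkind: Config\n", // full embedded YAML
      preferences: { clusterName: "minikube" },
    }

    // the same entry afterwards
    const after = {
      id: "some-cluster-id",
      kubeConfigPath: "/home/user/.config/Lens/kubeconfigs/some-cluster-id",
      contextName: "minikube",
      preferences: { clusterName: "minikube" },
    }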
@@ -10,6 +10,18 @@
     <b-form-group
       label="Choose config:"
     >
+      <b-form-file
+        v-model="file"
+        :state="Boolean(file)"
+        placeholder="Choose a file or drop it here..."
+        drop-placeholder="Drop file here..."
+        @input="reloadKubeContexts()"
+      />
+
+      <div class="mt-3">
+        Selected file: {{ file ? file.name : '' }}
+      </div>
+
       <b-form-select
         id="kubecontext-select"
         v-model="kubecontext"
@@ -113,6 +125,10 @@ import * as PrismEditor from 'vue-prism-editor'
 import * as k8s from "@kubernetes/client-node"
 import { dumpConfigYaml } from "../../../main/k8s"
 import ClustersMixin from "@/_vue/mixins/ClustersMixin";
+import * as path from "path"
+import fs from 'fs'
+import { v4 as uuidv4 } from 'uuid';
+import { writeEmbeddedKubeConfig} from "../../../common/utils/kubeconfig"

 class ClusterAccessError extends Error {}

@@ -125,6 +141,8 @@ export default {
   },
   data(){
     return {
+      file: null,
+      filepath: null,
       clusterconfig: "",
       httpsProxy: "",
       kubecontext: "",
@@ -136,7 +154,10 @@ export default {
     }
   },
   mounted: function() {
-    this.$store.dispatch("reloadAvailableKubeContexts");
+    const kubeConfigPath = path.join(process.env.HOME, '.kube', 'config')
+    this.filepath = kubeConfigPath
+    this.file = new File(fs.readFileSync(this.filepath), this.filepath)
+    this.$store.dispatch("reloadAvailableKubeContexts", this.filepath);
     this.seenContexts = JSON.parse(JSON.stringify(this.$store.getters.seenContexts)) // clone seenContexts from store
     this.storeSeenContexts()
   },
@@ -163,6 +184,10 @@ export default {
     },
   },
   methods: {
+    reloadKubeContexts() {
+      this.filepath = this.file.path
+      this.$store.dispatch("reloadAvailableKubeContexts", this.file.path);
+    },
     isNewContext(context) {
       return this.newContexts.indexOf(context) > -1
     },
@@ -196,8 +221,15 @@ export default {
       try {
         const kc = new k8s.KubeConfig();
         kc.loadFromString(this.clusterconfig); // throws TypeError if we cannot parse kubeconfig
+        const clusterId = uuidv4();
+        // We need to store the kubeconfig to "app-home"/
+        if (this.kubecontext === "custom") {
+          this.filepath = writeEmbeddedKubeConfig(clusterId, this.clusterconfig)
+        }
         const clusterInfo = {
-          kubeConfig: dumpConfigYaml(kc),
+          id: clusterId,
+          kubeConfigPath: this.filepath,
+          contextName: kc.currentContext,
           preferences: {
             clusterName: kc.currentContext
           },
@@ -206,6 +238,7 @@ export default {
         if (this.httpsProxy) {
           clusterInfo.preferences.httpsProxy = this.httpsProxy
         }
+        console.log("sending clusterInfo:", clusterInfo)
         let res = await this.$store.dispatch('addCluster', clusterInfo)
         console.log("addCluster result:", res)
         if(!res){

@@ -6,10 +6,10 @@ const state = {
 }

 const actions = {
-  reloadAvailableKubeContexts: ({commit}) => {
+  reloadAvailableKubeContexts({commit}, file) {
     let kc = new k8s.KubeConfig();
     try {
-      kc.loadFromDefault();
+      kc.loadFromFile(file);
     } catch (error) {
       console.error("Failed to read default kubeconfig: " + error.message);
     }

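With the default-config fallback gone, the action only works when callers pass a path, which the component above now always does: once on mount with ~/.kube/config, and again whenever the file picker changes. The call shape is now (path illustrative):

    // dispatched from the add-cluster component
    this.$store.dispatch("reloadAvailableKubeContexts", "/home/user/.kube/config")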