Compare commits

...

3 Commits

15 changed files with 1499 additions and 308 deletions

View File

@@ -272,6 +272,10 @@ func main() {
rpmMirror = rpm.NewMirrorManager(store, logger, rpmMeta)
var uploadStore storage.FileStore
uploadStore = storage.FileStore{BaseDir: filepath.Join(cfg.DataDir, "uploads")}
err = os.MkdirAll(repoManager.BaseDir, 0o755)
if err != nil {
log.Fatalf("git dir error: %v", err)
}
err = os.MkdirAll(rpmBase, 0o755)
if err != nil {
log.Fatalf("rpm dir error: %v", err)
@@ -288,6 +292,7 @@ func main() {
Repos: repoManager,
RpmBase: rpmBase,
RpmMeta: rpmMeta,
RpmMirror: rpmMirror,
DockerBase: dockerBase,
Uploads: uploadStore,
Logger: logger,
@@ -448,10 +453,13 @@ func main() {
router.Handle("GET", "/api/repos/:id/rpm/package", api.RepoRPMPackage)
router.Handle("POST", "/api/repos/:id/rpm/subdirs", api.RepoRPMCreateSubdir)
router.Handle("GET", "/api/repos/:id/rpm/subdir", api.RepoRPMGetSubdir)
router.Handle("POST", "/api/repos/:id/rpm/subdir/update", api.RepoRPMRenameSubdir)
router.Handle("POST", "/api/repos/:id/rpm/subdir/rename", api.RepoRPMRenameSubdir)
router.Handle("POST", "/api/repos/:id/rpm/subdir/sync", api.RepoRPMSyncSubdir)
router.Handle("POST", "/api/repos/:id/rpm/subdir/suspend", api.RepoRPMSuspendSubdir)
router.Handle("POST", "/api/repos/:id/rpm/subdir/resume", api.RepoRPMResumeSubdir)
router.Handle("POST", "/api/repos/:id/rpm/subdir/rebuild-metadata", api.RepoRPMRebuildSubdirMetadata)
router.Handle("POST", "/api/repos/:id/rpm/subdir/cancel", api.RepoRPMCancelSubdirSync)
router.Handle("GET", "/api/repos/:id/rpm/subdir/runs", api.RepoRPMMirrorRuns)
router.Handle("DELETE", "/api/repos/:id/rpm/subdir/runs", api.RepoRPMClearMirrorRuns)
router.Handle("DELETE", "/api/repos/:id/rpm/subdir", api.RepoRPMDeleteSubdir)
@@ -510,7 +518,7 @@ func main() {
mux.Handle("/api/auth/oidc/login", middleware.WithUser(store, middleware.AccessLog(logger, router)))
mux.Handle("/api/auth/oidc/callback", middleware.WithUser(store, middleware.AccessLog(logger, router)))
mux.Handle("/api/health", middleware.AccessLog(logger, router))
mux.Handle("/", middleware.WithUser(store, spaHandler(filepath.Join("..", "frontend", "dist"))))
mux.Handle("/", middleware.WithUser(store, spaHandler(cfg.FrontendDir)))
extraListenerManager = newAdditionalListenerManager(store, mux, logger)
api.OnTLSListenersChanged = extraListenerManager.NotifyReload
api.OnTLSListenerRuntimeStatus = extraListenerManager.ListenerEndpointCounts

View File

@@ -3,6 +3,7 @@ package config
import "encoding/json"
import "errors"
import "os"
import "path/filepath"
import "strings"
import "time"
import "strconv"
@@ -12,6 +13,7 @@ type Config struct {
HTTPSAddrs []string `json:"https_addrs"`
PublicBaseURL string `json:"public_base_url"`
DataDir string `json:"data_dir"`
FrontendDir string `json:"frontend_dir"`
DBDriver string `json:"db_driver"`
DBDSN string `json:"db_dsn"`
SessionTTL Duration `json:"session_ttl"`
@@ -51,6 +53,7 @@ func Load(path string) (Config, error) {
HTTPAddrs: []string{":1080"},
HTTPSAddrs: []string{},
DataDir: "./codit-data",
FrontendDir: filepath.Join("..", "frontend", "dist"),
DBDriver: "sqlite",
DBDSN: "file:./codit-data/codit.db?_pragma=foreign_keys(1)",
SessionTTL: Duration(24 * time.Hour),
@@ -106,6 +109,10 @@ func override(cfg *Config) {
if v != "" {
cfg.DataDir = v
}
v = os.Getenv("CODIT_FRONTEND_DIR")
if v != "" {
cfg.FrontendDir = v
}
v = os.Getenv("CODIT_DB_DRIVER")
if v != "" {
cfg.DBDriver = v

View File

@@ -18,6 +18,9 @@ func TestLoadDefaults(t *testing.T) {
if cfg.GitHTTPPrefix != "/git" {
t.Fatalf("unexpected git prefix default: %s", cfg.GitHTTPPrefix)
}
if cfg.FrontendDir == "" {
t.Fatalf("frontend_dir default missing")
}
}
func TestLoadFromJSONAndEnvOverride(t *testing.T) {
@@ -34,6 +37,7 @@ func TestLoadFromJSONAndEnvOverride(t *testing.T) {
t.Fatalf("write config file: %v", err)
}
t.Setenv("CODIT_DB_DSN", "file:override.db")
t.Setenv("CODIT_FRONTEND_DIR", "/srv/codit/frontend")
cfg, err = Load(path)
if err != nil {
t.Fatalf("Load() error: %v", err)
@@ -44,6 +48,9 @@ func TestLoadFromJSONAndEnvOverride(t *testing.T) {
if cfg.AuthMode != "hybrid" {
t.Fatalf("auth_mode normalization failed: %s", cfg.AuthMode)
}
if cfg.FrontendDir != "/srv/codit/frontend" {
t.Fatalf("frontend_dir env override failed: %s", cfg.FrontendDir)
}
}
func TestDurationUnmarshalJSON(t *testing.T) {

View File

@@ -12,13 +12,13 @@ func (s *Store) ListRPMRepoDirs(repoID string) ([]models.RPMRepoDir, error) {
var items []models.RPMRepoDir
var item models.RPMRepoDir
var err error
rows, err = s.DB.Query(`SELECT repo_id, path, mode, remote_url, connect_host, host_header, tls_server_name, tls_insecure_skip_verify, sync_interval_sec, sync_enabled, dirty, next_sync_at, sync_running, sync_status, sync_error, sync_step, sync_total, sync_done, sync_failed, sync_deleted, last_sync_started_at, last_sync_finished_at, last_sync_success_at, last_synced_revision, created_at, updated_at FROM rpm_repo_dirs WHERE repo_id = ? ORDER BY LENGTH(path), path`, repoID)
rows, err = s.DB.Query(`SELECT repo_id, path, mode, allow_delete, remote_url, connect_host, host_header, tls_server_name, tls_insecure_skip_verify, sync_interval_sec, sync_enabled, dirty, next_sync_at, sync_running, sync_status, sync_error, sync_step, sync_total, sync_done, sync_failed, sync_deleted, last_sync_started_at, last_sync_finished_at, last_sync_success_at, last_synced_revision, created_at, updated_at FROM rpm_repo_dirs WHERE repo_id = ? ORDER BY LENGTH(path), path`, repoID)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
err = rows.Scan(&item.RepoID, &item.Path, &item.Mode, &item.RemoteURL, &item.ConnectHost, &item.HostHeader, &item.TLSServerName, &item.TLSInsecureSkipVerify, &item.SyncIntervalSec, &item.SyncEnabled, &item.Dirty, &item.NextSyncAt, &item.SyncRunning, &item.SyncStatus, &item.SyncError, &item.SyncStep, &item.SyncTotal, &item.SyncDone, &item.SyncFailed, &item.SyncDeleted, &item.LastSyncStartedAt, &item.LastSyncFinishedAt, &item.LastSyncSuccessAt, &item.LastSyncedRevision, &item.CreatedAt, &item.UpdatedAt)
err = rows.Scan(&item.RepoID, &item.Path, &item.Mode, &item.AllowDelete, &item.RemoteURL, &item.ConnectHost, &item.HostHeader, &item.TLSServerName, &item.TLSInsecureSkipVerify, &item.SyncIntervalSec, &item.SyncEnabled, &item.Dirty, &item.NextSyncAt, &item.SyncRunning, &item.SyncStatus, &item.SyncError, &item.SyncStep, &item.SyncTotal, &item.SyncDone, &item.SyncFailed, &item.SyncDeleted, &item.LastSyncStartedAt, &item.LastSyncFinishedAt, &item.LastSyncSuccessAt, &item.LastSyncedRevision, &item.CreatedAt, &item.UpdatedAt)
if err != nil {
return nil, err
}
@@ -39,10 +39,11 @@ func (s *Store) UpsertRPMRepoDir(item models.RPMRepoDir) error {
item.SyncIntervalSec = 300
}
_, err = s.DB.Exec(`
INSERT INTO rpm_repo_dirs (repo_id, path, mode, remote_url, connect_host, host_header, tls_server_name, tls_insecure_skip_verify, sync_interval_sec, sync_enabled, dirty, next_sync_at, created_at, updated_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
INSERT INTO rpm_repo_dirs (repo_id, path, mode, allow_delete, remote_url, connect_host, host_header, tls_server_name, tls_insecure_skip_verify, sync_interval_sec, sync_enabled, dirty, next_sync_at, created_at, updated_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(repo_id, path) DO UPDATE SET
mode = excluded.mode,
allow_delete = excluded.allow_delete,
remote_url = excluded.remote_url,
connect_host = excluded.connect_host,
host_header = excluded.host_header,
@@ -57,6 +58,7 @@ func (s *Store) UpsertRPMRepoDir(item models.RPMRepoDir) error {
item.RepoID,
item.Path,
item.Mode,
item.AllowDelete,
item.RemoteURL,
item.ConnectHost,
item.HostHeader,
@@ -75,8 +77,8 @@ func (s *Store) GetRPMRepoDir(repoID string, path string) (models.RPMRepoDir, er
var row *sql.Row
var item models.RPMRepoDir
var err error
row = s.DB.QueryRow(`SELECT repo_id, path, mode, remote_url, connect_host, host_header, tls_server_name, tls_insecure_skip_verify, sync_interval_sec, sync_enabled, dirty, next_sync_at, sync_running, sync_status, sync_error, sync_step, sync_total, sync_done, sync_failed, sync_deleted, last_sync_started_at, last_sync_finished_at, last_sync_success_at, last_synced_revision, created_at, updated_at FROM rpm_repo_dirs WHERE repo_id = ? AND path = ?`, repoID, path)
err = row.Scan(&item.RepoID, &item.Path, &item.Mode, &item.RemoteURL, &item.ConnectHost, &item.HostHeader, &item.TLSServerName, &item.TLSInsecureSkipVerify, &item.SyncIntervalSec, &item.SyncEnabled, &item.Dirty, &item.NextSyncAt, &item.SyncRunning, &item.SyncStatus, &item.SyncError, &item.SyncStep, &item.SyncTotal, &item.SyncDone, &item.SyncFailed, &item.SyncDeleted, &item.LastSyncStartedAt, &item.LastSyncFinishedAt, &item.LastSyncSuccessAt, &item.LastSyncedRevision, &item.CreatedAt, &item.UpdatedAt)
row = s.DB.QueryRow(`SELECT repo_id, path, mode, allow_delete, remote_url, connect_host, host_header, tls_server_name, tls_insecure_skip_verify, sync_interval_sec, sync_enabled, dirty, next_sync_at, sync_running, sync_status, sync_error, sync_step, sync_total, sync_done, sync_failed, sync_deleted, last_sync_started_at, last_sync_finished_at, last_sync_success_at, last_synced_revision, created_at, updated_at FROM rpm_repo_dirs WHERE repo_id = ? AND path = ?`, repoID, path)
err = row.Scan(&item.RepoID, &item.Path, &item.Mode, &item.AllowDelete, &item.RemoteURL, &item.ConnectHost, &item.HostHeader, &item.TLSServerName, &item.TLSInsecureSkipVerify, &item.SyncIntervalSec, &item.SyncEnabled, &item.Dirty, &item.NextSyncAt, &item.SyncRunning, &item.SyncStatus, &item.SyncError, &item.SyncStep, &item.SyncTotal, &item.SyncDone, &item.SyncFailed, &item.SyncDeleted, &item.LastSyncStartedAt, &item.LastSyncFinishedAt, &item.LastSyncSuccessAt, &item.LastSyncedRevision, &item.CreatedAt, &item.UpdatedAt)
if err != nil {
return item, err
}
@@ -120,7 +122,7 @@ func (s *Store) TryStartRPMMirrorTask(repoID string, path string, now int64) (bo
var res sql.Result
var rows int64
var err error
res, err = s.DB.Exec(`UPDATE rpm_repo_dirs SET sync_running = 1, sync_status = 'running', sync_error = '', sync_step = 'start', sync_total = 0, sync_done = 0, sync_failed = 0, sync_deleted = 0, last_sync_started_at = ?, updated_at = ? WHERE repo_id = ? AND path = ? AND mode = 'mirror' AND sync_running = 0`, now, now, repoID, path)
res, err = s.DB.Exec(`UPDATE rpm_repo_dirs SET sync_running = 1, sync_status = 'running', sync_error = '', sync_step = 'start', sync_total = 0, sync_done = 0, sync_failed = 0, sync_deleted = 0, last_sync_started_at = ?, updated_at = ? WHERE repo_id = ? AND path = ? AND mode = 'mirror' AND sync_enabled = 1 AND sync_running = 0`, now, now, repoID, path)
if err != nil {
return false, err
}
@@ -222,6 +224,18 @@ func (s *Store) ListRPMMirrorPaths() ([]models.RPMMirrorTask, error) {
return out, nil
}
// HasRunningRPMMirrorTask reports whether any mirror-mode directory of the
// given repository currently has a sync in flight (sync_running = 1).
// It is used as a guard before destructive operations such as repo or
// project deletion.
func (s *Store) HasRunningRPMMirrorTask(repoID string) (bool, error) {
	var count int64
	if err := s.DB.QueryRow(`SELECT COUNT(1) FROM rpm_repo_dirs WHERE repo_id = ? AND mode = 'mirror' AND sync_running = 1`, repoID).Scan(&count); err != nil {
		return false, err
	}
	return count > 0, nil
}
func (s *Store) CreateRPMMirrorRun(repoID string, path string, startedAt int64) (string, error) {
var id string
var err error
@@ -346,13 +360,18 @@ func (s *Store) MoveRPMRepoDir(repoID string, oldPath string, newPath string) er
return err
}
now = time.Now().UTC().Unix()
oldPrefix = oldPath + "/"
newPrefix = newPath + "/"
_, err = tx.Exec(`DELETE FROM rpm_mirror_runs WHERE repo_id = ? AND (path = ? OR path LIKE (? || '%'))`, repoID, oldPath, oldPrefix)
if err != nil {
_ = tx.Rollback()
return err
}
_, err = tx.Exec(`UPDATE rpm_repo_dirs SET path = ?, updated_at = ? WHERE repo_id = ? AND path = ?`, newPath, now, repoID, oldPath)
if err != nil {
_ = tx.Rollback()
return err
}
oldPrefix = oldPath + "/"
newPrefix = newPath + "/"
_, err = tx.Exec(`UPDATE rpm_repo_dirs SET path = (? || SUBSTR(path, ?)), updated_at = ? WHERE repo_id = ? AND path LIKE (? || '%')`, newPrefix, len(oldPrefix)+1, now, repoID, oldPrefix)
if err != nil {
_ = tx.Rollback()

View File

@@ -31,6 +31,7 @@ type API struct {
Repos git.RepoManager
RpmBase string
RpmMeta *rpm.MetaManager
RpmMirror *rpm.MirrorManager
DockerBase string
Uploads storage.FileStore
Logger *util.Logger
@@ -219,11 +220,12 @@ type repoBranchCreateRequest struct {
From string `json:"from"`
}
type repoRPMSubdirRequest struct {
type repoRPMCreateRequest struct {
Name string `json:"name"`
Type string `json:"type"`
Parent string `json:"parent"`
Mode string `json:"mode"`
AllowDelete bool `json:"allow_delete"`
RemoteURL string `json:"remote_url"`
ConnectHost string `json:"connect_host"`
HostHeader string `json:"host_header"`
@@ -232,16 +234,17 @@ type repoRPMSubdirRequest struct {
SyncIntervalSec int64 `json:"sync_interval_sec"`
}
type repoRPMRenameRequest struct {
Path string `json:"path"`
Name string `json:"name"`
Mode string `json:"mode"`
RemoteURL string `json:"remote_url"`
ConnectHost string `json:"connect_host"`
HostHeader string `json:"host_header"`
TLSServerName string `json:"tls_server_name"`
TLSInsecureSkipVerify bool `json:"tls_insecure_skip_verify"`
SyncIntervalSec int64 `json:"sync_interval_sec"`
type repoRPMUpdateRequest struct {
Path *string `json:"path"`
Name *string `json:"name"`
Mode *string `json:"mode"`
AllowDelete *bool `json:"allow_delete"`
RemoteURL *string `json:"remote_url"`
ConnectHost *string `json:"connect_host"`
HostHeader *string `json:"host_header"`
TLSServerName *string `json:"tls_server_name"`
TLSInsecureSkipVerify *bool `json:"tls_insecure_skip_verify"`
SyncIntervalSec *int64 `json:"sync_interval_sec"`
}
type createAPIKeyRequest struct {
@@ -1322,6 +1325,7 @@ func (api *API) UpdateProject(w http.ResponseWriter, r *http.Request, params map
func (api *API) DeleteProject(w http.ResponseWriter, r *http.Request, params map[string]string) {
var err error
var repos []models.Repo
var running bool
var i int
var tempPaths []string
var sourcePaths []string
@@ -1334,6 +1338,20 @@ func (api *API) DeleteProject(w http.ResponseWriter, r *http.Request, params map
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
for i = 0; i < len(repos); i++ {
if repos[i].Type != "rpm" {
continue
}
running, err = api.Store.HasRunningRPMMirrorTask(repos[i].ID)
if err != nil {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
if running {
WriteJSON(w, http.StatusConflict, map[string]string{"error": "cannot delete project while rpm mirror sync is running", "repo_id": repos[i].ID, "repo_name": repos[i].Name})
return
}
}
tempPaths = make([]string, 0, len(repos))
sourcePaths = make([]string, 0, len(repos))
for i = 0; i < len(repos); i++ {
@@ -1979,6 +1997,7 @@ func (api *API) UpdateRepo(w http.ResponseWriter, r *http.Request, params map[st
func (api *API) DeleteRepo(w http.ResponseWriter, r *http.Request, params map[string]string) {
var repo models.Repo
var project models.Project
var running bool
var err error
var temp string
repo, err = api.Store.GetRepo(params["id"])
@@ -1989,6 +2008,17 @@ func (api *API) DeleteRepo(w http.ResponseWriter, r *http.Request, params map[st
if !api.requireProjectRole(w, r, repo.ProjectID, "writer") {
return
}
if repo.Type == "rpm" {
running, err = api.Store.HasRunningRPMMirrorTask(repo.ID)
if err != nil {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
if running {
WriteJSON(w, http.StatusConflict, map[string]string{"error": "cannot delete repository while rpm mirror sync is running"})
return
}
}
project, err = api.Store.GetProject(repo.ProjectID)
if err != nil {
WriteJSON(w, http.StatusNotFound, map[string]string{"error": "project not found"})
@@ -2687,7 +2717,7 @@ func (api *API) RepoRPMCreateSubdir(w http.ResponseWriter, r *http.Request, para
var writeBlocked bool
var writeBlockedPath string
var err error
var req repoRPMSubdirRequest
var req repoRPMCreateRequest
var name string
var dirType string
var mode string
@@ -2699,6 +2729,9 @@ func (api *API) RepoRPMCreateSubdir(w http.ResponseWriter, r *http.Request, para
var fullRelLower string
var absParent string
var hasRepoAncestor bool
var allowDelete bool
var tlsInsecureSkipVerify bool
var syncIntervalSec int64
repo, err = api.Store.GetRepo(params["id"])
if err != nil {
WriteJSON(w, http.StatusNotFound, map[string]string{"error": "repo not found"})
@@ -2766,6 +2799,13 @@ func (api *API) RepoRPMCreateSubdir(w http.ResponseWriter, r *http.Request, para
}
if dirType == "repo" {
mode = normalizeRPMRepoDirMode(req.Mode)
allowDelete = req.AllowDelete
tlsInsecureSkipVerify = req.TLSInsecureSkipVerify
syncIntervalSec = req.SyncIntervalSec
if syncIntervalSec == 0 {
syncIntervalSec = 300
}
syncIntervalSec = normalizeRPMMirrorIntervalSec(syncIntervalSec)
absParent = filepath.Join(repo.Path, parentPath)
hasRepoAncestor, err = hasRepodataAncestor(repo.Path, absParent)
if err != nil {
@@ -2799,12 +2839,13 @@ func (api *API) RepoRPMCreateSubdir(w http.ResponseWriter, r *http.Request, para
RepoID: repo.ID,
Path: fullRel,
Mode: mode,
AllowDelete: allowDelete,
RemoteURL: strings.TrimSpace(req.RemoteURL),
ConnectHost: strings.TrimSpace(req.ConnectHost),
HostHeader: strings.TrimSpace(req.HostHeader),
TLSServerName: strings.TrimSpace(req.TLSServerName),
TLSInsecureSkipVerify: req.TLSInsecureSkipVerify,
SyncIntervalSec: normalizeRPMMirrorIntervalSec(req.SyncIntervalSec),
TLSInsecureSkipVerify: tlsInsecureSkipVerify,
SyncIntervalSec: syncIntervalSec,
SyncEnabled: true,
}
err = api.Store.UpsertRPMRepoDir(dirConfig)
@@ -2917,11 +2958,118 @@ func (api *API) RepoRPMResumeSubdir(w http.ResponseWriter, r *http.Request, para
api.repoRPMSetSyncEnabled(w, r, params, true)
}
// RepoRPMRebuildSubdirMetadata schedules an asynchronous repodata rebuild
// for one subdirectory of an rpm-type repository.
//
// The target subdirectory is selected via the "path" query parameter. It
// must be a safe relative path and must already contain a repodata
// directory (i.e. be an actual repository root); otherwise the request is
// rejected with 400. The rebuild itself runs later via the metadata
// manager's scheduler.
func (api *API) RepoRPMRebuildSubdirMetadata(w http.ResponseWriter, r *http.Request, params map[string]string) {
	repo, err := api.Store.GetRepo(params["id"])
	if err != nil {
		WriteJSON(w, http.StatusNotFound, map[string]string{"error": "repo not found"})
		return
	}
	// Rebuilding metadata mutates repository content, so writer access is required.
	if !api.requireRepoRole(w, r, repo.ID, "writer") {
		return
	}
	if repo.Type != "rpm" {
		WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "repo is not rpm"})
		return
	}
	relPath := strings.TrimSpace(r.URL.Query().Get("path"))
	if relPath == "" {
		WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "path required"})
		return
	}
	if !isSafeSubdirPath(relPath) {
		WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid path"})
		return
	}
	normalized := normalizeRPMPath(relPath)
	target := filepath.Join(repo.Path, filepath.FromSlash(normalized))
	// Only directories that already carry a repodata subdirectory are
	// considered repository roots eligible for a rebuild.
	if _, err = os.Stat(filepath.Join(target, "repodata")); err != nil {
		if os.IsNotExist(err) {
			WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "path is not a repository directory"})
			return
		}
		WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}
	if api.RpmMeta == nil {
		WriteJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "metadata manager unavailable"})
		return
	}
	api.RpmMeta.Schedule(target)
	WriteJSON(w, http.StatusOK, map[string]string{"status": "scheduled"})
}
// RepoRPMCancelSubdirSync asks the mirror manager to cancel an in-flight
// sync for one mirror-mode subdirectory of an rpm repository.
//
// The subdirectory is selected via the "path" query parameter and must have
// a stored directory config in mirror mode with a sync currently marked as
// running. A 409 is returned both when the stored state says no sync is
// running and when the mirror manager has no live task to cancel (the two
// can disagree briefly, e.g. right after a task finishes).
func (api *API) RepoRPMCancelSubdirSync(w http.ResponseWriter, r *http.Request, params map[string]string) {
	repo, err := api.Store.GetRepo(params["id"])
	if err != nil {
		WriteJSON(w, http.StatusNotFound, map[string]string{"error": "repo not found"})
		return
	}
	if !api.requireRepoRole(w, r, repo.ID, "writer") {
		return
	}
	if repo.Type != "rpm" {
		WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "repo is not rpm"})
		return
	}
	relPath := strings.TrimSpace(r.URL.Query().Get("path"))
	if relPath == "" {
		WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "path required"})
		return
	}
	if !isSafeSubdirPath(relPath) {
		WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid path"})
		return
	}
	normalized := normalizeRPMPath(relPath)
	config, err := api.Store.GetRPMRepoDir(repo.ID, normalized)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			WriteJSON(w, http.StatusNotFound, map[string]string{"error": "repo directory config not found"})
			return
		}
		WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}
	if normalizeRPMRepoDirMode(config.Mode) != "mirror" {
		WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "sync control is only supported for mirror mode"})
		return
	}
	if !config.SyncRunning {
		WriteJSON(w, http.StatusConflict, map[string]string{"error": "sync is not running"})
		return
	}
	if api.RpmMirror == nil {
		WriteJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "mirror manager unavailable"})
		return
	}
	// CancelTask reports whether a live task was actually found; stored
	// sync_running state can lag behind the in-memory task table.
	if !api.RpmMirror.CancelTask(repo.ID, normalized) {
		WriteJSON(w, http.StatusConflict, map[string]string{"error": "sync is not running"})
		return
	}
	WriteJSON(w, http.StatusOK, map[string]string{"status": "cancel_requested"})
}
func (api *API) repoRPMSetSyncEnabled(w http.ResponseWriter, r *http.Request, params map[string]string, enabled bool) {
var repo models.Repo
var relPath string
var normalizedPath string
var config models.RPMRepoDir
var cancelRequested bool
var err error
repo, err = api.Store.GetRepo(params["id"])
if err != nil {
@@ -2963,7 +3111,11 @@ func (api *API) repoRPMSetSyncEnabled(w http.ResponseWriter, r *http.Request, pa
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
WriteJSON(w, http.StatusOK, map[string]any{"status": "ok", "sync_enabled": enabled})
cancelRequested = false
if !enabled && api.RpmMirror != nil {
cancelRequested = api.RpmMirror.CancelTask(repo.ID, normalizedPath)
}
WriteJSON(w, http.StatusOK, map[string]any{"status": "ok", "sync_enabled": enabled, "cancel_requested": cancelRequested})
}
func (api *API) RepoRPMMirrorRuns(w http.ResponseWriter, r *http.Request, params map[string]string) {
@@ -3050,6 +3202,14 @@ func (api *API) RepoRPMDeleteSubdir(w http.ResponseWriter, r *http.Request, para
var repo models.Repo
var writeBlocked bool
var writeBlockedPath string
var targetConfig models.RPMRepoDir
var targetHasConfig bool
var mirrorRoot string
var allowMirrorRootDelete bool
var allowMirrorDelete bool
var busy bool
var busyPath string
var busyReason string
var err error
var relPath string
var fullPath string
@@ -3084,16 +3244,54 @@ func (api *API) RepoRPMDeleteSubdir(w http.ResponseWriter, r *http.Request, para
}
relPathClean = filepath.ToSlash(filepath.Clean(filepath.FromSlash(relPath)))
relPathClean = strings.TrimPrefix(relPathClean, "./")
busy, busyPath, busyReason, err = api.hasBusyMirrorRootUnder(repo, relPathClean)
if err != nil {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
if busy {
WriteJSON(w, http.StatusConflict, map[string]string{"error": "cannot delete directory while mirror activity is running", "mirror_root": busyPath, "reason": busyReason})
return
}
targetConfig, err = api.Store.GetRPMRepoDir(repo.ID, relPathClean)
if err == nil {
targetHasConfig = true
} else if err != nil && !errors.Is(err, sql.ErrNoRows) {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
writeBlocked, writeBlockedPath, err = api.isRPMWriteBlocked(repo, relPathClean)
if err != nil {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
if writeBlocked {
mirrorRoot = normalizeRPMPath(writeBlockedPath)
allowMirrorRootDelete = targetHasConfig &&
normalizeRPMRepoDirMode(targetConfig.Mode) == "mirror" &&
normalizeRPMPath(targetConfig.Path) == normalizeRPMPath(relPathClean) &&
normalizeRPMPath(relPathClean) == mirrorRoot
if !allowMirrorRootDelete {
allowMirrorDelete, err = api.allowRPMMirrorDelete(repo, relPathClean, true)
if err != nil {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
if !allowMirrorDelete {
WriteJSON(w, http.StatusForbidden, map[string]string{"error": "writes are disabled for mirror repo subtree", "mirror_root": writeBlockedPath})
return
}
}
if targetConfig.SyncRunning {
WriteJSON(w, http.StatusConflict, map[string]string{"error": "cannot delete mirror repo directory while sync is running", "mirror_root": writeBlockedPath})
return
}
}
fullPath = filepath.Join(repo.Path, filepath.FromSlash(relPathClean))
if writeBlocked && allowMirrorRootDelete && api.RpmMeta != nil && api.RpmMeta.IsRunning(fullPath) {
WriteJSON(w, http.StatusConflict, map[string]string{"error": "cannot delete mirror repo directory while metadata update is running", "mirror_root": writeBlockedPath})
return
}
info, err = os.Stat(fullPath)
if err != nil {
if os.IsNotExist(err) {
@@ -3107,6 +3305,18 @@ func (api *API) RepoRPMDeleteSubdir(w http.ResponseWriter, r *http.Request, para
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "path is not a directory"})
return
}
if writeBlocked && !allowMirrorRootDelete && allowMirrorDelete {
repodataPath = filepath.Join(fullPath, "repodata")
_, err = os.Stat(repodataPath)
if err == nil {
WriteJSON(w, http.StatusForbidden, map[string]string{"error": "only container directories can be deleted in mirror subtree", "mirror_root": writeBlockedPath})
return
}
if err != nil && !os.IsNotExist(err) {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
}
err = os.RemoveAll(fullPath)
if err != nil {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
@@ -3136,7 +3346,7 @@ func (api *API) RepoRPMRenameSubdir(w http.ResponseWriter, r *http.Request, para
var renamed bool
var isRepoDir bool
var err error
var req repoRPMRenameRequest
var req repoRPMUpdateRequest
var relPath string
var relPathClean string
var newName string
@@ -3149,6 +3359,19 @@ func (api *API) RepoRPMRenameSubdir(w http.ResponseWriter, r *http.Request, para
var repodataPath string
var hasAncestor bool
var absParent string
var existingConfigLoaded bool
var targetConfigExists bool
var allowDelete bool
var tlsInsecureSkipVerify bool
var syncIntervalSec int64
var modeValue string
var remoteURL string
var connectHost string
var hostHeader string
var tlsServerName string
var busy bool
var busyPath string
var busyReason string
repo, err = api.Store.GetRepo(params["id"])
if err != nil {
WriteJSON(w, http.StatusNotFound, map[string]string{"error": "repo not found"})
@@ -3166,30 +3389,49 @@ func (api *API) RepoRPMRenameSubdir(w http.ResponseWriter, r *http.Request, para
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid json"})
return
}
relPath = strings.TrimSpace(req.Path)
newName = strings.TrimSpace(req.Name)
if relPath == "" || newName == "" {
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "path and name required"})
if req.Path == nil {
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "path required"})
return
}
if strings.EqualFold(newName, "repodata") {
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "repodata is reserved"})
relPath = strings.TrimSpace(*req.Path)
newName = ""
if req.Name != nil {
newName = strings.TrimSpace(*req.Name)
}
if relPath == "" {
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "path required"})
return
}
if !isSafeSubdirPath(relPath) {
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid path"})
return
}
if !isSafeSubdirName(newName) {
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid name"})
return
}
if isRepodataPath(relPath) {
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "repodata cannot be renamed"})
return
}
relPathClean = filepath.ToSlash(filepath.Clean(filepath.FromSlash(relPath)))
relPathClean = strings.TrimPrefix(relPathClean, "./")
busy, busyPath, busyReason, err = api.hasBusyMirrorRootUnder(repo, relPathClean)
if err != nil {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
if busy {
WriteJSON(w, http.StatusConflict, map[string]string{"error": "cannot rename/update directory while mirror activity is running", "mirror_root": busyPath, "reason": busyReason})
return
}
if newName == "" {
newName = filepath.Base(filepath.FromSlash(relPathClean))
}
if strings.EqualFold(newName, "repodata") {
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "repodata is reserved"})
return
}
if !isSafeSubdirName(newName) {
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid name"})
return
}
writeBlocked, writeBlockedPath, err = api.isRPMWriteBlocked(repo, relPathClean)
if err != nil {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
@@ -3220,15 +3462,95 @@ func (api *API) RepoRPMRenameSubdir(w http.ResponseWriter, r *http.Request, para
parentPath = filepath.FromSlash(parentRel)
newPath = filepath.Join(repo.Path, parentPath, newName)
newRelPath = filepath.ToSlash(filepath.Join(parentRel, newName))
_, err = api.Store.GetRPMRepoDir(repo.ID, newRelPath)
if err == nil {
targetConfigExists = true
} else if err != nil && !errors.Is(err, sql.ErrNoRows) {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
repodataPath = filepath.Join(fullPath, "repodata")
_, err = os.Stat(repodataPath)
if err == nil {
isRepoDir = true
newMode = normalizeRPMRepoDirMode(req.Mode)
existingConfig, err = api.Store.GetRPMRepoDir(repo.ID, relPathClean)
if err == nil {
existingConfigLoaded = true
if normalizeRPMRepoDirMode(existingConfig.Mode) == "mirror" && existingConfig.SyncRunning {
WriteJSON(w, http.StatusConflict, map[string]string{"error": "cannot rename mirror repo directory while sync is running"})
return
}
if normalizeRPMRepoDirMode(existingConfig.Mode) == "mirror" && api.RpmMeta != nil && api.RpmMeta.IsRunning(fullPath) {
WriteJSON(w, http.StatusConflict, map[string]string{"error": "cannot update mirror repo directory while metadata update is running"})
return
}
} else if err != nil && !errors.Is(err, sql.ErrNoRows) {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
modeValue = ""
if req.Mode != nil {
modeValue = *req.Mode
}
newMode = normalizeRPMRepoDirMode(modeValue)
if newMode == "" {
if existingConfigLoaded {
newMode = normalizeRPMRepoDirMode(existingConfig.Mode)
} else {
newMode = "local"
}
err = validateRPMMirrorConfig(newMode, strings.TrimSpace(req.RemoteURL), strings.TrimSpace(req.ConnectHost), strings.TrimSpace(req.HostHeader), strings.TrimSpace(req.TLSServerName))
}
if req.AllowDelete != nil {
allowDelete = *req.AllowDelete
} else if existingConfigLoaded {
allowDelete = existingConfig.AllowDelete
} else {
allowDelete = false
}
if req.TLSInsecureSkipVerify != nil {
tlsInsecureSkipVerify = *req.TLSInsecureSkipVerify
} else if existingConfigLoaded {
tlsInsecureSkipVerify = existingConfig.TLSInsecureSkipVerify
} else {
tlsInsecureSkipVerify = false
}
if req.SyncIntervalSec != nil {
syncIntervalSec = *req.SyncIntervalSec
} else if existingConfigLoaded {
syncIntervalSec = existingConfig.SyncIntervalSec
} else {
syncIntervalSec = 300
}
syncIntervalSec = normalizeRPMMirrorIntervalSec(syncIntervalSec)
if req.RemoteURL != nil {
remoteURL = strings.TrimSpace(*req.RemoteURL)
} else if existingConfigLoaded {
remoteURL = existingConfig.RemoteURL
} else {
remoteURL = ""
}
if req.ConnectHost != nil {
connectHost = strings.TrimSpace(*req.ConnectHost)
} else if existingConfigLoaded {
connectHost = existingConfig.ConnectHost
} else {
connectHost = ""
}
if req.HostHeader != nil {
hostHeader = strings.TrimSpace(*req.HostHeader)
} else if existingConfigLoaded {
hostHeader = existingConfig.HostHeader
} else {
hostHeader = ""
}
if req.TLSServerName != nil {
tlsServerName = strings.TrimSpace(*req.TLSServerName)
} else if existingConfigLoaded {
tlsServerName = existingConfig.TLSServerName
} else {
tlsServerName = ""
}
err = validateRPMMirrorConfig(newMode, remoteURL, connectHost, hostHeader, tlsServerName)
if err != nil {
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
return
@@ -3246,6 +3568,10 @@ func (api *API) RepoRPMRenameSubdir(w http.ResponseWriter, r *http.Request, para
}
renamed = newPath != fullPath
if renamed {
if targetConfigExists {
WriteJSON(w, http.StatusConflict, map[string]string{"error": "target repo directory config already exists"})
return
}
_, err = os.Stat(newPath)
if err == nil {
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "target already exists"})
@@ -3262,6 +3588,7 @@ func (api *API) RepoRPMRenameSubdir(w http.ResponseWriter, r *http.Request, para
}
err = api.Store.MoveRPMRepoDir(repo.ID, relPathClean, newRelPath)
if err != nil {
_ = os.Rename(newPath, fullPath)
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
@@ -3271,22 +3598,25 @@ func (api *API) RepoRPMRenameSubdir(w http.ResponseWriter, r *http.Request, para
RepoID: repo.ID,
Path: newRelPath,
Mode: newMode,
RemoteURL: strings.TrimSpace(req.RemoteURL),
ConnectHost: strings.TrimSpace(req.ConnectHost),
HostHeader: strings.TrimSpace(req.HostHeader),
TLSServerName: strings.TrimSpace(req.TLSServerName),
TLSInsecureSkipVerify: req.TLSInsecureSkipVerify,
SyncIntervalSec: normalizeRPMMirrorIntervalSec(req.SyncIntervalSec),
AllowDelete: allowDelete,
RemoteURL: remoteURL,
ConnectHost: connectHost,
HostHeader: hostHeader,
TLSServerName: tlsServerName,
TLSInsecureSkipVerify: tlsInsecureSkipVerify,
SyncIntervalSec: syncIntervalSec,
}
existingConfig, err = api.Store.GetRPMRepoDir(repo.ID, relPathClean)
if err == nil {
if existingConfigLoaded {
dirConfig.SyncEnabled = existingConfig.SyncEnabled
} else if errors.Is(err, sql.ErrNoRows) {
dirConfig.SyncEnabled = true
} else {
if err != nil {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
dirConfig.SyncEnabled = true
}
err = api.Store.UpsertRPMRepoDir(dirConfig)
if err != nil {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
@@ -3305,6 +3635,7 @@ func (api *API) RepoRPMDeleteFile(w http.ResponseWriter, r *http.Request, params
var repo models.Repo
var writeBlocked bool
var writeBlockedPath string
var allowMirrorDelete bool
var err error
var relPath string
var relPathClean string
@@ -3346,9 +3677,16 @@ func (api *API) RepoRPMDeleteFile(w http.ResponseWriter, r *http.Request, params
return
}
if writeBlocked {
allowMirrorDelete, err = api.allowRPMMirrorDelete(repo, relPathClean, false)
if err != nil {
WriteJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
}
if !allowMirrorDelete {
WriteJSON(w, http.StatusForbidden, map[string]string{"error": "writes are disabled for mirror repo subtree", "mirror_root": writeBlockedPath})
return
}
}
lower = strings.ToLower(relPathClean)
if !strings.HasSuffix(lower, ".rpm") {
WriteJSON(w, http.StatusBadRequest, map[string]string{"error": "only rpm files can be deleted"})
@@ -5063,6 +5401,78 @@ func (api *API) isRPMWriteBlocked(repo models.Repo, relPath string) (bool, strin
return true, root, nil
}
// allowRPMMirrorDelete reports whether a delete at relPath inside repo may
// proceed even though the path sits under a mirror-managed subtree.
// Deletes are permitted only when the governing mirror config has
// AllowDelete set, and never for the mirror root itself or (for
// directories) a path that carries its own repo-dir config row.
func (api *API) allowRPMMirrorDelete(repo models.Repo, relPath string, isDir bool) (bool, error) {
	root, err := api.findRPMMirrorRoot(repo, relPath)
	if err != nil {
		return false, err
	}
	if root == "" {
		// Not inside any mirror subtree: nothing restricts the delete.
		return true, nil
	}
	normalized := normalizeRPMPath(relPath)
	if normalized == normalizeRPMPath(root) {
		// The mirror root itself is never deletable through this path.
		return false, nil
	}
	cfg, err := api.Store.GetRPMRepoDir(repo.ID, normalizeRPMPath(root))
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return false, nil
		}
		return false, err
	}
	if !cfg.AllowDelete {
		return false, nil
	}
	if !isDir {
		return true, nil
	}
	// A directory with its own config row must not be removed here.
	_, err = api.Store.GetRPMRepoDir(repo.ID, normalized)
	switch {
	case err == nil:
		return false, nil
	case errors.Is(err, sql.ErrNoRows):
		return true, nil
	default:
		return false, err
	}
}
// hasBusyMirrorRootUnder reports whether any mirror-mode subdir whose root
// contains relPath is currently busy, either because a sync is running or
// because a metadata rebuild is in progress. It returns the busy root and a
// reason string ("sync_running" or "metadata_running").
func (api *API) hasBusyMirrorRootUnder(repo models.Repo, relPath string) (bool, string, string, error) {
	target := normalizeRPMPath(relPath)
	dirs, err := api.Store.ListRPMRepoDirs(repo.ID)
	if err != nil {
		return false, "", "", err
	}
	for _, dir := range dirs {
		if normalizeRPMRepoDirMode(dir.Mode) != "mirror" {
			continue
		}
		root := normalizeRPMPath(dir.Path)
		if !pathUnderRoot(root, target) {
			continue
		}
		if dir.SyncRunning {
			return true, root, "sync_running", nil
		}
		if api.RpmMeta == nil {
			continue
		}
		if api.RpmMeta.IsRunning(filepath.Join(repo.Path, filepath.FromSlash(root))) {
			return true, root, "metadata_running", nil
		}
	}
	return false, "", "", nil
}
// nameHasWhitespace reports whether name contains any Unicode whitespace rune.
func nameHasWhitespace(name string) bool {
	for _, r := range name {
		if unicode.IsSpace(r) {
			return true
		}
	}
	return false
}

View File

@@ -48,6 +48,7 @@ type RPMRepoDir struct {
RepoID string `json:"repo_id"`
Path string `json:"path"`
Mode string `json:"mode"`
AllowDelete bool `json:"allow_delete"`
RemoteURL string `json:"remote_url"`
ConnectHost string `json:"connect_host"`
HostHeader string `json:"host_header"`

View File

@@ -1,8 +1,11 @@
package rpm
import "log"
import "os"
import "path/filepath"
import "strings"
import "sync"
import "time"
import repokit "repokit"
@@ -24,6 +27,18 @@ func NewMetaManager() *MetaManager {
return mgr
}
// IsRunning reports whether a metadata rebuild job is currently executing
// for dir. It reads the shared state table under the manager mutex.
func (m *MetaManager) IsRunning(dir string) bool {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if state, ok := m.states[dir]; ok && state != nil {
		return state.inProgress
	}
	return false
}
func (m *MetaManager) Schedule(dir string) {
var state *metaState
var ok bool
@@ -46,7 +61,15 @@ func (m *MetaManager) Schedule(dir string) {
func (m *MetaManager) run(dir string) {
var err error
var opts repokit.RpmRepoOptions
var state *metaState
var ok bool
var repodataDir string
var repomdPath string
var entries []os.DirEntry
var repomdInfo os.FileInfo
var statErr error
for {
log.Printf("rpm metadata: job begin dir=%s", dir)
opts = repokit.RpmDefaultRepoOptions()
opts.LockMode = repokit.RpmLockFail
opts.AllowMissingRepomd = true
@@ -57,24 +80,55 @@ func (m *MetaManager) run(dir string) {
if err != nil {
if isLockError(err) {
log.Printf("rpm metadata: lock busy dir=%s err=%v", dir, err)
m.states[dir].pending = true
m.states[dir].inProgress = false
log.Printf("rpm metadata: job end dir=%s result=lock_busy", dir)
state, ok = m.states[dir]
if ok {
state.pending = true
state.inProgress = false
}
m.mutex.Unlock()
time.AfterFunc(2*time.Second, func() {
m.Schedule(dir)
})
return
}
log.Printf("rpm metadata: build failed dir=%s err=%v", dir, err)
m.states[dir].inProgress = false
log.Printf("rpm metadata: job end dir=%s result=failed err=%v", dir, err)
state, ok = m.states[dir]
if ok {
state.inProgress = false
}
m.mutex.Unlock()
return
}
repodataDir = filepath.Join(dir, "repodata")
repomdPath = filepath.Join(repodataDir, "repomd.xml")
entries, err = os.ReadDir(repodataDir)
if err != nil {
log.Printf("rpm metadata: post-check dir=%s repodata_dir=%s read_err=%v", dir, repodataDir, err)
} else {
statErr = nil
repomdInfo = nil
repomdInfo, statErr = os.Stat(repomdPath)
if statErr != nil {
log.Printf("rpm metadata: post-check dir=%s repodata_entries=%d repomd_path=%s repomd_err=%v", dir, len(entries), repomdPath, statErr)
} else {
log.Printf("rpm metadata: post-check dir=%s repodata_entries=%d repomd_path=%s repomd_size=%d", dir, len(entries), repomdPath, repomdInfo.Size())
}
}
log.Printf("rpm metadata: build done dir=%s", dir)
if m.states[dir].pending {
m.states[dir].pending = false
state, ok = m.states[dir]
if ok && state.pending {
log.Printf("rpm metadata: job end dir=%s result=pending_rerun", dir)
state.pending = false
m.mutex.Unlock()
continue
}
m.states[dir].inProgress = false
if ok {
state.inProgress = false
}
m.mutex.Unlock()
log.Printf("rpm metadata: job end dir=%s result=success", dir)
return
}
}

View File

@@ -2,7 +2,10 @@ package rpm
import "compress/gzip"
import "context"
import "crypto/md5"
import "crypto/sha1"
import "crypto/sha256"
import "crypto/sha512"
import "crypto/tls"
import "bytes"
import "encoding/hex"
@@ -16,7 +19,9 @@ import "net/http"
import "net/url"
import "os"
import "path/filepath"
import "strconv"
import "strings"
import "sync"
import "time"
import "codit/internal/db"
@@ -28,6 +33,8 @@ type MirrorManager struct {
logger *util.Logger
meta *MetaManager
stopCh chan struct{}
cancelMu sync.Mutex
cancelByKey map[string]context.CancelFunc
}
type repomdDoc struct {
@@ -50,6 +57,7 @@ type primaryDoc struct {
type primaryPackage struct {
Location primaryLocation `xml:"location"`
Checksum primaryChecksum `xml:"checksum"`
Time primaryTime `xml:"time"`
}
type primaryLocation struct {
@@ -61,6 +69,18 @@ type primaryChecksum struct {
Value string `xml:",chardata"`
}
type mirrorChecksum struct {
Algo string
Value string
BuildTime int64
FileTime int64
}
type primaryTime struct {
File string `xml:"file,attr"`
Build string `xml:"build,attr"`
}
type mirrorHTTPConfig struct {
BaseURL string
ConnectHost string
@@ -78,10 +98,28 @@ func NewMirrorManager(store *db.Store, logger *util.Logger, meta *MetaManager) *
logger: logger,
meta: meta,
stopCh: make(chan struct{}),
cancelByKey: make(map[string]context.CancelFunc),
}
return m
}
// CancelTask cancels an in-flight mirror sync for (repoID, path).
// It reports whether a running sync was found and signaled. A nil
// receiver is tolerated and simply reports false.
func (m *MirrorManager) CancelTask(repoID string, path string) bool {
	if m == nil {
		return false
	}
	m.cancelMu.Lock()
	cancel, ok := m.cancelByKey[mirrorTaskKey(repoID, path)]
	m.cancelMu.Unlock()
	if !ok || cancel == nil {
		return false
	}
	cancel()
	return true
}
func (m *MirrorManager) Start() {
var err error
var tasks []models.RPMMirrorTask
@@ -161,16 +199,33 @@ func (m *MirrorManager) syncOne(task models.RPMMirrorTask) {
var revision string
var primaryHref string
var primaryData []byte
var expected map[string]string
var expected map[string]mirrorChecksum
var duplicateCount int
var runID string
var startedAt int64
var total int64
var done int64
var failed int64
var deleted int64
var changed int64
var err error
var syncCtx context.Context
var syncCancel context.CancelFunc
var canceled bool
var key string
localRoot = filepath.Join(task.RepoPath, filepath.FromSlash(task.MirrorPath))
startedAt = time.Now().UTC().Unix()
syncCtx, syncCancel = context.WithCancel(context.Background())
key = mirrorTaskKey(task.RepoID, task.MirrorPath)
m.cancelMu.Lock()
m.cancelByKey[key] = syncCancel
m.cancelMu.Unlock()
defer func() {
m.cancelMu.Lock()
delete(m.cancelByKey, key)
m.cancelMu.Unlock()
syncCancel()
}()
runID, err = m.store.CreateRPMMirrorRun(task.RepoID, task.MirrorPath, startedAt)
if err != nil {
_ = m.store.FinishRPMMirrorTask(task.RepoID, task.MirrorPath, false, "", err.Error())
@@ -190,7 +245,7 @@ func (m *MirrorManager) syncOne(task models.RPMMirrorTask) {
m.logger.Write("rpm-mirror", util.LOG_INFO, "sync start repo=%s path=%s remote=%s", task.RepoID, task.MirrorPath, task.RemoteURL)
}
_ = m.store.UpdateRPMMirrorTaskProgress(task.RepoID, task.MirrorPath, "fetch_repodata", 0, 0, 0, 0)
repomdData, err = mirrorFetch(client, cfg, "repodata/repomd.xml")
repomdData, err = mirrorFetch(syncCtx, client, cfg, "repodata/repomd.xml")
if err != nil {
if m.logger != nil {
m.logger.Write("rpm-mirror", util.LOG_ERROR, "sync failed repo=%s path=%s step=fetch_repodata err=%v", task.RepoID, task.MirrorPath, err)
@@ -201,6 +256,9 @@ func (m *MirrorManager) syncOne(task models.RPMMirrorTask) {
}
revision = sha256HexBytes(repomdData)
if !task.Dirty && task.LastSyncedRevision != "" && task.LastSyncedRevision == revision {
if m.meta != nil {
ensureRepodata(task, localRoot, m.meta, m.logger)
}
if m.logger != nil {
m.logger.Write("rpm-mirror", util.LOG_INFO, "sync done repo=%s path=%s status=no_change revision=%s", task.RepoID, task.MirrorPath, revision)
}
@@ -218,7 +276,7 @@ func (m *MirrorManager) syncOne(task models.RPMMirrorTask) {
return
}
_ = m.store.UpdateRPMMirrorTaskProgress(task.RepoID, task.MirrorPath, "fetch_primary", 0, 0, 0, 0)
primaryData, err = mirrorFetch(client, cfg, primaryHref)
primaryData, err = mirrorFetch(syncCtx, client, cfg, primaryHref)
if err != nil {
if m.logger != nil {
m.logger.Write("rpm-mirror", util.LOG_ERROR, "sync failed repo=%s path=%s step=fetch_primary err=%v", task.RepoID, task.MirrorPath, err)
@@ -238,7 +296,7 @@ func (m *MirrorManager) syncOne(task models.RPMMirrorTask) {
return
}
}
expected, err = parsePrimaryPackages(primaryData)
expected, duplicateCount, err = parsePrimaryPackages(primaryData)
if err != nil {
if m.logger != nil {
m.logger.Write("rpm-mirror", util.LOG_ERROR, "sync failed repo=%s path=%s step=parse_primary err=%v", task.RepoID, task.MirrorPath, err)
@@ -247,16 +305,35 @@ func (m *MirrorManager) syncOne(task models.RPMMirrorTask) {
_ = m.store.FinishRPMMirrorRun(runID, time.Now().UTC().Unix(), "failed", "fetch_primary", 0, 0, 0, 0, "", err.Error())
return
}
total, done, failed, deleted, err = m.applyMirror(task, localRoot, client, cfg, expected)
if err != nil {
if m.logger != nil {
m.logger.Write("rpm-mirror", util.LOG_INFO, "primary parsed repo=%s path=%s primary_href=%s packages=%d", task.RepoID, task.MirrorPath, primaryHref, len(expected))
if duplicateCount > 0 {
m.logger.Write("rpm-mirror", util.LOG_WARN, "primary has duplicate package paths repo=%s path=%s primary_href=%s duplicates=%d", task.RepoID, task.MirrorPath, primaryHref, duplicateCount)
}
}
total, done, failed, deleted, changed, err = m.applyMirror(syncCtx, task, localRoot, client, cfg, expected)
if err != nil {
canceled = errors.Is(err, context.Canceled)
if m.logger != nil {
if canceled {
m.logger.Write("rpm-mirror", util.LOG_WARN, "sync canceled repo=%s path=%s step=apply total=%d done=%d failed=%d deleted=%d err=%v", task.RepoID, task.MirrorPath, total, done, failed, deleted, err)
} else {
m.logger.Write("rpm-mirror", util.LOG_ERROR, "sync failed repo=%s path=%s step=apply total=%d done=%d failed=%d deleted=%d err=%v", task.RepoID, task.MirrorPath, total, done, failed, deleted, err)
}
}
if canceled {
_ = m.store.FinishRPMMirrorTask(task.RepoID, task.MirrorPath, false, "", "sync canceled by user")
_ = m.store.FinishRPMMirrorRun(runID, time.Now().UTC().Unix(), "failed", "canceled", total, done, failed, deleted, "", "sync canceled by user")
} else {
_ = m.store.FinishRPMMirrorTask(task.RepoID, task.MirrorPath, false, "", err.Error())
_ = m.store.FinishRPMMirrorRun(runID, time.Now().UTC().Unix(), "failed", "apply", total, done, failed, deleted, "", err.Error())
}
return
}
if m.meta != nil {
if m.meta != nil && changed > 0 {
if m.logger != nil {
m.logger.Write("rpm-mirror", util.LOG_INFO, "repodata schedule repo=%s path=%s reason=sync_changed changed=%d", task.RepoID, task.MirrorPath, changed)
}
m.meta.Schedule(localRoot)
}
_ = m.store.FinishRPMMirrorTask(task.RepoID, task.MirrorPath, true, revision, "")
@@ -267,58 +344,91 @@ func (m *MirrorManager) syncOne(task models.RPMMirrorTask) {
}
}
func (m *MirrorManager) applyMirror(task models.RPMMirrorTask, localRoot string, client *http.Client, cfg mirrorHTTPConfig, expected map[string]string) (int64, int64, int64, int64, error) {
func (m *MirrorManager) applyMirror(ctx context.Context, task models.RPMMirrorTask, localRoot string, client *http.Client, cfg mirrorHTTPConfig, expected map[string]mirrorChecksum) (int64, int64, int64, int64, int64, error) {
var local map[string]bool
var total int64
var done int64
var failed int64
var deleted int64
var changed int64
var path string
var checksum string
var checksum mirrorChecksum
var fullPath string
var localSum string
var needDownload bool
var err error
local, err = listLocalRPMs(localRoot)
if err != nil {
return 0, 0, 0, 0, err
return 0, 0, 0, 0, 0, err
}
total = int64(len(expected))
_ = m.store.UpdateRPMMirrorTaskProgress(task.RepoID, task.MirrorPath, "apply", total, 0, 0, 0)
for path = range local {
if expected[path] != "" {
select {
case <-ctx.Done():
return total, done, failed, deleted, changed, ctx.Err()
default:
}
if expected[path].Value != "" {
continue
}
if m.logger != nil {
m.logger.Write("rpm-mirror", util.LOG_DEBUG, "delete local stale repo=%s path=%s file=%s", task.RepoID, task.MirrorPath, path)
}
err = os.Remove(filepath.Join(localRoot, filepath.FromSlash(path)))
if err == nil || os.IsNotExist(err) {
deleted = deleted + 1
changed = changed + 1
} else {
if m.logger != nil {
m.logger.Write("rpm-mirror", util.LOG_WARN, "delete local stale failed repo=%s path=%s file=%s err=%v", task.RepoID, task.MirrorPath, path, err)
}
}
}
for path, checksum = range expected {
select {
case <-ctx.Done():
return total, done, failed, deleted, changed, ctx.Err()
default:
}
fullPath = filepath.Join(localRoot, filepath.FromSlash(path))
needDownload = true
_, err = os.Stat(fullPath)
if err == nil {
localSum, err = sha256HexFile(fullPath)
if err == nil && (checksum == "" || strings.EqualFold(localSum, checksum)) {
localSum, err = fileHexByAlgo(fullPath, checksum.Algo)
if err == nil && (checksum.Value == "" || strings.EqualFold(localSum, checksum.Value)) {
needDownload = false
}
}
if needDownload {
err = mirrorDownload(client, cfg, path, fullPath, checksum)
if m.logger != nil {
m.logger.Write("rpm-mirror", util.LOG_DEBUG, "download start repo=%s path=%s file=%s checksum_type=%s checksum=%s", task.RepoID, task.MirrorPath, path, checksum.Algo, checksum.Value)
}
err = mirrorDownload(ctx, client, cfg, path, fullPath, checksum.Algo, checksum.Value)
if err != nil {
failed = failed + 1
if m.logger != nil {
m.logger.Write("rpm-mirror", util.LOG_WARN, "download failed repo=%s path=%s file=%s err=%v", task.RepoID, task.MirrorPath, path, err)
}
_ = m.store.UpdateRPMMirrorTaskProgress(task.RepoID, task.MirrorPath, "apply", total, done, failed, deleted)
continue
}
changed = changed + 1
if m.logger != nil {
m.logger.Write("rpm-mirror", util.LOG_DEBUG, "download done repo=%s path=%s file=%s", task.RepoID, task.MirrorPath, path)
}
} else {
if m.logger != nil {
m.logger.Write("rpm-mirror", util.LOG_DEBUG, "download skip repo=%s path=%s file=%s reason=up-to-date", task.RepoID, task.MirrorPath, path)
}
}
done = done + 1
_ = m.store.UpdateRPMMirrorTaskProgress(task.RepoID, task.MirrorPath, "apply", total, done, failed, deleted)
}
if failed > 0 {
return total, done, failed, deleted, errors.New("some mirror files failed to sync")
return total, done, failed, deleted, changed, errors.New("some mirror files failed to sync")
}
return total, done, failed, deleted, nil
return total, done, failed, deleted, changed, nil
}
func buildMirrorHTTPConfig(task models.RPMMirrorTask) (mirrorHTTPConfig, error) {
@@ -391,14 +501,14 @@ func effectiveServerName(cfg mirrorHTTPConfig) string {
return cfg.DefaultServer
}
func mirrorFetch(client *http.Client, cfg mirrorHTTPConfig, rel string) ([]byte, error) {
func mirrorFetch(ctx context.Context, client *http.Client, cfg mirrorHTTPConfig, rel string) ([]byte, error) {
var fullURL string
var req *http.Request
var res *http.Response
var body []byte
var err error
fullURL = joinRemoteURL(cfg.BaseURL, rel)
req, err = http.NewRequest(http.MethodGet, fullURL, nil)
req, err = http.NewRequestWithContext(ctx, http.MethodGet, fullURL, nil)
if err != nil {
return nil, err
}
@@ -418,7 +528,7 @@ func mirrorFetch(client *http.Client, cfg mirrorHTTPConfig, rel string) ([]byte,
return body, nil
}
func mirrorDownload(client *http.Client, cfg mirrorHTTPConfig, rel string, dstPath string, checksum string) error {
func mirrorDownload(ctx context.Context, client *http.Client, cfg mirrorHTTPConfig, rel string, dstPath string, checksumType string, checksum string) error {
var fullURL string
var req *http.Request
var res *http.Response
@@ -426,13 +536,16 @@ func mirrorDownload(client *http.Client, cfg mirrorHTTPConfig, rel string, dstPa
var out *os.File
var hash hashWriter
var copied int64
var actualSum string
var contentType string
var finalURL string
var err error
err = os.MkdirAll(filepath.Dir(dstPath), 0o755)
if err != nil {
return err
}
fullURL = joinRemoteURL(cfg.BaseURL, rel)
req, err = http.NewRequest(http.MethodGet, fullURL, nil)
req, err = http.NewRequestWithContext(ctx, http.MethodGet, fullURL, nil)
if err != nil {
return err
}
@@ -445,13 +558,22 @@ func mirrorDownload(client *http.Client, cfg mirrorHTTPConfig, rel string, dstPa
if res.StatusCode < 200 || res.StatusCode >= 300 {
return errors.New("upstream request failed: " + res.Status)
}
contentType = strings.TrimSpace(res.Header.Get("Content-Type"))
finalURL = ""
if res.Request != nil && res.Request.URL != nil {
finalURL = res.Request.URL.String()
}
tempPath = dstPath + ".mirror.tmp"
out, err = os.Create(tempPath)
if err != nil {
return err
}
defer out.Close()
hash = newHashWriter()
hash, err = newHashWriter(checksumType)
if err != nil {
_ = os.Remove(tempPath)
return err
}
copied, err = io.Copy(io.MultiWriter(out, hash), res.Body)
_ = copied
if err != nil {
@@ -464,9 +586,17 @@ func mirrorDownload(client *http.Client, cfg mirrorHTTPConfig, rel string, dstPa
return err
}
if strings.TrimSpace(checksum) != "" {
if !strings.EqualFold(hash.Sum(), strings.TrimSpace(checksum)) {
actualSum = hash.Sum()
if !strings.EqualFold(actualSum, strings.TrimSpace(checksum)) {
_ = os.Remove(tempPath)
return errors.New("download checksum mismatch for " + rel)
return errors.New(
"download checksum mismatch for " + rel +
" type=" + normalizeChecksumAlgo(checksumType) +
" expected=" + strings.TrimSpace(checksum) +
" actual=" + actualSum +
" bytes=" + int64ToString(copied) +
" content_type=" + contentType +
" url=" + finalURL)
}
}
err = os.Rename(tempPath, dstPath)
@@ -478,10 +608,27 @@ func mirrorDownload(client *http.Client, cfg mirrorHTTPConfig, rel string, dstPa
}
// joinRemoteURL resolves a repository-relative href against the mirror base
// URL. Backslashes in rel are normalized to forward slashes first. When
// either part fails to parse as a URL, it falls back to plain string
// concatenation of base and rel.
func joinRemoteURL(base string, rel string) string {
	normalized := strings.ReplaceAll(rel, "\\", "/")
	concat := func() string {
		return strings.TrimRight(base, "/") + "/" + strings.TrimLeft(normalized, "/")
	}
	parsedBase, err := url.Parse(strings.TrimSpace(base))
	if err != nil || parsedBase == nil {
		return concat()
	}
	// Treat base as a directory root: without a trailing slash,
	// ResolveReference would replace its final path segment.
	if !strings.HasSuffix(parsedBase.Path, "/") {
		parsedBase.Path += "/"
	}
	parsedRel, err := url.Parse(strings.TrimSpace(normalized))
	if err != nil || parsedRel == nil {
		return concat()
	}
	return parsedBase.ResolveReference(parsedRel).String()
}
func parseRepomdPrimaryHref(data []byte) (string, error) {
var doc repomdDoc
@@ -503,18 +650,24 @@ func parseRepomdPrimaryHref(data []byte) (string, error) {
return "", errors.New("primary metadata not found in repomd")
}
func parsePrimaryPackages(data []byte) (map[string]string, error) {
func parsePrimaryPackages(data []byte) (map[string]mirrorChecksum, int, error) {
var doc primaryDoc
var out map[string]string
var out map[string]mirrorChecksum
var i int
var path string
var checksum string
var checksumType string
var fileTime int64
var buildTime int64
var existing mirrorChecksum
var ok bool
var duplicates int
var err error
err = xml.Unmarshal(data, &doc)
if err != nil {
return nil, err
return nil, 0, err
}
out = make(map[string]string)
out = make(map[string]mirrorChecksum)
for i = 0; i < len(doc.Packages); i++ {
path = strings.TrimSpace(doc.Packages[i].Location.Href)
if path == "" {
@@ -524,9 +677,23 @@ func parsePrimaryPackages(data []byte) (map[string]string, error) {
continue
}
checksum = strings.TrimSpace(doc.Packages[i].Checksum.Value)
out[path] = strings.ToLower(checksum)
checksumType = strings.TrimSpace(doc.Packages[i].Checksum.Type)
fileTime = parseTimeAttr(doc.Packages[i].Time.File)
buildTime = parseTimeAttr(doc.Packages[i].Time.Build)
if existing, ok = out[path]; ok {
duplicates = duplicates + 1
if !shouldReplaceDuplicate(existing, buildTime, fileTime, checksum) {
continue
}
return out, nil
}
out[path] = mirrorChecksum{
Algo: normalizeChecksumAlgo(checksumType),
Value: strings.ToLower(checksum),
BuildTime: buildTime,
FileTime: fileTime,
}
}
return out, duplicates, nil
}
func listLocalRPMs(root string) (map[string]bool, error) {
@@ -569,7 +736,7 @@ func sha256HexBytes(data []byte) string {
return hex.EncodeToString(sum[:])
}
func sha256HexFile(path string) (string, error) {
func fileHexByAlgo(path string, algo string) (string, error) {
var file *os.File
var hash hashWriter
var copied int64
@@ -579,7 +746,10 @@ func sha256HexFile(path string) (string, error) {
return "", err
}
defer file.Close()
hash = newHashWriter()
hash, err = newHashWriter(algo)
if err != nil {
return "", err
}
copied, err = io.Copy(hash, file)
_ = copied
if err != nil {
@@ -615,10 +785,29 @@ type shaWriter struct {
h hash.Hash
}
func newHashWriter() hashWriter {
func newHashWriter(algo string) (hashWriter, error) {
var w *shaWriter
w = &shaWriter{h: sha256.New()}
return w
var normalized string
var h hash.Hash
normalized = normalizeChecksumAlgo(algo)
switch normalized {
case "", "sha256":
h = sha256.New()
case "sha", "sha1":
h = sha1.New()
case "sha224":
h = sha256.New224()
case "sha384":
h = sha512.New384()
case "sha512":
h = sha512.New()
case "md5":
h = md5.New()
default:
return nil, errors.New("unsupported checksum type: " + normalized)
}
w = &shaWriter{h: h}
return w, nil
}
func (w *shaWriter) Write(p []byte) (int, error) {
@@ -630,3 +819,90 @@ func (w *shaWriter) Sum() string {
raw = w.h.Sum(nil)
return hex.EncodeToString(raw)
}
// normalizeChecksumAlgo canonicalizes a repomd checksum-type label:
// lowercase, surrounding whitespace removed, and "-"/"_" separators
// stripped (so "SHA-256" and "sha_256" both become "sha256").
// Unrecognized labels pass through in normalized form; newHashWriter
// decides which of them are actually supported.
//
// The previous if-chain was dead code: every branch returned exactly the
// value the final return statement would have produced.
func normalizeChecksumAlgo(algo string) string {
	out := strings.ToLower(strings.TrimSpace(algo))
	out = strings.ReplaceAll(out, "-", "")
	return strings.ReplaceAll(out, "_", "")
}
// int64ToString renders v as a base-10 decimal string.
func int64ToString(v int64) string {
	return string(strconv.AppendInt(nil, v, 10))
}
// mirrorTaskKey builds the map key identifying one mirror sync task.
// A NUL byte separates the parts; assuming neither part contains NUL
// (repo IDs and repo-relative paths normally do not), keys are unambiguous.
func mirrorTaskKey(repoID string, path string) string {
	var b strings.Builder
	b.WriteString(repoID)
	b.WriteByte(0)
	b.WriteString(path)
	return b.String()
}
// ensureRepodata schedules a metadata rebuild for localRoot when the
// repository's repodata/repomd.xml is missing, so a mirror whose remote
// content was unchanged still ends up with usable local metadata.
// It is a no-op when repomd.xml already exists.
func ensureRepodata(task models.RPMMirrorTask, localRoot string, meta *MetaManager, logger *util.Logger) {
	repomd := filepath.Join(localRoot, "repodata", "repomd.xml")
	if _, err := os.Stat(repomd); err == nil {
		// Metadata is already present; nothing to schedule.
		return
	}
	if logger != nil {
		logger.Write("rpm-mirror", util.LOG_INFO, "repodata schedule repo=%s path=%s reason=missing repomd=%s", task.RepoID, task.MirrorPath, repomd)
	}
	meta.Schedule(localRoot)
}
// parseTimeAttr parses a primary.xml time attribute (file/build epoch
// seconds) as a base-10 integer. Empty or malformed values yield 0.
func parseTimeAttr(value string) int64 {
	trimmed := strings.TrimSpace(value)
	if trimmed == "" {
		return 0
	}
	if parsed, err := strconv.ParseInt(trimmed, 10, 64); err == nil {
		return parsed
	}
	return 0
}
// shouldReplaceDuplicate reports whether a duplicate primary.xml entry for
// the same package path should win over the entry recorded so far.
// A newer build time wins; file time breaks build-time ties; as a last
// resort, an entry that carries a checksum beats one that does not.
func shouldReplaceDuplicate(existing mirrorChecksum, newBuildTime int64, newFileTime int64, newChecksum string) bool {
	switch {
	case newBuildTime != existing.BuildTime:
		return newBuildTime > existing.BuildTime
	case newFileTime != existing.FileTime:
		return newFileTime > existing.FileTime
	default:
		return strings.TrimSpace(existing.Value) == "" && strings.TrimSpace(newChecksum) != ""
	}
}

View File

@@ -1,15 +1,11 @@
package rpm
import "bufio"
import "bytes"
import "errors"
import "io/fs"
import "os/exec"
import "path/filepath"
import "sort"
import "strconv"
import "strings"
import "sync"
import repokit "repokit"
type PackageSummary struct {
Filename string `json:"filename"`
@@ -30,35 +26,23 @@ type PackageDetail struct {
Files []string `json:"files"`
Requires []string `json:"requires"`
Provides []string `json:"provides"`
Changelogs []PackageChangeLog `json:"changelogs"`
}
var rpmPath string
var rpmOnce sync.Once
var rpmErr error
func ensureRPM() error {
rpmOnce.Do(func() {
var path string
path, rpmErr = exec.LookPath("rpm")
if rpmErr != nil {
return
}
rpmPath = path
})
return rpmErr
type PackageChangeLog struct {
Author string `json:"author"`
Date int64 `json:"date"`
Text string `json:"text"`
}
func ListPackages(repoPath string) ([]PackageSummary, error) {
var err error
var packages []PackageSummary
var walkErr error
err = ensureRPM()
if err != nil {
return nil, err
}
var err error
walkErr = filepath.WalkDir(repoPath, func(path string, entry fs.DirEntry, entryErr error) error {
var lower string
var rel string
var pkg *repokit.RpmPackage
var summary PackageSummary
if entryErr != nil {
return entryErr
@@ -74,11 +58,11 @@ func ListPackages(repoPath string) ([]PackageSummary, error) {
if err != nil {
return err
}
summary, err = querySummary(path)
pkg, err = repokit.RpmPackageFromRpmBase(path, 0)
if err != nil {
return nil
}
summary.Filename = filepath.ToSlash(rel)
summary = packageSummaryFromRepokit(pkg, filepath.ToSlash(rel))
packages = append(packages, summary)
return nil
})
@@ -86,137 +70,135 @@ func ListPackages(repoPath string) ([]PackageSummary, error) {
return nil, walkErr
}
sort.Slice(packages, func(i int, j int) bool {
if packages[i].Name == packages[j].Name {
return packages[i].Filename < packages[j].Filename
}
return packages[i].Name < packages[j].Name
})
return packages, nil
}
func GetPackageDetail(repoPath string, filename string) (PackageDetail, error) {
var err error
var detail PackageDetail
var fullPath string
var data []string
var fileList []string
var requires []string
var provides []string
var buildTime int64
var size int64
err = ensureRPM()
if err != nil {
return detail, err
}
var pkg *repokit.RpmPackage
var err error
fullPath = filepath.Join(repoPath, filepath.FromSlash(filename))
data, err = queryFields(fullPath, "%{NAME}\n%{VERSION}\n%{RELEASE}\n%{ARCH}\n%{SUMMARY}\n%{DESCRIPTION}\n%{LICENSE}\n%{URL}\n%{BUILDTIME}\n%{SIZE}\n")
pkg, err = repokit.RpmPackageFromRpmBase(fullPath, 256)
if err != nil {
return detail, err
}
if len(data) < 10 {
return detail, errors.New("rpm query returned incomplete metadata")
}
buildTime, _ = strconv.ParseInt(strings.TrimSpace(data[8]), 10, 64)
size, _ = strconv.ParseInt(strings.TrimSpace(data[9]), 10, 64)
fileList, _ = queryList(fullPath)
requires, _ = queryLines(fullPath, "--requires")
provides, _ = queryLines(fullPath, "--provides")
detail = PackageDetail{
PackageSummary: PackageSummary{
Filename: filename,
Name: strings.TrimSpace(data[0]),
Version: strings.TrimSpace(data[1]),
Release: strings.TrimSpace(data[2]),
Arch: strings.TrimSpace(data[3]),
Summary: strings.TrimSpace(data[4]),
},
Description: strings.TrimSpace(data[5]),
License: strings.TrimSpace(data[6]),
URL: strings.TrimSpace(data[7]),
BuildTime: buildTime,
Size: size,
Files: fileList,
Requires: requires,
Provides: provides,
}
detail = packageDetailFromRepokit(pkg, filename)
return detail, nil
}
func querySummary(path string) (PackageSummary, error) {
var fields []string
var err error
func packageSummaryFromRepokit(pkg *repokit.RpmPackage, filename string) PackageSummary {
var summary PackageSummary
fields, err = queryFields(path, "%{NAME}\n%{VERSION}\n%{RELEASE}\n%{ARCH}\n%{SUMMARY}\n")
if err != nil {
return summary, err
summary = PackageSummary{
Filename: filename,
Name: strings.TrimSpace(pkg.Name),
Version: strings.TrimSpace(pkg.Version),
Release: strings.TrimSpace(pkg.Release),
Arch: strings.TrimSpace(pkg.Arch),
Summary: strings.TrimSpace(pkg.Summary),
}
if len(fields) < 5 {
return summary, errors.New("rpm query returned incomplete metadata")
}
summary.Name = strings.TrimSpace(fields[0])
summary.Version = strings.TrimSpace(fields[1])
summary.Release = strings.TrimSpace(fields[2])
summary.Arch = strings.TrimSpace(fields[3])
summary.Summary = strings.TrimSpace(fields[4])
return summary, nil
return summary
}
func queryFields(path string, format string) ([]string, error) {
var output []byte
var err error
var list []string
output, err = runRPM(path, "-qp", "--qf", format)
if err != nil {
return nil, err
}
list = strings.Split(strings.TrimSuffix(string(output), "\n"), "\n")
return list, nil
}
func queryList(path string) ([]string, error) {
var output []byte
var err error
var scanner *bufio.Scanner
func packageDetailFromRepokit(pkg *repokit.RpmPackage, filename string) PackageDetail {
var files []string
output, err = runRPM(path, "-qlp")
if err != nil {
return nil, err
var file repokit.RpmPackageFile
var changelogs []PackageChangeLog
var changelog repokit.RpmChangelogEntry
var detail PackageDetail
files = make([]string, 0, len(pkg.Files))
for _, file = range pkg.Files {
if file.FullPath == "" {
continue
}
scanner = bufio.NewScanner(bytes.NewReader(output))
for scanner.Scan() {
files = append(files, scanner.Text())
files = append(files, file.FullPath)
}
return files, nil
changelogs = make([]PackageChangeLog, 0, len(pkg.Changelogs))
for _, changelog = range pkg.Changelogs {
changelogs = append(changelogs, PackageChangeLog{
Author: strings.TrimSpace(changelog.Author),
Date: changelog.Date,
Text: strings.TrimSpace(changelog.Changelog),
})
}
sort.SliceStable(changelogs, func(i int, j int) bool {
return changelogs[i].Date > changelogs[j].Date
})
sort.Strings(files)
detail = PackageDetail{
PackageSummary: packageSummaryFromRepokit(pkg, filename),
Description: strings.TrimSpace(pkg.Description),
License: strings.TrimSpace(pkg.RpmLicense),
URL: strings.TrimSpace(pkg.Url),
BuildTime: pkg.TimeBuild,
Size: pkg.SizePackage,
Files: files,
Requires: dependencyListToStrings(pkg.Requires),
Provides: dependencyListToStrings(pkg.Provides),
Changelogs: changelogs,
}
return detail
}
func queryLines(path string, flag string) ([]string, error) {
var output []byte
var err error
var scanner *bufio.Scanner
func dependencyListToStrings(deps []repokit.RpmDependency) []string {
var lines []string
output, err = runRPM(path, "-qp", flag)
if err != nil {
return nil, err
var dep repokit.RpmDependency
lines = make([]string, 0, len(deps))
for _, dep = range deps {
lines = append(lines, dependencyToString(dep))
}
scanner = bufio.NewScanner(bytes.NewReader(output))
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
return lines, nil
return lines
}
func runRPM(path string, args ...string) ([]byte, error) {
var err error
var cmd *exec.Cmd
var output []byte
var full []string
err = ensureRPM()
if err != nil {
return nil, err
func dependencyToString(dep repokit.RpmDependency) string {
var op string
var evr string
var line string
op = normalizeDependencyOp(dep.Flags)
evr = dependencyEVR(dep)
line = dep.Name
if op == "" || evr == "" {
return line
}
full = append([]string{}, args...)
full = append(full, path)
cmd = exec.Command(rpmPath, full...)
output, err = cmd.Output()
if err != nil {
return nil, err
line = line + " " + op + " " + evr
return line
}
return output, nil
// normalizeDependencyOp translates an RPM dependency comparison flag
// (e.g. "LT", "ge") into its symbolic operator ("<", ">=", ...).
// Flags that are not one of the five known comparisons are returned
// trimmed but otherwise untouched.
func normalizeDependencyOp(flag string) string {
	trimmed := strings.TrimSpace(flag)
	switch strings.ToUpper(trimmed) {
	case "LT":
		return "<"
	case "LE":
		return "<="
	case "GT":
		return ">"
	case "GE":
		return ">="
	case "EQ":
		return "="
	}
	// Unknown flag: pass it through so callers still see something useful.
	return trimmed
}
// dependencyEVR renders the epoch:version-release portion of an RPM
// dependency. An empty (or whitespace-only) version yields "". A
// non-empty release is appended as "-release", and an epoch other than
// "" or "0" is prefixed as "epoch:".
func dependencyEVR(dep repokit.RpmDependency) string {
	version := strings.TrimSpace(dep.Version)
	if version == "" {
		return ""
	}
	release := strings.TrimSpace(dep.Release)
	epoch := strings.TrimSpace(dep.Epoch)
	evr := version
	if release != "" {
		evr += "-" + release
	}
	// Epoch "0" is the implicit default in RPM, so it is omitted.
	if epoch != "" && epoch != "0" {
		evr = epoch + ":" + evr
	}
	return evr
}

View File

@@ -1,19 +1,96 @@
ALTER TABLE rpm_repo_dirs ADD COLUMN sync_interval_sec INTEGER NOT NULL DEFAULT 300;
ALTER TABLE rpm_repo_dirs ADD COLUMN dirty INTEGER NOT NULL DEFAULT 1;
ALTER TABLE rpm_repo_dirs ADD COLUMN next_sync_at INTEGER NOT NULL DEFAULT 0;
ALTER TABLE rpm_repo_dirs ADD COLUMN sync_running INTEGER NOT NULL DEFAULT 0;
ALTER TABLE rpm_repo_dirs ADD COLUMN sync_status TEXT NOT NULL DEFAULT 'idle';
ALTER TABLE rpm_repo_dirs ADD COLUMN sync_error TEXT NOT NULL DEFAULT '';
ALTER TABLE rpm_repo_dirs ADD COLUMN sync_step TEXT NOT NULL DEFAULT '';
ALTER TABLE rpm_repo_dirs ADD COLUMN sync_total INTEGER NOT NULL DEFAULT 0;
ALTER TABLE rpm_repo_dirs ADD COLUMN sync_done INTEGER NOT NULL DEFAULT 0;
ALTER TABLE rpm_repo_dirs ADD COLUMN sync_failed INTEGER NOT NULL DEFAULT 0;
ALTER TABLE rpm_repo_dirs ADD COLUMN sync_deleted INTEGER NOT NULL DEFAULT 0;
ALTER TABLE rpm_repo_dirs ADD COLUMN last_sync_started_at INTEGER NOT NULL DEFAULT 0;
ALTER TABLE rpm_repo_dirs ADD COLUMN last_sync_finished_at INTEGER NOT NULL DEFAULT 0;
ALTER TABLE rpm_repo_dirs ADD COLUMN last_sync_success_at INTEGER NOT NULL DEFAULT 0;
ALTER TABLE rpm_repo_dirs ADD COLUMN last_synced_revision TEXT NOT NULL DEFAULT '';
PRAGMA foreign_keys = OFF;
UPDATE rpm_repo_dirs
SET dirty = 1, next_sync_at = 0
WHERE mode = 'mirror';
BEGIN TRANSACTION;
ALTER TABLE rpm_repo_dirs RENAME TO rpm_repo_dirs_old;
CREATE TABLE rpm_repo_dirs (
repo_id TEXT NOT NULL,
path TEXT NOT NULL,
mode TEXT NOT NULL DEFAULT 'local',
remote_url TEXT NOT NULL DEFAULT '',
connect_host TEXT NOT NULL DEFAULT '',
host_header TEXT NOT NULL DEFAULT '',
tls_server_name TEXT NOT NULL DEFAULT '',
tls_insecure_skip_verify INTEGER NOT NULL DEFAULT 0,
sync_interval_sec INTEGER NOT NULL DEFAULT 300,
dirty INTEGER NOT NULL DEFAULT 1,
next_sync_at INTEGER NOT NULL DEFAULT 0,
sync_running INTEGER NOT NULL DEFAULT 0,
sync_status TEXT NOT NULL DEFAULT 'idle',
sync_error TEXT NOT NULL DEFAULT '',
sync_step TEXT NOT NULL DEFAULT '',
sync_total INTEGER NOT NULL DEFAULT 0,
sync_done INTEGER NOT NULL DEFAULT 0,
sync_failed INTEGER NOT NULL DEFAULT 0,
sync_deleted INTEGER NOT NULL DEFAULT 0,
last_sync_started_at INTEGER NOT NULL DEFAULT 0,
last_sync_finished_at INTEGER NOT NULL DEFAULT 0,
last_sync_success_at INTEGER NOT NULL DEFAULT 0,
last_synced_revision TEXT NOT NULL DEFAULT '',
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL,
PRIMARY KEY (repo_id, path),
FOREIGN KEY (repo_id) REFERENCES repos(id) ON DELETE CASCADE
);
INSERT INTO rpm_repo_dirs (
repo_id,
path,
mode,
remote_url,
connect_host,
host_header,
tls_server_name,
tls_insecure_skip_verify,
sync_interval_sec,
dirty,
next_sync_at,
sync_running,
sync_status,
sync_error,
sync_step,
sync_total,
sync_done,
sync_failed,
sync_deleted,
last_sync_started_at,
last_sync_finished_at,
last_sync_success_at,
last_synced_revision,
created_at,
updated_at
)
SELECT
repo_id,
path,
mode,
remote_url,
connect_host,
host_header,
tls_server_name,
tls_insecure_skip_verify,
300,
CASE WHEN mode = 'mirror' THEN 1 ELSE 0 END,
CASE WHEN mode = 'mirror' THEN 0 ELSE 0 END,
0,
'idle',
'',
'',
0,
0,
0,
0,
0,
0,
0,
'',
created_at,
updated_at
FROM rpm_repo_dirs_old;
DROP TABLE rpm_repo_dirs_old;
COMMIT;
PRAGMA foreign_keys = ON;

View File

@@ -1,5 +1,99 @@
ALTER TABLE rpm_repo_dirs ADD COLUMN sync_enabled INTEGER NOT NULL DEFAULT 1;
PRAGMA foreign_keys = OFF;
UPDATE rpm_repo_dirs
SET sync_enabled = 1
WHERE mode = 'mirror';
BEGIN TRANSACTION;
CREATE TABLE rpm_repo_dirs_new (
repo_id TEXT NOT NULL,
path TEXT NOT NULL,
mode TEXT NOT NULL DEFAULT 'local',
remote_url TEXT NOT NULL DEFAULT '',
connect_host TEXT NOT NULL DEFAULT '',
host_header TEXT NOT NULL DEFAULT '',
tls_server_name TEXT NOT NULL DEFAULT '',
tls_insecure_skip_verify INTEGER NOT NULL DEFAULT 0,
sync_interval_sec INTEGER NOT NULL DEFAULT 300,
sync_enabled INTEGER NOT NULL DEFAULT 1,
dirty INTEGER NOT NULL DEFAULT 1,
next_sync_at INTEGER NOT NULL DEFAULT 0,
sync_running INTEGER NOT NULL DEFAULT 0,
sync_status TEXT NOT NULL DEFAULT 'idle',
sync_error TEXT NOT NULL DEFAULT '',
sync_step TEXT NOT NULL DEFAULT '',
sync_total INTEGER NOT NULL DEFAULT 0,
sync_done INTEGER NOT NULL DEFAULT 0,
sync_failed INTEGER NOT NULL DEFAULT 0,
sync_deleted INTEGER NOT NULL DEFAULT 0,
last_sync_started_at INTEGER NOT NULL DEFAULT 0,
last_sync_finished_at INTEGER NOT NULL DEFAULT 0,
last_sync_success_at INTEGER NOT NULL DEFAULT 0,
last_synced_revision TEXT NOT NULL DEFAULT '',
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL,
PRIMARY KEY (repo_id, path),
FOREIGN KEY (repo_id) REFERENCES repos(id) ON DELETE CASCADE
);
INSERT INTO rpm_repo_dirs_new (
repo_id,
path,
mode,
remote_url,
connect_host,
host_header,
tls_server_name,
tls_insecure_skip_verify,
sync_interval_sec,
sync_enabled,
dirty,
next_sync_at,
sync_running,
sync_status,
sync_error,
sync_step,
sync_total,
sync_done,
sync_failed,
sync_deleted,
last_sync_started_at,
last_sync_finished_at,
last_sync_success_at,
last_synced_revision,
created_at,
updated_at
)
SELECT
repo_id,
path,
mode,
remote_url,
connect_host,
host_header,
tls_server_name,
tls_insecure_skip_verify,
sync_interval_sec,
1,
dirty,
next_sync_at,
sync_running,
sync_status,
sync_error,
sync_step,
sync_total,
sync_done,
sync_failed,
sync_deleted,
last_sync_started_at,
last_sync_finished_at,
last_sync_success_at,
last_synced_revision,
created_at,
updated_at
FROM rpm_repo_dirs;
DROP TABLE rpm_repo_dirs;
ALTER TABLE rpm_repo_dirs_new RENAME TO rpm_repo_dirs;
COMMIT;
PRAGMA foreign_keys = ON;

View File

@@ -0,0 +1,102 @@
-- Migration: rebuild rpm_repo_dirs to introduce the allow_delete column
-- (INTEGER NOT NULL DEFAULT 0; existing rows are backfilled with 0).
-- Uses the SQLite table-rebuild pattern: create the new schema, copy all
-- rows, drop the old table, rename. Foreign-key enforcement is switched
-- off for the duration so DROP/RENAME do not trip constraint checks.
PRAGMA foreign_keys = OFF;
BEGIN TRANSACTION;
-- New schema: identical to the previous rpm_repo_dirs except for the
-- added allow_delete flag placed after mode.
CREATE TABLE rpm_repo_dirs_new (
repo_id TEXT NOT NULL,
path TEXT NOT NULL,
mode TEXT NOT NULL DEFAULT 'local',
allow_delete INTEGER NOT NULL DEFAULT 0,
remote_url TEXT NOT NULL DEFAULT '',
connect_host TEXT NOT NULL DEFAULT '',
host_header TEXT NOT NULL DEFAULT '',
tls_server_name TEXT NOT NULL DEFAULT '',
tls_insecure_skip_verify INTEGER NOT NULL DEFAULT 0,
sync_interval_sec INTEGER NOT NULL DEFAULT 300,
sync_enabled INTEGER NOT NULL DEFAULT 1,
dirty INTEGER NOT NULL DEFAULT 1,
next_sync_at INTEGER NOT NULL DEFAULT 0,
sync_running INTEGER NOT NULL DEFAULT 0,
sync_status TEXT NOT NULL DEFAULT 'idle',
sync_error TEXT NOT NULL DEFAULT '',
sync_step TEXT NOT NULL DEFAULT '',
sync_total INTEGER NOT NULL DEFAULT 0,
sync_done INTEGER NOT NULL DEFAULT 0,
sync_failed INTEGER NOT NULL DEFAULT 0,
sync_deleted INTEGER NOT NULL DEFAULT 0,
last_sync_started_at INTEGER NOT NULL DEFAULT 0,
last_sync_finished_at INTEGER NOT NULL DEFAULT 0,
last_sync_success_at INTEGER NOT NULL DEFAULT 0,
last_synced_revision TEXT NOT NULL DEFAULT '',
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL,
PRIMARY KEY (repo_id, path),
FOREIGN KEY (repo_id) REFERENCES repos(id) ON DELETE CASCADE
);
-- Copy every existing row into the new table. Column lists are spelled
-- out explicitly so the copy stays correct regardless of column order.
INSERT INTO rpm_repo_dirs_new (
repo_id,
path,
mode,
allow_delete,
remote_url,
connect_host,
host_header,
tls_server_name,
tls_insecure_skip_verify,
sync_interval_sec,
sync_enabled,
dirty,
next_sync_at,
sync_running,
sync_status,
sync_error,
sync_step,
sync_total,
sync_done,
sync_failed,
sync_deleted,
last_sync_started_at,
last_sync_finished_at,
last_sync_success_at,
last_synced_revision,
created_at,
updated_at
)
SELECT
repo_id,
path,
mode,
-- Backfill: existing subdirs start with deletes disallowed.
0,
remote_url,
connect_host,
host_header,
tls_server_name,
tls_insecure_skip_verify,
sync_interval_sec,
sync_enabled,
dirty,
next_sync_at,
sync_running,
sync_status,
sync_error,
sync_step,
sync_total,
sync_done,
sync_failed,
sync_deleted,
last_sync_started_at,
last_sync_finished_at,
last_sync_success_at,
last_synced_revision,
created_at,
updated_at
FROM rpm_repo_dirs;
-- Swap the rebuilt table into place.
DROP TABLE rpm_repo_dirs;
ALTER TABLE rpm_repo_dirs_new RENAME TO rpm_repo_dirs;
COMMIT;
PRAGMA foreign_keys = ON;

View File

@@ -65,6 +65,7 @@ export interface RpmPackageDetail extends RpmPackageSummary {
files: string[]
requires: string[]
provides: string[]
changelogs: { author: string; date: number; text: string }[]
}
export interface DockerTagInfo {
@@ -104,6 +105,7 @@ export interface RpmRepoDirConfig {
repo_id: string
path: string
mode: 'local' | 'mirror' | ''
allow_delete: boolean
remote_url: string
connect_host: string
host_header: string
@@ -665,6 +667,7 @@ export const api = {
type: string,
parent?: string,
mode?: 'local' | 'mirror',
allow_delete?: boolean,
remote_url?: string,
connect_host?: string,
host_header?: string,
@@ -674,18 +677,19 @@ export const api = {
) =>
request<{ status: string }>(`/api/repos/${repoId}/rpm/subdirs`, {
method: 'POST',
body: JSON.stringify({ name, type, parent, mode, remote_url, connect_host, host_header, tls_server_name, tls_insecure_skip_verify, sync_interval_sec })
body: JSON.stringify({ name, type, parent, mode, allow_delete, remote_url, connect_host, host_header, tls_server_name, tls_insecure_skip_verify, sync_interval_sec })
}),
getRpmSubdir: (repoId: string, path: string) => {
const params = new URLSearchParams()
params.set('path', path)
return request<RpmRepoDirConfig>(`/api/repos/${repoId}/rpm/subdir?${params.toString()}`)
},
renameRpmSubdir: (
updateRpmSubdir: (
repoId: string,
path: string,
name: string,
name?: string,
mode?: 'local' | 'mirror',
allow_delete?: boolean,
remote_url?: string,
connect_host?: string,
host_header?: string,
@@ -693,9 +697,9 @@ export const api = {
tls_insecure_skip_verify?: boolean,
sync_interval_sec?: number
) =>
request<{ status: string }>(`/api/repos/${repoId}/rpm/subdir/rename`, {
request<{ status: string }>(`/api/repos/${repoId}/rpm/subdir/update`, {
method: 'POST',
body: JSON.stringify({ path, name, mode, remote_url, connect_host, host_header, tls_server_name, tls_insecure_skip_verify, sync_interval_sec })
body: JSON.stringify({ path, name, mode, allow_delete, remote_url, connect_host, host_header, tls_server_name, tls_insecure_skip_verify, sync_interval_sec })
}),
syncRpmSubdir: (repoId: string, path: string) => {
const params = new URLSearchParams()
@@ -718,6 +722,13 @@ export const api = {
method: 'POST'
})
},
rebuildRpmSubdirMetadata: (repoId: string, path: string) => {
const params = new URLSearchParams()
params.set('path', path)
return request<{ status: string }>(`/api/repos/${repoId}/rpm/subdir/rebuild-metadata?${params.toString()}`, {
method: 'POST'
})
},
listRpmMirrorRuns: (repoId: string, path: string, limit?: number) => {
const params = new URLSearchParams()
params.set('path', path)

View File

@@ -33,6 +33,7 @@ export default function RepoGitDetailPage(props: RepoGitDetailPageProps) {
const [defaultBranch, setDefaultBranch] = useState<string>('')
const [tree, setTree] = useState<RepoTreeEntry[]>([])
const [treeError, setTreeError] = useState<string | null>(null)
const [treeReloadTick, setTreeReloadTick] = useState(0)
const [fileQuery, setFileQuery] = useState('')
const [path, setPath] = useState('')
const [pathSegments, setPathSegments] = useState<string[]>([])
@@ -142,7 +143,7 @@ export default function RepoGitDetailPage(props: RepoGitDetailPageProps) {
if (!ref && branches.length === 0) return
if (!repo) return
if (repo && repo.type && repo.type !== 'git') return
const key = `${repoId}:${ref}:${path}`
const key = `${repoId}:${ref}:${path}:${treeReloadTick}`
if (lastTreeKey.current === key) return
lastTreeKey.current = key
api.listRepoTree(repoId, ref || undefined, path)
@@ -163,7 +164,7 @@ export default function RepoGitDetailPage(props: RepoGitDetailPageProps) {
setSelectedCommit(null)
}
})
}, [repoId, ref, path, branches])
}, [repoId, ref, path, branches, treeReloadTick])
useEffect(() => {
if (!repoId || !ref) {
@@ -270,6 +271,10 @@ export default function RepoGitDetailPage(props: RepoGitDetailPageProps) {
}
const handleBreadcrumb = (nextPath: string) => {
if (nextPath === path) {
setTreeReloadTick((prev) => prev + 1)
return
}
setPath(nextPath)
if (nextPath === '') {
setPathSegments([])

View File

@@ -54,12 +54,13 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
const [rpmMetaContent, setRpmMetaContent] = useState('')
const [rpmMetaError, setRpmMetaError] = useState<string | null>(null)
const [rpmMetaLoading, setRpmMetaLoading] = useState(false)
const [rpmTab, setRpmTab] = useState<'meta' | 'files'>('meta')
const [rpmTab, setRpmTab] = useState<'meta' | 'files' | 'changelog'>('meta')
const [sidebarOpen, setSidebarOpen] = useState(true)
const [subdirOpen, setSubdirOpen] = useState(false)
const [subdirName, setSubdirName] = useState('')
const [subdirType, setSubdirType] = useState<'container' | 'repo'>('container')
const [subdirMode, setSubdirMode] = useState<'local' | 'mirror'>('local')
const [subdirAllowDelete, setSubdirAllowDelete] = useState(false)
const [subdirSyncIntervalSec, setSubdirSyncIntervalSec] = useState('300')
const [subdirRemoteURL, setSubdirRemoteURL] = useState('')
const [subdirConnectHost, setSubdirConnectHost] = useState('')
@@ -86,6 +87,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
const [renameNewName, setRenameNewName] = useState('')
const [renameIsRepoDir, setRenameIsRepoDir] = useState(false)
const [renameMode, setRenameMode] = useState<'local' | 'mirror'>('local')
const [renameAllowDelete, setRenameAllowDelete] = useState(false)
const [renameSyncIntervalSec, setRenameSyncIntervalSec] = useState('300')
const [renameRemoteURL, setRenameRemoteURL] = useState('')
const [renameConnectHost, setRenameConnectHost] = useState('')
@@ -112,8 +114,10 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
const [clearRunsConfirmOpen, setClearRunsConfirmOpen] = useState(false)
const [rpmPath, setRpmPath] = useState('')
const [rpmPathSegments, setRpmPathSegments] = useState<string[]>([])
const [rpmFileQuery, setRpmFileQuery] = useState('')
const [rpmTree, setRpmTree] = useState<RpmTreeEntry[]>([])
const [rpmTreeError, setRpmTreeError] = useState<string | null>(null)
const [rpmTreeReloadTick, setRpmTreeReloadTick] = useState(0)
const [rpmSelectedEntry, setRpmSelectedEntry] = useState<RpmTreeEntry | null>(null)
const [canWrite, setCanWrite] = useState(false)
const initRepoRef = useRef<string | null>(null)
@@ -185,7 +189,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
setRpmTreeError(message)
setRpmTree([])
})
}, [repoId, repo, rpmPath])
}, [repoId, repo, rpmPath, rpmTreeReloadTick])
const handleSelectRpm = async (pkg: RpmPackageSummary) => {
if (!repoId) return
@@ -193,7 +197,6 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
setRpmDetail(null)
setRpmDetailLoading(true)
setRpmError(null)
setRpmTab('meta')
try {
const detail = await api.getRpmPackage(repoId, pkg.filename)
setRpmSelected({
@@ -264,10 +267,13 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
setSubdirError(null)
setSubdirSaving(true)
try {
syncIntervalSec = 0
if (subdirType === 'repo' && subdirMode === 'mirror') {
syncIntervalSec = Number(subdirSyncIntervalSec)
if (!Number.isFinite(syncIntervalSec) || syncIntervalSec <= 0) {
syncIntervalSec = 300
}
}
const parent = rpmPath
await api.createRpmSubdir(
repoId,
@@ -275,6 +281,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
subdirType,
parent,
subdirMode,
subdirAllowDelete,
subdirRemoteURL.trim(),
subdirConnectHost.trim(),
subdirHostHeader.trim(),
@@ -295,6 +302,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
setSubdirName('')
setSubdirType('container')
setSubdirMode('local')
setSubdirAllowDelete(false)
setSubdirSyncIntervalSec('300')
setSubdirRemoteURL('')
setSubdirConnectHost('')
@@ -359,21 +367,25 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
setRenameError(null)
setRenaming(true)
try {
syncIntervalSec = 0
if (renameIsRepoDir && renameMode === 'mirror') {
syncIntervalSec = Number(renameSyncIntervalSec)
if (!Number.isFinite(syncIntervalSec) || syncIntervalSec <= 0) {
syncIntervalSec = 300
}
await api.renameRpmSubdir(
}
await api.updateRpmSubdir(
repoId,
renamePath,
renameNewName.trim(),
renameIsRepoDir ? renameMode : undefined,
renameIsRepoDir ? renameAllowDelete : undefined,
renameIsRepoDir ? renameRemoteURL.trim() : undefined,
renameIsRepoDir ? renameConnectHost.trim() : undefined,
renameIsRepoDir ? renameHostHeader.trim() : undefined,
renameIsRepoDir ? renameTLSServerName.trim() : undefined,
renameIsRepoDir ? renameTLSInsecureSkipVerify : undefined,
renameIsRepoDir ? syncIntervalSec : undefined
renameIsRepoDir && renameMode === 'mirror' ? syncIntervalSec : undefined
)
setRenameOpen(false)
setRenamePath('')
@@ -381,6 +393,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
setRenameNewName('')
setRenameIsRepoDir(false)
setRenameMode('local')
setRenameAllowDelete(false)
setRenameSyncIntervalSec('300')
setRenameRemoteURL('')
setRenameConnectHost('')
@@ -450,26 +463,6 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
}
}
const handleStatusSyncNow = async () => {
if (!repoId || !statusPath || statusMode !== 'mirror') return
setStatusSyncBusy(true)
setStatusError(null)
try {
await api.syncRpmSubdir(repoId, statusPath)
setStatusSyncEnabled(true)
setStatusSyncStatus('scheduled')
setStatusSyncStep('queued')
setStatusSyncError('')
const runs = await api.listRpmMirrorRuns(repoId, statusPath, 10)
setStatusRuns(Array.isArray(runs) ? runs : [])
} catch (err) {
const message = err instanceof Error ? err.message : 'Failed to schedule sync'
setStatusError(message)
} finally {
setStatusSyncBusy(false)
}
}
const handleStatusToggleSyncEnabled = async () => {
if (!repoId || !statusPath || statusMode !== 'mirror') return
setStatusSyncBusy(true)
@@ -477,13 +470,10 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
try {
if (statusSyncEnabled) {
await api.suspendRpmSubdir(repoId, statusPath)
setStatusSyncEnabled(false)
setStatusSyncStatus('suspended')
} else {
await api.resumeRpmSubdir(repoId, statusPath)
setStatusSyncEnabled(true)
setStatusSyncStatus('scheduled')
}
await loadStatus(statusPath)
} catch (err) {
const message = err instanceof Error ? err.message : 'Failed to change mirror sync state'
setStatusError(message)
@@ -513,21 +503,41 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
}
}
// Schedules a repodata rebuild for the currently selected mirror subdir.
// No-ops unless a repo id, a status path, and mirror mode are all present.
const handleStatusRebuildMetadata = async () => {
if (!repoId || !statusPath || statusMode !== 'mirror') return
// Busy flag disables the status-dialog action buttons while in flight.
setStatusSyncBusy(true)
setStatusError(null)
try {
await api.rebuildRpmSubdirMetadata(repoId, statusPath)
} catch (err) {
const message = err instanceof Error ? err.message : 'Failed to schedule metadata rebuild'
setStatusError(message)
} finally {
setStatusSyncBusy(false)
}
}
// Navigates one directory level up in the RPM file tree, clearing the
// file-search query and the current selection. No-op at the tree root.
const handleRpmBack = () => {
if (!rpmPath) return
// Drop the last path segment to get the parent directory.
const nextSegments = rpmPathSegments.slice(0, -1)
setRpmPath(nextSegments.join('/'))
setRpmPathSegments(nextSegments)
setRpmFileQuery('')
setRpmSelectedEntry(null)
}
// Jumps to a breadcrumb path in the RPM file tree. Clicking the crumb
// for the directory already shown bumps the reload tick instead, which
// forces the tree effect to refetch the same path.
const handleRpmBreadcrumb = (nextPath: string) => {
if (nextPath === rpmPath) {
setRpmTreeReloadTick((prev) => prev + 1)
return
}
setRpmPath(nextPath)
if (nextPath === '') {
setRpmPathSegments([])
} else {
setRpmPathSegments(nextPath.split('/').filter(Boolean))
}
// Reset search and selection when changing directories.
setRpmFileQuery('')
setRpmSelectedEntry(null)
}
@@ -597,6 +607,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
if (entry.type === 'dir') {
setRpmPath(entry.path)
setRpmPathSegments(entry.path.split('/').filter(Boolean))
setRpmFileQuery('')
setRpmSelectedEntry(null)
return
}
@@ -639,6 +650,84 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
}
const rpmPathParts = rpmPathSegments
const normalizedQuery = rpmFileQuery.trim().toLowerCase()
// Converts a shell-style glob (*, ?, [...] character classes) into an
// anchored regular-expression source string. '*' becomes '.*', '?'
// becomes '.', '[...]' becomes a regex character class (with '!' or '^'
// as negation), and regex metacharacters are escaped. An unterminated
// '[' is treated as a literal bracket.
const globToRegex = (query: string) => {
let i = 0
let out = '^'
while (i < query.length) {
const ch = query[i]
if (ch === '*') {
out += '.*'
i += 1
continue
}
if (ch === '?') {
out += '.'
i += 1
continue
}
if (ch === '[') {
// Remember where the class started so we can backtrack if it
// never closes.
const classStart = i
i += 1
if (i >= query.length) {
// Trailing '[': emit a literal bracket and stop.
out += '\\['
break
}
let negate = false
if (query[i] === '!' || query[i] === '^') {
negate = true
i += 1
}
let classText = ''
let sawClass = false
// Collect the class body up to the closing ']'.
while (i < query.length) {
const next = query[i]
if (next === ']') {
sawClass = true
i += 1
break
}
if (next === '\\') {
// Double backslashes so they stay literal inside the class.
classText += '\\\\'
i += 1
continue
}
classText += next
i += 1
}
if (!sawClass) {
// Unclosed class: treat '[' as literal and re-scan what followed it.
out += '\\['
i = classStart + 1
continue
}
out += negate ? `[^${classText}]` : `[${classText}]`
continue
}
if ('.+^$(){}|\\'.includes(ch)) {
// Escape regex metacharacters that glob treats as literals.
out += `\\${ch}`
i += 1
continue
}
out += ch
i += 1
}
out += '$'
return out
}
// Tests a file name against the user's search query. Queries containing
// glob metacharacters (*, ?, [) are compiled to a case-insensitive
// regular expression via globToRegex; anything else — including a glob
// that fails to compile — falls back to a plain case-insensitive
// substring match.
const matchesFileQuery = (name: string, query: string) => {
  if (!query) return true
  const substringMatch = () => name.toLowerCase().includes(query)
  const hasGlobChars = ['*', '?', '['].some((ch) => query.includes(ch))
  if (!hasGlobChars) return substringMatch()
  try {
    return new RegExp(globToRegex(query), 'i').test(name)
  } catch {
    return substringMatch()
  }
}
const filteredTree = normalizedQuery
? rpmTree.filter((entry) => matchesFileQuery(entry.name, normalizedQuery))
: rpmTree
return (
<Box>
@@ -694,6 +783,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
setSubdirName('')
setSubdirType('container')
setSubdirMode('local')
setSubdirAllowDelete(false)
setSubdirSyncIntervalSec('300')
setSubdirRemoteURL('')
setSubdirConnectHost('')
@@ -745,6 +835,14 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
)
})}
</Box>
<TextField
size="small"
placeholder="Search files"
value={rpmFileQuery}
onChange={(event) => setRpmFileQuery(event.target.value)}
fullWidth
sx={{ mb: 1, px: 0.5 }}
/>
{rpmTreeError ? (
<Alert severity="warning" sx={{ mb: 1 }}>
{rpmTreeError}
@@ -765,7 +863,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
</ListItemButton>
</ListItem>
) : null}
{rpmTree.map((entry) => (
{filteredTree.map((entry) => (
<ListItem key={entry.path} disablePadding>
<ListItemButton onClick={() => handleRpmEntry(entry)}>
<ListItemText
@@ -814,6 +912,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
setRenameNewName(entry.name)
setRenameIsRepoDir(Boolean(entry.is_repo_dir))
setRenameMode(entry.repo_mode === 'mirror' ? 'mirror' : 'local')
setRenameAllowDelete(false)
setRenameSyncIntervalSec('300')
setRenameRemoteURL('')
setRenameConnectHost('')
@@ -824,6 +923,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
try {
const cfg = await api.getRpmSubdir(repoId, entry.path)
setRenameMode(cfg.mode === 'mirror' ? 'mirror' : 'local')
setRenameAllowDelete(Boolean(cfg.allow_delete))
setRenameSyncIntervalSec(String(cfg.sync_interval_sec || 300))
setRenameRemoteURL(cfg.remote_url || '')
setRenameConnectHost(cfg.connect_host || '')
@@ -880,9 +980,9 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
) : null}
</ListItem>
))}
{!rpmTree.length && !rpmTreeError ? (
{!filteredTree.length && !rpmTreeError ? (
<Typography variant="body2" color="text.secondary" sx={{ px: 1, py: 1 }}>
No files found.
{normalizedQuery ? 'No matching files.' : 'No files found.'}
</Typography>
) : null}
</List>
@@ -910,6 +1010,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
<Tabs value={rpmTab} onChange={(_, value) => setRpmTab(value)}>
<Tab label="Metadata" value="meta" />
<Tab label="Files" value="files" />
<Tab label="Change Log" value="changelog" />
</Tabs>
{rpmDetailLoading ? (
<Typography variant="body2" color="text.secondary" sx={{ mt: 1 }}>
@@ -952,10 +1053,10 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
Build Time: {rpmDetail.build_time ? new Date(rpmDetail.build_time * 1000).toLocaleString() : 'n/a'}
</Typography>
<Typography variant="body2">Size: {rpmDetail.size ? `${rpmDetail.size} bytes` : 'n/a'}</Typography>
{rpmDetail.requires.length ? (
{Array.isArray(rpmDetail.requires) && rpmDetail.requires.length ? (
<Typography variant="body2">Requires: {rpmDetail.requires.join(', ')}</Typography>
) : null}
{rpmDetail.provides.length ? (
{Array.isArray(rpmDetail.provides) && rpmDetail.provides.length ? (
<Typography variant="body2">Provides: {rpmDetail.provides.join(', ')}</Typography>
) : null}
{rpmDetail.description ? (
@@ -968,13 +1069,36 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
{rpmDetail && rpmTab === 'files' ? (
<Box sx={{ mt: 1, maxHeight: '60vh', overflow: 'auto' }}>
<List dense>
{rpmDetail.files.map((file) => (
{(Array.isArray(rpmDetail.files) ? rpmDetail.files : []).map((file) => (
<ListItem key={file}>
<ListItemText primary={file} />
</ListItem>
))}
</List>
</Box>
) : null}
{rpmDetail && rpmTab === 'changelog' ? (
<Box sx={{ mt: 1, maxHeight: '60vh', overflow: 'auto' }}>
{Array.isArray(rpmDetail.changelogs) && rpmDetail.changelogs.length ? (
<List dense>
{rpmDetail.changelogs.map((item, index) => (
<ListItem key={`${item.date}-${index}`} sx={{ display: 'block' }}>
<Typography variant="body2" color="text.secondary">
{item.date ? new Date(item.date * 1000).toLocaleString() : 'n/a'}
{item.author ? ` · ${item.author}` : ''}
</Typography>
<Typography variant="body2" sx={{ whiteSpace: 'pre-wrap' }}>
{item.text || ''}
</Typography>
</ListItem>
))}
</List>
) : (
<Typography variant="body2" color="text.secondary">
No change log entries.
</Typography>
)}
</Box>
) : null}
{!rpmDetail && !rpmDetailLoading && rpmError ? (
<Alert severity="warning" sx={{ mt: 1 }}>
@@ -1058,7 +1182,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
<MenuItem value="mirror">mirror</MenuItem>
</TextField>
) : null}
{subdirType === 'repo' ? (
{subdirType === 'repo' && subdirMode === 'mirror' ? (
<TextField
label="Sync Interval (seconds)"
value={subdirSyncIntervalSec}
@@ -1097,6 +1221,10 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
helperText="Optional SNI/verify server name override"
fullWidth
/>
<FormControlLabel
control={<Checkbox checked={subdirAllowDelete} onChange={(event) => setSubdirAllowDelete(event.target.checked)} />}
label="Allow delete (files/container dirs) in mirror subtree"
/>
<FormControlLabel
control={
<Checkbox
@@ -1204,10 +1332,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
</Typography>
) : null}
<Box sx={{ pt: 1 }}>
<Button size="small" variant="outlined" onClick={handleStatusSyncNow} disabled={statusSyncBusy}>
{statusSyncBusy ? 'Scheduling...' : 'Sync now'}
</Button>
<Button size="small" variant="outlined" onClick={handleStatusToggleSyncEnabled} disabled={statusSyncBusy} sx={{ ml: 1 }}>
<Button size="small" variant="outlined" onClick={handleStatusToggleSyncEnabled} disabled={statusSyncBusy}>
{statusSyncEnabled ? 'Suspend' : 'Resume'}
</Button>
<Button
@@ -1220,6 +1345,15 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
>
Clear runs
</Button>
<Button
size="small"
variant="outlined"
onClick={handleStatusRebuildMetadata}
disabled={statusSyncBusy}
sx={{ ml: 1 }}
>
Rebuild metadata
</Button>
<Button
size="small"
variant="outlined"
@@ -1306,7 +1440,7 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
<MenuItem value="mirror">mirror</MenuItem>
</TextField>
) : null}
{renameIsRepoDir ? (
{renameIsRepoDir && renameMode === 'mirror' ? (
<TextField
label="Sync Interval (seconds)"
value={renameSyncIntervalSec}
@@ -1343,6 +1477,10 @@ export default function RepoRpmDetailPage(props: RepoRpmDetailPageProps) {
onChange={(event) => setRenameTLSServerName(event.target.value)}
fullWidth
/>
<FormControlLabel
control={<Checkbox checked={renameAllowDelete} onChange={(event) => setRenameAllowDelete(event.target.checked)} />}
label="Allow delete (files/container dirs) in mirror subtree"
/>
<FormControlLabel
control={<Checkbox checked={renameTLSInsecureSkipVerify} onChange={(event) => setRenameTLSInsecureSkipVerify(event.target.checked)} />}
label="Skip TLS certificate verification"