// goredo -- djb's redo implementation on pure Go
// Copyright (C) 2020-2024 Sergey Matveev
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 3 of the License.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bufio"
	"bytes"
	"errors"
	"io"
	"io/fs"
	"os"
	"path"
	"strings"

	"github.com/google/uuid"
	"go.cypherpunks.ru/recfile"
)

// DepFixHashCache caches file hashes already computed during a depfix run,
// keyed by absolute path, so that each file is hashed at most once.
var DepFixHashCache map[string]Hash

// recfileWrite writes a single recfile record containing the given fields.
func recfileWrite(fdDep io.StringWriter, fields ...recfile.Field) error {
	w := recfile.NewWriter(fdDep)
	if _, err := w.RecordStart(); err != nil {
		return err
	}
	if _, err := w.WriteFields(fields...); err != nil {
		return err
	}
	return nil
}

// depFix recursively walks root and rewrites every dependency file found in
// its RedoDir state directories: DepSuffix files are reparsed and rewritten
// with freshly computed inode and hash information, while legacy ".rec"
// (recfile) files are converted to the DepSuffix format.
func depFix(root string) error {
	tracef(CDebug, "depfix: entering %s", root)
	dir, err := os.Open(root)
	if err != nil {
		return ErrLine(err)
	}
	defer dir.Close()
	// Recurse into subdirectories first.
	for {
		entries, err := dir.ReadDir(1 << 10)
		if err != nil {
			if err == io.EOF {
				break
			}
			return ErrLine(err)
		}
		for _, entry := range entries {
			if entry.IsDir() {
				if err = depFix(path.Join(root, entry.Name())); err != nil {
					return err
				}
			}
		}
	}
	dir.Close()
	// Then process this directory's own RedoDir, if it exists.
	redoDir := path.Join(root, RedoDir)
	dir, err = os.Open(redoDir)
	if err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return nil
		}
		return ErrLine(err)
	}
	defer dir.Close()
	for {
		entries, err := dir.ReadDir(1 << 10)
		if err != nil {
			if err == io.EOF {
				break
			}
			return ErrLine(err)
		}
		for _, entry := range entries {
			switch path.Ext(entry.Name()) {
			case DepSuffix:
			case ".rec":
			default:
				continue
			}
			tracef(CDebug, "depfix: checking %s/%s", root, entry.Name())
			fdDepPath := path.Join(redoDir, entry.Name())
			data, err := os.ReadFile(fdDepPath)
			if err != nil {
				return ErrLine(err)
			}
			// Rewrite into a temporary file, renamed over the original at the end.
			fdDep, err := tempfile(redoDir, entry.Name())
			if err != nil {
				return ErrLine(err)
			}
			defer os.Remove(fdDep.Name())
			tracef(
				CDebug, "depfix: %s/%s: tmp %s",
				root, entry.Name(), fdDep.Name(),
			)
			fdDepW := bufio.NewWriter(fdDep)
			switch path.Ext(entry.Name()) {
			case DepSuffix:
				// Native dependency file: reparse and rewrite it with
				// refreshed inode and hash data.
				if _, err = depParse(NewTgt(""), data); err != nil {
					return ErrLine(err)
				}
				build, data, err := depHeadParse(data)
				if err != nil {
					return ErrLine(err)
				}
				if err = depBuildWrite(fdDepW, build); err != nil {
					return ErrLine(err)
				}
				var typ byte
				var chunk []byte
				for len(data) > 0 {
					typ, chunk, data, _ = chunkRead(data)
					switch typ {
					case DepTypeAlways:
						err = always(fdDepW, fdDep.Name())
					case DepTypeStamp:
						p := mustAbs(path.Join(root, strings.TrimSuffix(entry.Name(), DepSuffix)))
						hsh, ok := DepFixHashCache[p]
						if !ok {
							var fd *os.File
							fd, err = os.Open(p)
							if err != nil {
								break
							}
							hsh, err = fileHash(fd)
							fd.Close()
							if err != nil {
								break
							}
							DepFixHashCache[p] = hsh
						}
						err = stamp(fdDepW, fdDep.Name(), hsh)
					case DepTypeIfcreate:
						err = ifcreate(fdDepW, fdDep.Name(), string(chunk))
					case DepTypeIfchange:
						name := string(chunk[InodeLen+HashLen:])
						p := mustAbs(path.Join(root, name))
						var fd *os.File
						fd, err = os.Open(p)
						if err != nil {
							break
						}
						var inode *Inode
						inode, _, err = inodeFromFileByFd(fd)
						if err != nil {
							fd.Close()
							break
						}
						hsh, ok := DepFixHashCache[p]
						if !ok {
							hsh, err = fileHash(fd)
							if err != nil {
								break
							}
							DepFixHashCache[p] = hsh
						}
						fd.Close()
						_, err = io.Copy(fdDepW, bytes.NewBuffer(
							chunkWrite(bytes.Join([][]byte{
								{DepTypeIfchange}, inode[:], []byte(hsh), []byte(name),
							}, nil))))
					case DepTypeIfchangeNonex:
						err = depWriteNonex(fdDepW, fdDep.Name(), string(chunk))
					}
					if err != nil {
						return ErrLine(err)
					}
				}
			case ".rec":
				// Legacy recfile-formatted state: convert it to a DepSuffix file.
				defer os.Remove(fdDepPath)
				fdDepPath = fdDepPath[:len(fdDepPath)-4] + DepSuffix
				r := recfile.NewReader(bytes.NewReader(data))
				m, err := r.NextMap()
				if err != nil {
					return err
				}
				var build uuid.UUID
				build, err = uuid.Parse(m["Build"])
				if err != nil {
					break
				}
				if err = depBuildWrite(fdDepW, build); err != nil {
					return ErrLine(err)
				}
				for {
					m, err := r.NextMap()
					if err != nil {
						if errors.Is(err, io.EOF) {
							break
						}
						return ErrLine(err)
					}
					switch m["Type"] {
					case "always":
						err = always(fdDepW, m["Target"])
					case "stamp":
						p := mustAbs(path.Join(root, strings.TrimSuffix(entry.Name(), DepSuffix)))
						hsh, ok := DepFixHashCache[p]
						if !ok {
							var fd *os.File
							fd, err = os.Open(p)
							if err != nil {
								break
							}
							hsh, err = fileHash(fd)
							fd.Close()
							if err != nil {
								break
							}
							DepFixHashCache[p] = hsh
						}
						err = stamp(fdDepW, fdDep.Name(), hsh)
					case "ifcreate":
						err = ifcreate(fdDepW, fdDep.Name(), m["Target"])
					case "ifchange":
						if m["Size"] == "" {
							err = depWriteNonex(fdDepW, fdDep.Name(), m["Target"])
							break
						}
						name := string(m["Target"])
						p := mustAbs(path.Join(root, name))
						var fd *os.File
						fd, err = os.Open(p)
						if err != nil {
							break
						}
						var inode *Inode
						inode, _, err = inodeFromFileByFd(fd)
						if err != nil {
							fd.Close()
							break
						}
						hsh, ok := DepFixHashCache[p]
						if !ok {
							hsh, err = fileHash(fd)
							if err != nil {
								break
							}
							DepFixHashCache[p] = hsh
						}
						fd.Close()
						_, err = io.Copy(fdDepW, bytes.NewBuffer(
							chunkWrite(bytes.Join([][]byte{
								{DepTypeIfchange}, inode[:], []byte(hsh), []byte(name),
							}, nil))))
					}
					if err != nil {
						return ErrLine(err)
					}
				}
			}
			err = fdDepW.Flush()
			if err != nil {
				return ErrLine(err)
			}
			if !NoSync {
				if err = fdDep.Sync(); err != nil {
					return ErrLine(err)
				}
			}
			fdDep.Close()
			// Replace the old dependency file with the rewritten temporary one.
			if err = os.Rename(fdDep.Name(), fdDepPath); err != nil {
				return ErrLine(err)
			}
			tracef(CRedo, "%s", fdDepPath)
		}
		if !NoSync {
			if err = syncDir(redoDir); err != nil {
				return err
			}
		}
	}
	return nil
}