/redo-affects
/redo-always
/redo-cleanup
+/redo-dep2rec
/redo-depfix
/redo-dot
/redo-ifchange
/redo-ifcreate
+/redo-inode
/redo-log
/redo-ood
/redo-sources
}
func parseBuildLogRec(tgt *Tgt) (map[string][]string, error) {
- fd, err := os.Open(path.Join(tgt.h, RedoDir, tgt.t+LogRecSuffix))
+ h, t := path.Split(tgt.a)
+ fd, err := os.Open(path.Join(h, RedoDir, t+LogRecSuffix))
if err != nil {
return nil, ErrLine(err)
}
return err
}
}
- fd, err := os.Open(path.Join(tgt.h, RedoDir, tgt.t+LogSuffix))
+ tgtH, tgtT := path.Split(tgt.a)
+ fd, err := os.Open(path.Join(tgtH, RedoDir, tgtT+LogSuffix))
if err != nil {
return ErrLine(err)
}
defer fd.Close()
subs := make([]*BuildLogJob, 0, len(buildLogRec["Ifchange"]))
for _, depPath := range buildLogRec["Ifchange"] {
- dep := NewTgt(path.Join(tgt.h, depPath))
+ dep := NewTgt(path.Join(tgtH, depPath))
if dep.rel == tgt.rel {
continue
}
import (
"bufio"
"bytes"
- "encoding/hex"
+ "encoding/binary"
"errors"
"io"
- "log"
"os"
"path"
- "go.cypherpunks.ru/recfile"
+ "github.com/google/uuid"
"lukechampine.com/blake3"
)
+const (
+ BinMagic = "GOREDO"
+ BinVersionV1 = 0x01
+ UUIDLen = 16
+
+ DepTypeIfcreate = 0x01
+ DepTypeIfchange = 0x02
+ DepTypeAlways = 0x03
+ DepTypeStamp = 0x04
+ DepTypeIfchangeDummy = 0x05
+)
+
var (
DirPrefix string
DepCwd string
- ErrBadRecFormat = errors.New("invalid format of .rec")
- InodeCache = make(map[string][]*Inode)
- HashCache = make(map[string][]Hash)
+ IfchangeCache = make(map[string][]*Ifchange)
+ DepCache = make(map[string]*Dep)
+
+ NullUUID uuid.UUID
)
-func recfileWrite(fdDep io.StringWriter, fields ...recfile.Field) error {
- w := recfile.NewWriter(fdDep)
- if _, err := w.RecordStart(); err != nil {
- return err
- }
- if _, err := w.WriteFields(fields...); err != nil {
- return err
// chunkWrite frames in as a length-prefixed record: a 2-byte big-endian
// total length (the length field itself included) followed by the
// payload. It is the writing counterpart of chunkRead.
func chunkWrite(in []byte) (out []byte) {
	l := len(in) + 2
	// The total length must fit the 16-bit prefix. l == 1<<16 would
	// silently wrap to uint16(0) and emit a corrupt zero-length chunk,
	// so it must be rejected as well (a plain ">" check lets it slip).
	if l >= 1<<16 {
		panic("too long")
	}
	out = make([]byte, l)
	binary.BigEndian.PutUint16(out[:2], uint16(l))
	copy(out[2:], in)
	return
}
-func ifcreate(fdDep *os.File, tgt string) error {
- tracef(CDebug, "ifcreate: %s <- %s", fdDep.Name(), tgt)
- return recfileWrite(
- fdDep,
- recfile.Field{Name: "Type", Value: DepTypeIfcreate},
- recfile.Field{Name: "Target", Value: tgt},
- )
+type Ifchange struct {
+ tgt *Tgt
+ meta string
}
-func always(fdDep *os.File) error {
- tracef(CDebug, "always: %s", fdDep.Name())
- return recfileWrite(fdDep, recfile.Field{Name: "Type", Value: DepTypeAlways})
+func (ifchange *Ifchange) Inode() Inode {
+ return Inode(ifchange.meta[:InodeLen])
}
-func stamp(fdDep, src *os.File) error {
- hsh, err := fileHash(src)
- if err != nil {
- return err
- }
- tracef(CDebug, "stamp: %s <- %s", fdDep.Name(), hsh)
- return recfileWrite(
- fdDep,
- recfile.Field{Name: "Type", Value: DepTypeStamp},
- recfile.Field{Name: "Hash", Value: hsh.String()},
- )
+func (ifchange *Ifchange) Hash() Hash {
+ return Hash(ifchange.meta[InodeLen:])
+}
+
+type Dep struct {
+ build uuid.UUID
+ always bool
+ stamp Hash
+ ifcreates []*Tgt
+ ifchanges []*Ifchange
+}
+
+func ifcreate(w io.Writer, fdDepName string, tgt string) (err error) {
+ tracef(CDebug, "ifcreate: %s <- %s", fdDepName, tgt)
+ _, err = io.Copy(w, bytes.NewBuffer(chunkWrite(append(
+ []byte{DepTypeIfcreate}, []byte(tgt)...,
+ ))))
+ return
+}
+
+func always(w io.Writer, fdDepName string) (err error) {
+ tracef(CDebug, "always: %s", fdDepName)
+ _, err = io.Copy(w, bytes.NewBuffer(chunkWrite(
+ []byte{DepTypeAlways},
+ )))
+ return
+}
+
+func stamp(w io.Writer, fdDepName string, hsh Hash) (err error) {
+ tracef(CDebug, "stamp: %s <- %s", fdDepName, hsh)
+ _, err = io.Copy(w, bytes.NewBuffer(chunkWrite(append(
+ []byte{DepTypeStamp}, []byte(hsh)...,
+ ))))
+ return
}
-func fileHash(fd *os.File) (Hash, error) {
+func fileHash(fd io.Reader) (Hash, error) {
h := blake3.New(HashLen, nil)
if _, err := io.Copy(h, bufio.NewReader(fd)); err != nil {
return "", err
return Hash(h.Sum(nil)), nil
}
-func depWrite(fdDep *os.File, cwd string, tgt *Tgt, hsh Hash) error {
- tracef(CDebug, "ifchange: %s <- %s", fdDep.Name(), tgt)
+func depWrite(w io.Writer, fdDepName, cwd string, tgt *Tgt, hsh Hash) (err error) {
+ tracef(CDebug, "ifchange: %s <- %s", fdDepName, tgt)
fd, err := os.Open(tgt.a)
if err != nil {
return ErrLine(err)
return ErrLine(err)
}
}
- fields := []recfile.Field{
- {Name: "Type", Value: DepTypeIfchange},
- {Name: "Target", Value: tgt.RelTo(cwd)},
- {Name: "Hash", Value: hsh.String()},
- }
- fields = append(fields, inode.RecfileFields()...)
- return recfileWrite(fdDep, fields...)
+ _, err = io.Copy(w, bytes.NewBuffer(chunkWrite(bytes.Join([][]byte{
+ {DepTypeIfchange},
+ []byte(inode),
+ []byte(hsh),
+ []byte(tgt.RelTo(cwd)),
+ }, nil))))
+ return
+}
+
+func depWriteDummy(w io.Writer, fdDepName, tgtRel string) (err error) {
+ tracef(CDebug, "ifchange: %s <- %s (non-existing)", fdDepName, tgtRel)
+ _, err = io.Copy(w, bytes.NewBuffer(chunkWrite(bytes.Join([][]byte{
+ {DepTypeIfchangeDummy},
+ []byte(tgtRel),
+ }, nil))))
+ return
}
func depsWrite(fdDep *os.File, tgts []*Tgt) error {
}
var err error
var cwd string
+ fdDepW := bufio.NewWriter(fdDep)
for _, tgt := range tgts {
cwd = Cwd
if DepCwd != "" && Cwd != DepCwd {
}
tgtDir := path.Join(cwd, DirPrefix)
if _, errStat := os.Stat(tgt.a); errStat == nil {
- err = ErrLine(depWrite(fdDep, tgtDir, tgt, ""))
+ err = ErrLine(depWrite(fdDepW, fdDep.Name(), tgtDir, tgt, ""))
} else {
tgtRel := tgt.RelTo(tgtDir)
- tracef(CDebug, "ifchange: %s <- %s (non-existing)", fdDep.Name(), tgtRel)
- fields := []recfile.Field{
- {Name: "Type", Value: DepTypeIfchange},
- {Name: "Target", Value: tgtRel},
- }
- inodeDummy := Inode{}
- fields = append(fields, inodeDummy.RecfileFields()...)
- err = ErrLine(recfileWrite(fdDep, fields...))
+ err = ErrLine(depWriteDummy(fdDepW, fdDep.Name(), tgtRel))
}
if err != nil {
return err
}
}
- return nil
+ return fdDepW.Flush()
}
-type DepInfoIfchange struct {
- tgt *Tgt
- inode *Inode
- hash Hash
-}
-
-type DepInfo struct {
- build string
- always bool
- stamp Hash
- ifcreates []*Tgt
- ifchanges []DepInfoIfchange
+func depHeadParse(data []byte) (build uuid.UUID, tail []byte, err error) {
+ if len(data) < len(BinMagic)+1+UUIDLen {
+ err = errors.New("too short header")
+ return
+ }
+ if !bytes.Equal(data[:len(BinMagic)], []byte(BinMagic)) {
+ err = errors.New("bad magic")
+ return
+ }
+ data = data[len(BinMagic):]
+ switch data[0] {
+ case BinVersionV1:
+ default:
+ err = errors.New("unknown version")
+ return
+ }
+ build = uuid.Must(uuid.FromBytes(data[1 : 1+UUIDLen]))
+ tail = data[1+UUIDLen:]
+ return
}
-func mustHashDecode(s string) Hash {
- b, err := hex.DecodeString(s)
- if err != nil {
- log.Fatal(err)
// chunkRead decodes one length-prefixed record from data. The layout
// is: 2-byte big-endian total length (the length field included),
// 1-byte record type, then the payload. It returns the type, the
// payload, and the remaining unread data.
func chunkRead(data []byte) (typ byte, chunk []byte, tail []byte, err error) {
	if len(data) < 2 {
		err = errors.New("no length")
		return
	}
	l := binary.BigEndian.Uint16(data[:2])
	if l == 0 {
		err = errors.New("zero length chunk")
		return
	}
	// A well-formed chunk carries at least the 2-byte length prefix and
	// the type byte; for l of 1 or 2 the indexing below would panic on
	// malformed or truncated input instead of reporting an error.
	if l < 3 {
		err = errors.New("chunk too short")
		return
	}
	if len(data) < int(l) {
		err = errors.New("not enough data")
		return
	}
	typ, chunk, tail = data[2], data[3:l], data[l:]
	return
}
-var missingBuild = errors.New(".rec missing Build:")
+func depBinIfchangeParse(tgt *Tgt, chunk []byte) (*Ifchange, string, error) {
+ if len(chunk) < InodeLen+HashLen+1 {
+ return nil, "", errors.New("too short \"ifchange\" format")
+ }
+ name := string(chunk[InodeLen+HashLen:])
+ meta := string(chunk[:InodeLen+HashLen])
-func depRead(tgt *Tgt) (*DepInfo, error) {
- data, err := os.ReadFile(tgt.Dep())
- if err != nil {
- return nil, err
+ tgtH, _ := pathSplit(tgt.a)
+ ifchange := &Ifchange{tgt: NewTgt(path.Join(tgtH, name)), meta: meta}
+ cachedFound := false
+ for _, cached := range IfchangeCache[ifchange.tgt.rel] {
+ if ifchange.meta == cached.meta {
+ ifchange = cached
+ cachedFound = true
+ break
+ }
+ }
+ if IfchangeCache != nil && !cachedFound {
+ IfchangeCache[ifchange.tgt.rel] = append(IfchangeCache[ifchange.tgt.rel], ifchange)
}
- r := recfile.NewReader(bytes.NewReader(data))
- m, err := r.NextMap()
+ return ifchange, name, nil
+}
+
+func depParse(tgt *Tgt, data []byte) (*Dep, error) {
+ build, data, err := depHeadParse(data)
if err != nil {
return nil, err
}
- depInfo := DepInfo{}
- b := m["Build"]
- if b == "" {
- return nil, missingBuild
- }
- depInfo.build = b
- for {
- m, err := r.NextMap()
+ dep := Dep{build: build}
+ var typ byte
+ var chunk []byte
+ for len(data) > 0 {
+ typ, chunk, data, err = chunkRead(data)
if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
- return nil, err
+ return nil, ErrLine(err)
}
- switch m["Type"] {
+ switch typ {
case DepTypeAlways:
- depInfo.always = true
+ if len(chunk) != 0 {
+ return nil, ErrLine(errors.New("bad \"always\" format"))
+ }
+ dep.always = true
+ case DepTypeStamp:
+ if len(chunk) != HashLen {
+ return nil, ErrLine(errors.New("bad \"stamp\" format"))
+ }
+ dep.stamp = Hash(chunk)
case DepTypeIfcreate:
- dep := m["Target"]
- if dep == "" {
- return nil, ErrBadRecFormat
+ if len(chunk) < 1 {
+ return nil, ErrLine(errors.New("too short \"ifcreate\" format"))
}
- depInfo.ifcreates = append(depInfo.ifcreates,
- NewTgt(path.Join(tgt.h, dep)))
+ tgtH, _ := pathSplit(tgt.a)
+ dep.ifcreates = append(dep.ifcreates, NewTgt(path.Join(tgtH, string(chunk))))
case DepTypeIfchange:
- depRaw := m["Target"]
- if depRaw == "" {
- return nil, ErrBadRecFormat
- }
- inode, err := inodeFromRec(m)
+ ifchange, _, err := depBinIfchangeParse(tgt, chunk)
if err != nil {
- log.Print(err)
- return nil, ErrBadRecFormat
- }
- dep := NewTgt(path.Join(tgt.h, depRaw))
-
- cachedFound := false
- for _, cachedInode := range InodeCache[dep.a] {
- if inode.Equals(cachedInode) {
- inode = cachedInode
- cachedFound = true
- break
- }
- }
- if InodeCache != nil && !cachedFound {
- InodeCache[dep.a] = append(InodeCache[dep.a], inode)
- }
-
- hsh := mustHashDecode(m["Hash"])
- cachedFound = false
- for _, cachedHash := range HashCache[dep.a] {
- if hsh == cachedHash {
- hsh = cachedHash
- cachedFound = true
- break
- }
+ return nil, ErrLine(err)
}
- if HashCache != nil && !cachedFound {
- HashCache[dep.a] = append(HashCache[dep.a], hsh)
+ dep.ifchanges = append(dep.ifchanges, ifchange)
+ case DepTypeIfchangeDummy:
+ if len(chunk) < 1 {
+ return nil, ErrLine(errors.New("too short \"ifchange\" format"))
}
-
- depInfo.ifchanges = append(depInfo.ifchanges, DepInfoIfchange{
- tgt: dep, inode: inode, hash: hsh,
- })
- case DepTypeStamp:
- hsh := m["Hash"]
- if hsh == "" {
- return nil, ErrBadRecFormat
- }
- depInfo.stamp = mustHashDecode(hsh)
+ dep.ifchanges = append(dep.ifchanges, &Ifchange{tgt: NewTgt(string(chunk))})
default:
- return nil, ErrBadRecFormat
+ return nil, ErrLine(errors.New("unknown type"))
}
}
- return &depInfo, nil
+ return &dep, nil
+}
+
+func depRead(tgt *Tgt) (*Dep, error) {
+ data, err := os.ReadFile(tgt.dep)
+ if err != nil {
+ return nil, err
+ }
+ return depParse(tgt, data)
}
func depReadOnlyIfchanges(pth string) (ifchanges []string, err error) {
if err != nil {
return
}
- r := recfile.NewReader(bytes.NewReader(data))
- var m map[string]string
- for {
- m, err = r.NextMap()
+ _, data, err = depHeadParse(data)
+ if err != nil {
+ return nil, err
+ }
+ var typ byte
+ var chunk []byte
+ var tgt string
+ tgtDummy := NewTgt("")
+ for len(data) > 0 {
+ typ, chunk, data, err = chunkRead(data)
if err != nil {
- if errors.Is(err, io.EOF) {
- err = nil
- break
- }
- return
+ return nil, ErrLine(err)
}
- if m["Type"] == DepTypeIfchange {
- ifchanges = append(ifchanges, m["Target"])
+ switch typ {
+ case DepTypeIfchange:
+ _, tgt, err = depBinIfchangeParse(tgtDummy, chunk)
+ if err != nil {
+ return nil, ErrLine(err)
+ }
+ ifchanges = append(ifchanges, tgt)
+ case DepTypeIfchangeDummy:
+ ifchanges = append(ifchanges, string(chunk))
}
}
return
}
-func depReadBuild(pth string) (string, error) {
+func depBuildRead(pth string) (uuid.UUID, error) {
fd, err := os.Open(pth)
if err != nil {
- return "", err
+ return NullUUID, err
}
- r := recfile.NewReader(fd)
- m, err := r.NextMap()
+ data := make([]byte, len(BinMagic)+1+UUIDLen)
+ _, err = io.ReadFull(fd, data)
fd.Close()
if err != nil {
- return "", err
- }
- build := m["Build"]
- if build == "" {
- err = missingBuild
+ return NullUUID, err
}
+ build, _, err := depHeadParse(data)
return build, err
}
+
+func depBuildWrite(w io.Writer, build uuid.UUID) (err error) {
+ _, err = io.Copy(w, bytes.NewBuffer(bytes.Join([][]byte{
+ []byte(BinMagic),
+ {BinVersionV1},
+ build[:],
+ }, nil)))
+ return
+}
package main
import (
+ "bufio"
+ "bytes"
"errors"
"io"
"io/fs"
"path"
"strings"
+ "github.com/google/uuid"
"go.cypherpunks.ru/recfile"
)
+var DepFixHashCache map[string]Hash
+
+func recfileWrite(fdDep io.StringWriter, fields ...recfile.Field) error {
+ w := recfile.NewWriter(fdDep)
+ if _, err := w.RecordStart(); err != nil {
+ return err
+ }
+ if _, err := w.WriteFields(fields...); err != nil {
+ return err
+ }
+ return nil
+}
+
func depFix(root string) error {
tracef(CDebug, "depfix: entering %s", root)
dir, err := os.Open(root)
return ErrLine(err)
}
defer dir.Close()
- redoDirChanged := false
for {
entries, err := dir.ReadDir(1 << 10)
if err != nil {
return ErrLine(err)
}
for _, entry := range entries {
- if !strings.HasSuffix(entry.Name(), DepSuffix) {
+ switch path.Ext(entry.Name()) {
+ case DepSuffix:
+ case ".rec":
+ default:
continue
}
tracef(CDebug, "depfix: checking %s/%s", root, entry.Name())
fdDepPath := path.Join(redoDir, entry.Name())
- fdDep, err := os.Open(fdDepPath)
+ data, err := os.ReadFile(fdDepPath)
if err != nil {
return ErrLine(err)
}
- defer fdDep.Close()
- r := recfile.NewReader(fdDep)
- var fieldses [][]recfile.Field
- depChanged := false
- for {
- fields, err := r.Next()
- if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
- return ErrLine(err)
- }
- fieldses = append(fieldses, fields)
- m := make(map[string]string, len(fields))
- for _, f := range fields {
- m[f.Name] = f.Value
- }
- if m["Type"] != DepTypeIfchange {
- continue
- }
- dep := m["Target"]
- if dep == "" {
- return ErrMissingTarget
- }
- tracef(CDebug, "depfix: checking %s/%s -> %s", root, entry.Name(), dep)
- theirInode, err := inodeFromRec(m)
- if err != nil {
+ fdDep, err := tempfile(redoDir, entry.Name())
+ if err != nil {
+ return ErrLine(err)
+ }
+ defer os.Remove(fdDep.Name())
+ tracef(
+ CDebug, "depfix: %s/%s: tmp %s",
+ root, entry.Name(), fdDep.Name(),
+ )
+ fdDepW := bufio.NewWriter(fdDep)
+ switch path.Ext(entry.Name()) {
+ case DepSuffix:
+ if _, err = depParse(NewTgt(""), data); err != nil {
return ErrLine(err)
}
- theirHsh := mustHashDecode(m["Hash"])
- fd, err := os.Open(path.Join(root, dep))
+ build, data, err := depHeadParse(data)
if err != nil {
- if errors.Is(err, fs.ErrNotExist) {
- tracef(
- CDebug, "depfix: %s/%s -> %s: not exists",
- root, entry.Name(), dep,
- )
- continue
- }
return ErrLine(err)
}
- inode, _, err := inodeFromFileByFd(fd)
- if err != nil {
- fd.Close()
+ if err = depBuildWrite(fdDepW, build); err != nil {
return ErrLine(err)
}
- if inode.Size != theirInode.Size {
- tracef(
- CDebug, "depfix: %s/%s -> %s: size differs",
- root, entry.Name(), dep,
- )
- fd.Close()
- continue
+ var typ byte
+ var chunk []byte
+ for len(data) > 0 {
+ typ, chunk, data, _ = chunkRead(data)
+ switch typ {
+ case DepTypeAlways:
+ err = always(fdDepW, fdDep.Name())
+ case DepTypeStamp:
+ p := mustAbs(path.Join(root,
+ strings.TrimSuffix(entry.Name(), DepSuffix)))
+ hsh, ok := DepFixHashCache[p]
+ if !ok {
+ var fd *os.File
+ fd, err = os.Open(p)
+ if err != nil {
+ break
+ }
+ hsh, err = fileHash(fd)
+ fd.Close()
+ if err != nil {
+ break
+ }
+ DepFixHashCache[p] = hsh
+ }
+ err = stamp(fdDepW, fdDep.Name(), hsh)
+ case DepTypeIfcreate:
+ err = ifcreate(fdDepW, fdDep.Name(), string(chunk))
+ case DepTypeIfchange:
+ name := string(chunk[InodeLen+HashLen:])
+ p := mustAbs(path.Join(root, name))
+ var fd *os.File
+ fd, err = os.Open(p)
+ if err != nil {
+ break
+ }
+ var inode Inode
+ inode, _, err = inodeFromFileByFd(fd)
+ if err != nil {
+ fd.Close()
+ break
+ }
+ hsh, ok := DepFixHashCache[p]
+ if !ok {
+ hsh, err = fileHash(fd)
+ if err != nil {
+ break
+ }
+ DepFixHashCache[p] = hsh
+ }
+ fd.Close()
+ _, err = io.Copy(fdDepW, bytes.NewBuffer(
+ chunkWrite(bytes.Join([][]byte{
+ {DepTypeIfchange},
+ []byte(inode),
+ []byte(hsh),
+ []byte(name),
+ }, nil))))
+ case DepTypeIfchangeDummy:
+ err = depWriteDummy(fdDepW, fdDep.Name(), string(chunk))
+ }
+ if err != nil {
+ return ErrLine(err)
+ }
}
- if inode.Equals(theirInode) {
- tracef(
- CDebug, "depfix: %s/%s -> %s: inode is equal",
- root, entry.Name(), dep,
- )
- fd.Close()
- continue
+ case ".rec":
+ defer os.Remove(fdDepPath)
+ fdDepPath = fdDepPath[:len(fdDepPath)-4] + DepSuffix
+ r := recfile.NewReader(bytes.NewReader(data))
+ m, err := r.NextMap()
+ if err != nil {
+ return err
}
- hsh, err := fileHash(fd)
- fd.Close()
+ var build uuid.UUID
+ build, err = uuid.Parse(m["Build"])
if err != nil {
- return ErrLine(err)
+ break
}
- if hsh != theirHsh {
- tracef(
- CDebug, "depfix: %s/%s -> %s: hash differs",
- root, entry.Name(), dep,
- )
- continue
+ if err = depBuildWrite(fdDepW, build); err != nil {
+ return ErrLine(err)
}
- fields = []recfile.Field{
- {Name: "Type", Value: DepTypeIfchange},
- {Name: "Target", Value: dep},
- {Name: "Hash", Value: hsh.String()},
+ for {
+ m, err := r.NextMap()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return ErrLine(err)
+ }
+ switch m["Type"] {
+ case "always":
+ err = always(fdDepW, m["Target"])
+ case "stamp":
+ p := mustAbs(path.Join(root,
+ strings.TrimSuffix(entry.Name(), DepSuffix)))
+ hsh, ok := DepFixHashCache[p]
+ if !ok {
+ var fd *os.File
+ fd, err = os.Open(p)
+ if err != nil {
+ break
+ }
+ hsh, err = fileHash(fd)
+ fd.Close()
+ if err != nil {
+ break
+ }
+ DepFixHashCache[p] = hsh
+ }
+ err = stamp(fdDepW, fdDep.Name(), hsh)
+ case "ifcreate":
+ err = ifcreate(fdDepW, fdDep.Name(), m["Target"])
+ case "ifchange":
+ if m["Size"] == "" {
+ err = depWriteDummy(fdDepW, fdDep.Name(), m["Target"])
+ break
+ }
+ name := string(m["Target"])
+ p := mustAbs(path.Join(root, name))
+ var fd *os.File
+ fd, err = os.Open(p)
+ if err != nil {
+ break
+ }
+ var inode Inode
+ inode, _, err = inodeFromFileByFd(fd)
+ if err != nil {
+ fd.Close()
+ break
+ }
+ hsh, ok := DepFixHashCache[p]
+ if !ok {
+ hsh, err = fileHash(fd)
+ if err != nil {
+ break
+ }
+ DepFixHashCache[p] = hsh
+ }
+ fd.Close()
+ _, err = io.Copy(fdDepW, bytes.NewBuffer(
+ chunkWrite(bytes.Join([][]byte{
+ {DepTypeIfchange},
+ []byte(inode),
+ []byte(hsh),
+ []byte(name),
+ }, nil))))
+ }
+ if err != nil {
+ return ErrLine(err)
+ }
}
- fields = append(fields, inode.RecfileFields()...)
- fieldses[len(fieldses)-1] = fields
- tracef(
- CDebug, "depfix: %s/%s -> %s: inode updated",
- root, entry.Name(), dep,
- )
- depChanged = true
- }
- fdDep.Close()
- if !depChanged {
- continue
}
- redoDirChanged = true
- fdDep, err = tempfile(redoDir, entry.Name())
+ err = fdDepW.Flush()
if err != nil {
return ErrLine(err)
}
- defer fdDep.Close()
- tracef(
- CDebug, "depfix: %s/%s: tmp %s",
- root, entry.Name(), fdDep.Name(),
- )
- w := recfile.NewWriter(fdDep)
- if _, err := w.WriteFields(fieldses[0]...); err != nil {
- return ErrLine(err)
- }
- fieldses = fieldses[1:]
- for _, fields := range fieldses {
- if _, err := w.RecordStart(); err != nil {
- return ErrLine(err)
- }
- if _, err := w.WriteFields(fields...); err != nil {
- return ErrLine(err)
- }
- }
if !NoSync {
if err = fdDep.Sync(); err != nil {
return ErrLine(err)
}
tracef(CRedo, "%s", fdDepPath)
}
- }
- if redoDirChanged && !NoSync {
- if err = syncDir(redoDir); err != nil {
- return err
+ if !NoSync {
+ if err = syncDir(redoDir); err != nil {
+ return err
+ }
}
}
return nil
package main
import (
- "os"
+ "io"
"path"
"strings"
)
var TopDir string
-func existsDo(fdDep *os.File, cwd, pth string) (bool, error) {
+func existsDo(w io.Writer, fdDepName, cwd, pth string) (bool, error) {
if FileExists(path.Join(cwd, pth)) {
return true, nil
}
- return false, ifcreate(fdDep, pth)
+ return false, ifcreate(w, fdDepName, pth)
}
-func findDo(fdDep *os.File, cwd, tgt string) (string, int, error) {
+func findDo(w io.Writer, fdDepName, cwd, tgt string) (string, int, error) {
doFile := tgt + ".do"
- exists, err := existsDo(fdDep, cwd, doFile)
+ exists, err := existsDo(w, fdDepName, cwd, doFile)
if err != nil {
return "", 0, err
}
[]string{"default"}, append(exts, "do")...,
), ".")
if len(levels) > 0 || (doFile != doFileOrig && doFile != tgt) {
- exists, err = existsDo(fdDep, cwd, path.Join(updir, doFile))
+ exists, err = existsDo(w, fdDepName, cwd, path.Join(updir, doFile))
if err != nil {
return "", 0, err
}
}
doFile = "default.do"
if len(levels) > 0 || (doFile != doFileOrig && doFile != tgt) {
- exists, err = existsDo(fdDep, cwd, path.Join(updir, doFile))
+ exists, err = existsDo(w, fdDepName, cwd, path.Join(updir, doFile))
if err != nil {
return "", 0, err
}
package main
import (
- "errors"
"fmt"
- "io"
"os"
"path"
-
- "go.cypherpunks.ru/recfile"
)
type DotNodes struct {
}
func dotWalker(data map[DotNodes]bool, tgt *Tgt) (map[DotNodes]bool, error) {
- fdDep, err := os.Open(tgt.Dep())
+ raw, err := os.ReadFile(tgt.dep)
+ if err != nil {
+ return nil, ErrLine(err)
+ }
+ _, raw, err = depHeadParse(raw)
if err != nil {
return nil, ErrLine(err)
}
- defer fdDep.Close()
+ var typ byte
+ var name string
var dep *Tgt
- r := recfile.NewReader(fdDep)
- for {
- m, err := r.NextMap()
+ var chunk []byte
+ tgtH, _ := pathSplit(tgt.a)
+ for len(raw) > 0 {
+ typ, chunk, raw, err = chunkRead(raw)
if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
return nil, ErrLine(err)
}
- switch m["Type"] {
+ switch typ {
case DepTypeIfcreate:
- data[DotNodes{tgt.rel, NewTgt(string(chunk)).rel}] = true
- case DepTypeIfchange:
- dep = NewTgt(path.Join(tgt.h, m["Target"]))
+ data[DotNodes{tgt.rel, NewTgt(path.Join(tgtH, string(chunk))).rel}] = true
+ case DepTypeIfchange, DepTypeIfchangeDummy:
+ if typ == DepTypeIfchangeDummy {
+ name = string(chunk)
+ } else {
+ name = string(chunk[InodeLen+HashLen:])
+ }
+ dep = NewTgt(path.Join(tgtH, name))
if dep.a == tgt.a {
continue
}
go 1.20
require (
+ github.com/google/uuid v1.3.1
go.cypherpunks.ru/recfile v0.7.0
go.cypherpunks.ru/tai64n/v2 v2.0.1
golang.org/x/sys v0.11.0
+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
go.cypherpunks.ru/recfile v0.7.0 h1:0R1UnDGKccp7JnC66msslJMlSY02jHx/XkW+ISl0GuY=
level int,
deps map[string]map[string]*Tgt,
includeSrc bool,
- seen map[string]struct{},
) []*Tgt {
- if _, ok := seen[tgt.a]; ok {
+ if _, ok := DepCache[tgt.rel]; ok {
return nil
}
dep, err := depRead(tgt)
if err != nil {
return nil
}
- DepInfoCache[tgt.Dep()] = depInfo
- seen[tgt.a] = struct{}{}
+ DepCache[tgt.rel] = dep
var alwayses []*Tgt
returnReady := false
if dep.always {
deps[ifchange.tgt.rel] = map[string]*Tgt{tgt.rel: tgt}
}
alwayses = append(alwayses,
- collectDeps(dep.tgt, level+1, deps, includeSrc, seen)...)
+ collectDeps(ifchange.tgt, level+1, deps, includeSrc)...)
}
}
return alwayses
tracef(CDebug, "collecting deps")
seen := make(map[string]*Tgt)
deps := make(map[string]map[string]*Tgt)
- {
- collectDepsSeen := make(map[string]struct{})
- for _, tgtInitial := range tgts {
- for _, tgt := range collectDeps(tgtInitial, 0, deps, false, collectDepsSeen) {
- if tgt.a != tgtInitial.a {
- seen[tgt.a] = tgt
- }
+ for _, tgtInitial := range tgts {
+ for _, tgt := range collectDeps(tgtInitial, 0, deps, false) {
+ if tgt.rel != tgtInitial.rel {
+ seen[tgt.rel] = tgt
}
}
- InodeCache = make(map[string][]*Inode)
}
TgtCache = nil
- HashCache = nil
- InodeCache = nil
+ IfchangeCache = nil
if len(seen) == 0 {
return seen
}
package main
import (
- "errors"
+ "encoding/binary"
"os"
"strconv"
"golang.org/x/sys/unix"
)
+const InodeLen = 6 * 8
+
type InodeTrustType int
//go:generate stringer -type=InodeTrustType
var InodeTrust InodeTrustType
-type Inode struct {
- Size int64
- InodeNum uint64
- CtimeSec int64
- CtimeNsec int64
- MtimeSec int64
- MtimeNsec int64
-}
+// It is big-endian 64-bit unsigned integers: size, inodeNum,
+// ctime sec, ctime nsec, mtime sec, mtime nsec.
+type Inode string
-func (our *Inode) Equals(their *Inode) bool {
- if our.Size != their.Size {
- return false
- }
- if our.InodeNum != their.InodeNum {
+func (our Inode) Equals(their Inode) bool {
+ if our[:2*8] != their[:2*8] {
return false
}
switch InodeTrust {
case InodeTrustCtime:
- if our.CtimeSec != their.CtimeSec || our.CtimeNsec != their.CtimeNsec {
+ if our[2*8:4*8] != their[2*8:4*8] {
return false
}
case InodeTrustMtime:
- if our.MtimeSec == 0 || our.MtimeNsec == 0 {
- return false
- }
- if our.MtimeSec != their.MtimeSec || our.MtimeNsec != their.MtimeNsec {
+ if our[4*8:6*8] != their[4*8:6*8] {
return false
}
}
return true
}
-func (inode *Inode) RecfileFields() []recfile.Field {
+func (inode Inode) RecfileFields() []recfile.Field {
return []recfile.Field{
- {Name: "Size", Value: strconv.FormatInt(inode.Size, 10)},
- {Name: "InodeNum", Value: strconv.FormatUint(inode.InodeNum, 10)},
- {Name: "CtimeSec", Value: strconv.FormatInt(inode.CtimeSec, 10)},
- {Name: "CtimeNsec", Value: strconv.FormatInt(inode.CtimeNsec, 10)},
- {Name: "MtimeSec", Value: strconv.FormatInt(inode.MtimeSec, 10)},
- {Name: "MtimeNsec", Value: strconv.FormatInt(inode.MtimeNsec, 10)},
+ {Name: "Size", Value: strconv.FormatUint(binary.BigEndian.Uint64(
+ []byte(inode[0*8:1*8])), 10)},
+ {Name: "InodeNum", Value: strconv.FormatUint(binary.BigEndian.Uint64(
+ []byte(inode[1*8:2*8])), 10)},
+ {Name: "CtimeSec", Value: strconv.FormatUint(binary.BigEndian.Uint64(
+ []byte(inode[2*8:3*8])), 10)},
+ {Name: "CtimeNsec", Value: strconv.FormatUint(binary.BigEndian.Uint64(
+ []byte(inode[3*8:4*8])), 10)},
+ {Name: "MtimeSec", Value: strconv.FormatUint(binary.BigEndian.Uint64(
+ []byte(inode[4*8:5*8])), 10)},
+ {Name: "MtimeNsec", Value: strconv.FormatUint(binary.BigEndian.Uint64(
+ []byte(inode[5*8:6*8])), 10)},
}
}
-func inodeFromFileStat(fi os.FileInfo, stat unix.Stat_t) *Inode {
+func inodeFromFileStat(fi os.FileInfo, stat unix.Stat_t) Inode {
ctimeSec, ctimeNsec := stat.Ctim.Unix()
mtimeSec := fi.ModTime().Unix()
mtimeNsec := fi.ModTime().UnixNano()
- return &Inode{
- Size: fi.Size(),
- InodeNum: uint64(stat.Ino),
- CtimeSec: ctimeSec, CtimeNsec: ctimeNsec,
- MtimeSec: mtimeSec, MtimeNsec: mtimeNsec,
- }
+ buf := make([]byte, InodeLen)
+ binary.BigEndian.PutUint64(buf[0*8:1*8], uint64(fi.Size()))
+ binary.BigEndian.PutUint64(buf[1*8:2*8], uint64(stat.Ino))
+ binary.BigEndian.PutUint64(buf[2*8:3*8], uint64(ctimeSec))
+ binary.BigEndian.PutUint64(buf[3*8:4*8], uint64(ctimeNsec))
+ binary.BigEndian.PutUint64(buf[4*8:5*8], uint64(mtimeSec))
+ binary.BigEndian.PutUint64(buf[5*8:6*8], uint64(mtimeNsec))
+ return Inode(buf)
}
-func inodeFromFileByFd(fd *os.File) (inode *Inode, isDir bool, err error) {
+func inodeFromFileByFd(fd *os.File) (inode Inode, isDir bool, err error) {
fi, err := fd.Stat()
if err != nil {
return
return
}
-func inodeFromFileByPath(p string) (*Inode, error) {
+func inodeFromFileByPath(p string) (Inode, error) {
fi, err := os.Stat(p)
if err != nil {
- return nil, err
+ return "", err
}
var stat unix.Stat_t
err = unix.Stat(p, &stat)
if err != nil {
- return nil, err
+ return "", err
}
return inodeFromFileStat(fi, stat), nil
}
-
-func inodeFromRec(m map[string]string) (*Inode, error) {
- size := m["Size"]
- inodeNum := m["InodeNum"]
- ctimeSec := m["CtimeSec"]
- ctimeNsec := m["CtimeNsec"]
- mtimeSec := m["MtimeSec"]
- mtimeNsec := m["MtimeNsec"]
- if size == "" {
- return nil, errors.New("Size is missing")
- }
- if ctimeSec == "" {
- return nil, errors.New("CtimeSec is missing")
- }
- if ctimeNsec == "" {
- return nil, errors.New("CtimeNsec is missing")
- }
- inode := Inode{}
- var err error
- inode.Size, err = strconv.ParseInt(size, 10, 64)
- if err != nil {
- return nil, err
- }
- if inodeNum != "" {
- inode.InodeNum, err = strconv.ParseUint(inodeNum, 10, 64)
- if err != nil {
- return nil, err
- }
- }
- inode.CtimeSec, err = strconv.ParseInt(ctimeSec, 10, 64)
- if err != nil {
- return nil, err
- }
- inode.CtimeNsec, err = strconv.ParseInt(ctimeNsec, 10, 64)
- if err != nil {
- return nil, err
- }
- if mtimeSec != "" {
- if mtimeNsec == "" {
- return nil, errors.New("MtimeNsec is missing")
- }
- inode.MtimeSec, err = strconv.ParseInt(mtimeSec, 10, 64)
- if err != nil {
- return nil, err
- }
- inode.MtimeNsec, err = strconv.ParseInt(mtimeNsec, 10, 64)
- if err != nil {
- return nil, err
- }
- }
- return &inode, nil
-}
import (
"bufio"
"bytes"
- "crypto/rand"
- "errors"
+ "encoding/hex"
"flag"
"fmt"
"io"
"strconv"
"syscall"
+ "github.com/google/uuid"
"go.cypherpunks.ru/recfile"
"golang.org/x/sys/unix"
)
CmdNameRedoAffects = "redo-affects"
CmdNameRedoAlways = "redo-always"
CmdNameRedoCleanup = "redo-cleanup"
+ CmdNameRedoDep2Rec = "redo-dep2rec"
CmdNameRedoDepFix = "redo-depfix"
CmdNameRedoDot = "redo-dot"
CmdNameRedoIfchange = "redo-ifchange"
var (
Cwd string
- BuildUUID string
+ BuildUUID uuid.UUID
IsTopRedo bool // is it the top redo instance
UmaskCur int
)
CmdNameRedoAffects,
CmdNameRedoAlways,
CmdNameRedoCleanup,
+ CmdNameRedoDep2Rec,
CmdNameRedoDepFix,
CmdNameRedoDot,
CmdNameRedoIfchange,
for _, arg := range flag.Args() {
tgts = append(tgts, NewTgt(arg))
}
- BuildUUID = os.Getenv(EnvBuildUUID)
tgtsWasEmpty := len(tgts) == 0
- if BuildUUID == "" {
+ if BuildUUIDStr := os.Getenv(EnvBuildUUID); BuildUUIDStr == "" {
IsTopRedo = true
- raw := new([16]byte)
- if _, err = io.ReadFull(rand.Reader, raw[:]); err != nil {
- log.Fatal(err)
- }
- raw[6] = (raw[6] & 0x0F) | uint8(4<<4) // version 4
- BuildUUID = fmt.Sprintf(
- "%x-%x-%x-%x-%x",
- raw[0:4], raw[4:6], raw[6:8], raw[8:10], raw[10:],
- )
+ BuildUUID = uuid.New()
if tgtsWasEmpty {
tgts = append(tgts, NewTgt("all"))
}
tracef(CDebug, "inode-trust: %s", InodeTrust)
+ } else {
+ BuildUUID, err = uuid.Parse(BuildUUIDStr)
+ if err != nil {
+ log.Fatal(err)
+ }
}
if cmdName == CmdNameRedo {
tracef(CDebug, "[%s] run: %s %s cwd:%s dirprefix:%s",
BuildUUID, cmdName, tgts, Cwd, DirPrefix)
-CmdSwitch:
switch cmdName {
case CmdNameRedo:
for _, tgt := range tgts {
if fdDep == nil {
log.Fatalln("no", EnvDepFd)
}
+ fdDepW := bufio.NewWriter(fdDep)
for _, tgt := range tgts {
- err = ifcreate(fdDep, tgt.RelTo(path.Join(Cwd, DirPrefix)))
+ err = ifcreate(fdDepW, fdDep.Name(), tgt.RelTo(path.Join(Cwd, DirPrefix)))
if err != nil {
break
}
}
+ err = fdDepW.Flush()
case CmdNameRedoAlways:
if fdDep == nil {
log.Fatalln("no", EnvDepFd)
}
- err = always(fdDep)
+ err = always(fdDep, fdDep.Name())
case CmdNameRedoCleanup:
for _, what := range tgts {
- err = cleanupWalker(Cwd, what.t)
+ err = cleanupWalker(Cwd, path.Base(what.a))
if err != nil {
break
}
if fdDep == nil {
log.Fatalln("no", EnvDepFd)
}
- err = stamp(fdDep, os.Stdin)
+ var hsh Hash
+ hsh, err = fileHash(os.Stdin)
+ if err != nil {
+ break
+ }
+ err = stamp(fdDep, fdDep.Name(), hsh)
case CmdNameRedoLog:
if len(tgts) != 1 {
log.Fatal("single target expected")
if len(tgts) != 1 {
log.Fatal("single target expected")
}
- var fdTmp *os.File
- fdTmp, err = os.CreateTemp("", "whichdo")
+ var dos []string
+ dos, err = whichdo(tgts[0])
if err != nil {
- err = ErrLine(err)
- break
- }
- err = ErrLine(os.Remove(fdTmp.Name()))
- if err != nil {
- break
- }
- tgt := tgts[0]
- var doFile string
- var upLevels int
- doFile, upLevels, err = findDo(fdTmp, tgt.h, tgt.t)
- if err != nil {
- err = ErrLine(err)
- break
- }
- _, err = fdTmp.Seek(0, io.SeekStart)
- if err != nil {
- err = ErrLine(err)
break
}
- r := recfile.NewReader(fdTmp)
- for {
- m, err := r.NextMap()
- if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
- err = ErrLine(err)
- break CmdSwitch
- }
- fmt.Println(cwdMustRel(tgt.h, m["Target"]))
- }
- if doFile == "" {
- ok = false
- } else {
- p := make([]string, 0, upLevels+2)
- p = append(p, tgt.h)
- for i := 0; i < upLevels; i++ {
- p = append(p, "..")
- }
- p = append(p, doFile)
- rel := mustRel(Cwd, path.Join(p...))
- fmt.Println(rel)
+ sort.Strings(dos)
+ for _, do := range dos {
+ fmt.Println(do)
}
case CmdNameRedoTargets:
raws := make([]string, 0, len(tgts))
}
deps := make(map[string]map[string]*Tgt)
for _, tgt := range tgtsKnown {
- collectDeps(NewTgt(tgt), 0, deps, true, make(map[string]struct{}))
+ collectDeps(NewTgt(tgt), 0, deps, true)
}
seen := make(map[string]*Tgt)
for _, tgt := range tgts {
fmt.Println(src)
}
case CmdNameRedoDepFix:
+ IfchangeCache = nil
+ DepFixHashCache = make(map[string]Hash)
err = depFix(Cwd)
case CmdNameRedoInode:
- var inode *Inode
+ var inode Inode
for _, tgt := range tgts {
inode, err = inodeFromFileByPath(tgt.a)
if err != nil {
break
}
}
+ case CmdNameRedoDep2Rec:
+ var data []byte
+ data, err = os.ReadFile(tgts[0].a)
+ if err != nil {
+ break
+ }
+ var build uuid.UUID
+ build, data, err = depHeadParse(data)
+ if err != nil {
+ break
+ }
+ w := bufio.NewWriter(os.Stdout)
+ err = recfileWrite(w, []recfile.Field{
+ {Name: "Build", Value: build.String()},
+ }...)
+ if err != nil {
+ break
+ }
+ var typ byte
+ var chunk []byte
+ for len(data) > 0 {
+ typ, chunk, data, _ = chunkRead(data)
+ switch typ {
+ case DepTypeAlways:
+ err = recfileWrite(w, []recfile.Field{
+ {Name: "Type", Value: "always"},
+ }...)
+ case DepTypeStamp:
+ err = recfileWrite(w, []recfile.Field{
+ {Name: "Type", Value: "stamp"},
+ {Name: "Hash", Value: hex.EncodeToString(chunk)},
+ }...)
+ case DepTypeIfcreate:
+ err = recfileWrite(w, []recfile.Field{
+ {Name: "Type", Value: "ifcreate"},
+ {Name: "Target", Value: string(chunk)},
+ }...)
+ case DepTypeIfchange:
+ name := string(chunk[InodeLen+HashLen:])
+ meta := string(chunk[:InodeLen+HashLen])
+ fields := []recfile.Field{
+ {Name: "Type", Value: "ifchange"},
+ {Name: "Target", Value: name},
+ }
+ fields = append(fields, recfile.Field{
+ Name: "Hash", Value: Hash(meta[InodeLen:]).String(),
+ })
+ fields = append(fields, Inode(meta[:InodeLen]).RecfileFields()...)
+ err = recfileWrite(w, fields...)
+ case DepTypeIfchangeDummy:
+ err = recfileWrite(w, []recfile.Field{
+ {Name: "Type", Value: "ifchange"},
+ {Name: "Target", Value: string(chunk)},
+ }...)
+ }
+ if err != nil {
+ break
+ }
+ }
+ err = w.Flush()
default:
log.Fatalln("unknown command", cmdName)
}
)
const (
- DepTypeIfcreate = "ifcreate"
- DepTypeIfchange = "ifchange"
- DepTypeAlways = "always"
- DepTypeStamp = "stamp"
-
EnvOODTgtsFd = "REDO_OOD_TGTS_FD"
EnvOODTgtsLockFd = "REDO_OOD_TGTS_LOCK_FD"
)
OODCache = make(map[string]bool)
FileExistsCache = make(map[string]bool)
- DepInfoCache = make(map[string]*DepInfo)
-
- ErrMissingTarget = errors.New("invalid format of .rec: missing Target")
)
func FileExists(p string) bool {
if FileExists(tgt.a + ".do") {
return false
}
- if FileExists(tgt.Dep()) {
+ if FileExists(tgt.dep) {
return false
}
return true
}
func isOODByBuildUUID(tgt *Tgt) bool {
- build, err := depReadBuild(tgt.Dep())
+ build, err := depBuildRead(tgt.dep)
return err != nil || build != BuildUUID
}
tracef(CDebug, "ood: %s%s -> is source", indent, tgt)
} else {
ood = true
- tracef(CDebug, "ood: %s%s -> no dep: %s", indent, tgt, tgt.Dep())
+ tracef(CDebug, "ood: %s%s -> no dep: %s", indent, tgt, tgt.dep)
}
OODCache[tgt.rel] = ood
return ood, nil
return ood, TgtError{tgt, ErrLine(err)}
}
- if inode.Size != dep.inode.Size {
+ if inode[:8] != ifchange.Inode()[:8] {
tracef(CDebug, "ood: %s%s -> %s: size differs", indent, tgt, ifchange.tgt)
ood = true
- OODCache[dep.tgt.rel] = ood
+ OODCache[ifchange.tgt.rel] = ood
goto Done
}
- if InodeTrust != InodeTrustNone && inode.Equals(dep.inode) {
+ if InodeTrust != InodeTrustNone && inode.Equals(ifchange.Inode()) {
tracef(CDebug, "ood: %s%s -> %s: same inode", indent, tgt, ifchange.tgt)
} else {
tracef(CDebug, "ood: %s%s -> %s: inode differs", indent, tgt, ifchange.tgt)
if err != nil {
return ood, TgtError{tgt, ErrLine(err)}
}
- if dep.hash != hsh {
+ if ifchange.Hash() != hsh {
tracef(CDebug, "ood: %s%s -> %s: hash differs", indent, tgt, ifchange.tgt)
ood = true
OODCache[ifchange.tgt.rel] = ood
RedoDir = ".redo"
LockSuffix = ".lock"
- DepSuffix = ".rec"
+ DepSuffix = ".dep"
TmpPrefix = ".redo."
LogSuffix = ".log"
LogRecSuffix = ".log-rec"
}
func isModified(dep *Dep, tgt *Tgt) (
- modified bool, ourInode *Inode, hshPrev Hash, err error,
+ modified bool, ourInode Inode, hshPrev Hash, err error,
) {
if dep == nil {
return
err = ErrLine(err)
return
}
- hshPrev = dep.hash
- modified = !ourInode.Equals(dep.inode)
+ hshPrev = ifchange.Hash()
+ modified = !ourInode.Equals(ifchange.Inode())
break
}
return
}
func runScript(tgt *Tgt, errs chan error, forced, traced bool) error {
- redoDir := path.Join(tgt.h, RedoDir)
+ tgtH, tgtT := pathSplit(tgt.a)
+ redoDir := path.Join(tgtH, RedoDir)
if err := mkdirs(redoDir); err != nil {
return TgtError{tgt, ErrLine(err)}
}
- shCtx := fmt.Sprintf("sh: %s: cwd:%s", tgt, tgt.h)
+ shCtx := fmt.Sprintf("sh: %s: cwd:%s", tgt, tgtH)
jsToken := jsAcquire(shCtx)
jsNeedsRelease := true
defer func() {
// Acquire lock
fdLock, err := os.OpenFile(
- path.Join(redoDir, tgt.t+LockSuffix),
+ path.Join(redoDir, tgtT+LockSuffix),
os.O_WRONLY|os.O_TRUNC|os.O_CREATE,
os.FileMode(0666),
)
log.Fatal(err)
}
}
- build, err := depReadBuild(tgt.Dep())
+ build, err := depBuildRead(tgt.dep)
if err == nil {
if build != BuildUUID {
err = errors.New("was not built: build differs")
}
} else {
if errors.Is(err, fs.ErrNotExist) {
- err = errors.New("was not built: no .rec")
+ err = errors.New("was not built: no .dep")
}
}
if err != nil {
return nil
}
- // It scans the whole .rec file while searching for the single target,
+ // It scans the whole .dep file while searching for the single target,
// but that one is always located at the very end
dep, err := depRead(tgt)
if err != nil {
}
dep = nil
- // Start preparing .rec
- fdDep, err := tempfile(redoDir, tgt.t+DepSuffix)
+ // Start preparing .dep
+ fdDep, err := tempfile(redoDir, tgtT+DepSuffix)
if err != nil {
lockRelease()
return TgtError{tgt, ErrLine(err)}
}
fdDepOpened := true
fdDepExists := true
+ fdDepW := bufio.NewWriter(fdDep)
cleanup := func() {
lockRelease()
if fdDepOpened {
os.Remove(fdDep.Name())
}
}
- if _, err = recfile.NewWriter(fdDep).WriteFields(
- recfile.Field{Name: "Build", Value: BuildUUID},
- ); err != nil {
+ if err = depBuildWrite(fdDepW, BuildUUID); err != nil {
cleanup()
return TgtError{tgt, ErrLine(err)}
}
var cwd string
var dirPrefix string
var doFile *Tgt
- basename := tgt.t
+ var doFileT string
+ basename := tgtT
runErr := RunError{Tgt: tgt}
// Determine basename and DIRPREFIX
{
- doFileRelPath, upLevels, err := findDo(fdDep, tgt.h, tgt.t)
+ doFileRelPath, upLevels, err := findDo(fdDepW, fdDep.Name(), tgtH, tgtT)
if err != nil {
cleanup()
return TgtError{tgt, ErrLine(err)}
cleanup()
return TgtError{tgt, errors.New("no .do found")}
}
- // ents := strings.Split(strings.TrimSuffix(tgt.h, "/"), "/")
- ents := strings.Split(tgt.h, "/")
+ ents := strings.Split(tgtH, "/")
ents = ents[len(ents)-upLevels:]
dirPrefix = path.Join(ents...)
ups := make([]string, 0, upLevels+2)
- ups = append(ups, tgt.h)
+ ups = append(ups, tgtH)
for i := 0; i < upLevels; i++ {
ups = append(ups, "..")
}
ups = append(ups, doFileRelPath)
cwd = path.Clean(path.Join(ups[:len(ups)-1]...))
doFile = NewTgt(path.Join(ups...))
- if strings.HasPrefix(doFile.t, "default.") {
- basename = basename[:len(basename)-(len(doFile.t)-len("default.")-len(".do"))-1]
+ doFileT = path.Base(doFile.a)
+ if strings.HasPrefix(doFileT, "default.") {
+ basename = basename[:len(basename)-(len(doFileT)-len("default.")-len(".do"))-1]
runErr.DoFile = doFile.rel
}
}
- if err = depWrite(fdDep, tgt.h, doFile, ""); err != nil {
+ if err = depWrite(fdDepW, fdDep.Name(), tgtH, doFile, ""); err != nil {
+ cleanup()
+ return TgtError{tgt, ErrLine(err)}
+ }
+ if err = fdDepW.Flush(); err != nil {
cleanup()
return TgtError{tgt, ErrLine(err)}
}
var cmdName string
var args []string
if err = unix.Access(doFile.rel, unix.X_OK); err == nil {
- cmdName = doFile.t
+ cmdName = doFileT
args = make([]string, 0, 3)
} else {
cmdName = "/bin/sh"
} else {
args = append(args, "-e")
}
- args = append(args, doFile.t)
+ args = append(args, doFileT)
}
// Temporary file for stdout
- fdStdout, err := tempfile(tgt.h, tgt.t)
+ fdStdout, err := tempfile(tgtH, tgtT)
if err != nil {
cleanup()
return TgtError{tgt, ErrLine(err)}
tmpPathRel := mustRel(cwd, tmpPath)
args = append(
args,
- path.Join(dirPrefix, tgt.t),
+ path.Join(dirPrefix, tgtT),
path.Join(dirPrefix, basename),
tmpPathRel,
)
var fdStderr *os.File
if StderrKeep {
fdStderr, err = os.OpenFile(
- path.Join(redoDir, tgt.t+LogSuffix),
+ path.Join(redoDir, tgtT+LogSuffix),
os.O_WRONLY|os.O_CREATE|os.O_TRUNC,
os.FileMode(0666),
)
return
}
fdDepOpened = true
+ fdDepW.Reset(fdDep)
cmd.ExtraFiles = append(cmd.ExtraFiles, fdDep)
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%d", EnvDepFd, 3+fdNum))
fdNum++
fdStdout.Close()
if fdStderr != nil {
fdStderr.Close()
- logRecPath := path.Join(redoDir, tgt.t+LogRecSuffix)
+ logRecPath := path.Join(redoDir, tgtT+LogRecSuffix)
if fdStderr, err = os.OpenFile(
logRecPath,
os.O_WRONLY|os.O_CREATE|os.O_TRUNC,
os.FileMode(0666),
); err == nil {
fields := []recfile.Field{
- {Name: "Build", Value: BuildUUID},
+ {Name: "Build", Value: BuildUUID.String()},
{Name: "PPID", Value: strconv.Itoa(os.Getpid())},
{Name: "Cwd", Value: cwd},
}
w := bufio.NewWriter(fdStderr)
{
var ifchanges []string
- ifchanges, err = depReadOnlyIfchanges(tgt.Dep())
+ ifchanges, err = depReadOnlyIfchanges(tgt.dep)
if err != nil {
err = ErrLine(err)
goto Err
// Was $1 touched?
if inode, err := inodeFromFileByPath(tgt.a); err == nil {
- if inodePrev == nil {
+ if inodePrev == "" {
runErr.Err = Err1WasTouched
errs <- runErr
return
// Do we need to ifcreate it, or ifchange with renaming?
if fd == nil {
os.Remove(tgt.a)
- err = ifcreate(fdDep, tgt.t)
+ err = ifcreate(fdDepW, fdDep.Name(), tgtT)
if err != nil {
err = ErrLine(err)
goto Finish
goto Finish
}
if !NoSync {
- err = ErrLine(syncDir(tgt.h))
+ err = ErrLine(syncDir(tgtH))
if err != nil {
goto Finish
}
}
- err = ErrLine(depWrite(fdDep, tgt.h, tgt, hsh))
+ err = ErrLine(depWrite(fdDepW, fdDep.Name(), tgtH, tgt, hsh))
if err != nil {
goto Finish
}
goto Finish
}
if !NoSync {
- err = ErrLine(syncDir(tgt.h))
+ err = ErrLine(syncDir(tgtH))
if err != nil {
goto Finish
}
}
- err = ErrLine(depWrite(fdDep, tgt.h, tgt, hsh))
+ err = ErrLine(depWrite(fdDepW, fdDep.Name(), tgtH, tgt, hsh))
if err != nil {
goto Finish
}
}
RecCommit:
- // Commit .rec
+ // Commit .dep
+ err = ErrLine(fdDepW.Flush())
+ if err != nil {
+ goto Finish
+ }
if !NoSync {
err = ErrLine(fdDep.Sync())
if err != nil {
goto Finish
}
}
- err = ErrLine(os.Rename(fdDep.Name(), tgt.Dep()))
+ err = ErrLine(os.Rename(fdDep.Name(), tgt.dep))
if err != nil {
goto Finish
}
fdDep.Close()
fdDepOpened = false
- // Post-commit .rec sanitizing
+ // Post-commit .dep sanitizing
dep, err = depRead(tgt)
if err == nil {
ifchangeSeen := make(map[string]struct{}, len(dep.ifchanges))
stat2=`stat sub/index.html`
test_expect_success "Was not rebuild" '[ "$stat1" = "$stat2" ]'
-tgts=`sed -n "s/^Target: //p" sub/.redo/index.html.rec | sort`
+tgts=`redo-dep2rec sub/.redo/index.html.dep | sed -n "s/^Target: //p" | sort`
tgts=`echo $tgts`
tgts_expected="../default.html.do" # .do itself
tgts_expected="$tgts_expected default.do" # ifcreate
return mustRel(Cwd, path.Join(paths...))
}
+// pathSplit splits path a into its directory head h and base-name tail t,
+// like path.Split, but with the trailing "/" stripped from h (the slash is
+// kept only when h is the root "/").
+func pathSplit(a string) (h, t string) {
+	h, t = path.Split(a)
+	if len(h) > 1 {
+		h = h[:len(h)-1]
+	}
+	return
+}
+
type Tgt struct {
- // a/h/t resemble zsh'es :a, :h, :t modifiers
a string // absolute path
- h string // head of the path, directory
- t string // tail of the path, only name
rel string // relative to Cwd
dep string // path to dependency file
}
return t
}
}
- t := Tgt{a: a}
- t.h, t.t = path.Split(t.a)
- if len(t.h) > 1 {
- t.h = t.h[:len(t.h)-1]
+ h, t := pathSplit(a)
+ res := Tgt{
+ a: a,
+ rel: mustRel(Cwd, a),
+ dep: path.Join(h, RedoDir, t+DepSuffix),
}
- t.rel = mustRel(Cwd, t.a)
if TgtCache != nil {
- TgtCache[a] = &t
+ TgtCache[a] = &res
}
- return &t
+ return &res
}
func (tgt *Tgt) String() string {
return tgt.rel
}
-func (tgt *Tgt) Dep() string {
- if tgt.dep == "" {
- tgt.dep = path.Join(tgt.h, RedoDir, tgt.t+DepSuffix)
- }
- return tgt.dep
-}
-
func (tgt *Tgt) RelTo(cwd string) string {
return mustRel(cwd, tgt.a)
}
)
const (
- Version = "1.33.0"
+ Version = "2.0.0"
Warranty = `Copyright (C) 2020-2023 Sergey Matveev
This program is free software: you can redistribute it and/or modify
case CmdNameRedoDepFix:
d = `Usage: redo-depfix
-Traverse over all .redo directories beneath and check if inode's information
-(ctime/mtime) differs. Update dependency if file's content is still the same.`
+Traverse over all .redo directories beneath and recalculate each target's
+inode and hash information, rewriting dependency files. If a dependency
+file is in the legacy .rec format, it is converted to the .dep one.`
case CmdNameRedoInode:
d = `Usage: redo-inode target [...]
-Just calcuate inode information about each target and print in recfile format.`
+Just calculate inode information about each target and print in recfile format.`
default:
d = `Usage: goredo -symlinks
goredo expects to be called through the symbolic link to it.
Available commands: redo, redo-affects, redo-always, redo-cleanup,
-redo-depfix, redo-dot, redo-ifchange, redo-ifcreate, redo-log,
-redo-ood, redo-sources, redo-stamp, redo-targets, redo-whichdo.`
+redo-dep2rec, redo-depfix, redo-dot, redo-ifchange, redo-ifcreate,
+redo-inode, redo-log, redo-ood, redo-sources, redo-stamp,
+redo-targets, redo-whichdo.`
}
fmt.Fprintf(os.Stderr, "%s\n\nCommon options:\n", d)
flag.PrintDefaults()
--- /dev/null
+package main
+
+import (
+ "errors"
+ "io"
+ "os"
+ "path"
+)
+
+// whichdo reports the chain of .do files that would be consulted to build
+// tgt, without building anything.  It performs the same lookup as the
+// build path (findDo), recording ifcreate/ifchange dependencies into an
+// anonymous scratch file, then parses that file back: every candidate .do
+// is reported relative to tgt's directory, followed by the .do actually
+// found, relative to Cwd.  An error is returned if no .do exists.
+func whichdo(tgt *Tgt) (dos []string, err error) {
+	var fdTmp *os.File
+	fdTmp, err = os.CreateTemp("", "whichdo")
+	if err != nil {
+		return
+	}
+	// The file is pure scratch space: unlink it immediately and make
+	// sure the descriptor itself is released on every return path
+	// (previously it was never closed and leaked until GC).
+	defer fdTmp.Close()
+	err = os.Remove(fdTmp.Name())
+	if err != nil {
+		return
+	}
+	err = depBuildWrite(fdTmp, BuildUUID)
+	if err != nil {
+		return
+	}
+	var doFile string
+	var upLevels int
+	tgtH, tgtT := path.Split(tgt.a)
+	doFile, upLevels, err = findDo(fdTmp, fdTmp.Name(), tgtH, tgtT)
+	if err != nil {
+		return
+	}
+	// Re-read everything findDo recorded into the scratch file.
+	_, err = fdTmp.Seek(0, io.SeekStart)
+	if err != nil {
+		return
+	}
+	var data []byte
+	data, err = io.ReadAll(fdTmp)
+	if err != nil {
+		return
+	}
+	{
+		var dep *Dep
+		dep, err = depParse(tgt, data)
+		if err != nil {
+			return
+		}
+		// ifcreates are the .do candidates that were absent,
+		// ifchanges the ones that exist.
+		for _, ifcreate := range dep.ifcreates {
+			dos = append(dos, ifcreate.RelTo(tgtH))
+		}
+		for _, ifchange := range dep.ifchanges {
+			dos = append(dos, ifchange.tgt.RelTo(tgtH))
+		}
+	}
+	if doFile == "" {
+		err = errors.New("no .do found")
+		return
+	}
+	// Reconstruct the found .do's path: tgt's directory, upLevels of
+	// "..", then the .do's path relative to that ancestor.
+	p := make([]string, 0, upLevels+2)
+	p = append(p, tgtH)
+	for i := 0; i < upLevels; i++ {
+		p = append(p, "..")
+	}
+	p = append(p, doFile)
+	dos = append(dos, mustRel(Cwd, path.Join(p...)))
+	return
+}