X-Git-Url: http://www.git.cypherpunks.ru/?a=blobdiff_plain;f=depfix.go;h=0ffacf6ca17a42a2c89ecca13621956a75d17f99;hb=HEAD;hp=0e1d54cb8bc669440bc334268d45527fbcefddd7;hpb=4dea8061673b04d0225887f1f8d73392823e4e9e;p=goredo.git

diff --git a/depfix.go b/depfix.go
index 0e1d54c..0ffacf6 100644
--- a/depfix.go
+++ b/depfix.go
@@ -1,50 +1,65 @@
-/*
-goredo -- djb's redo implementation on pure Go
-Copyright (C) 2020-2022 Sergey Matveev <stargrave@stargrave.org>
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, version 3 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
+// goredo -- djb's redo implementation on pure Go
+// Copyright (C) 2020-2024 Sergey Matveev <stargrave@stargrave.org>
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, version 3 of the License.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 package main
 
 import (
+	"bufio"
+	"bytes"
 	"errors"
 	"io"
+	"io/fs"
 	"os"
 	"path"
 	"strings"
 
+	"github.com/google/uuid"
 	"go.cypherpunks.ru/recfile"
 )
 
+var DepFixHashCache map[string]Hash
+
+func recfileWrite(fdDep io.StringWriter, fields ...recfile.Field) error {
+	w := recfile.NewWriter(fdDep)
+	if _, err := w.RecordStart(); err != nil {
+		return err
+	}
+	if _, err := w.WriteFields(fields...); err != nil {
+		return err
+	}
+	return nil
+}
+
 func depFix(root string) error {
 	tracef(CDebug, "depfix: entering %s", root)
 	dir, err := os.Open(root)
 	if err != nil {
-		return err
+		return ErrLine(err)
 	}
 	defer dir.Close()
 	for {
-		fis, err := dir.Readdir(1 << 10)
+		entries, err := dir.ReadDir(1 << 10)
 		if err != nil {
 			if err == io.EOF {
 				break
 			}
-			return err
+			return ErrLine(err)
 		}
-		for _, fi := range fis {
-			if fi.IsDir() {
-				if err = depFix(path.Join(root, fi.Name())); err != nil {
+		for _, entry := range entries {
+			if entry.IsDir() {
+				if err = depFix(path.Join(root, entry.Name())); err != nil {
 					return err
 				}
 			}
@@ -55,159 +70,225 @@ func depFix(root string) error {
 	redoDir := path.Join(root, RedoDir)
 	dir, err = os.Open(redoDir)
 	if err != nil {
-		if os.IsNotExist(err) {
+		if errors.Is(err, fs.ErrNotExist) {
 			return nil
 		}
-		return err
+		return ErrLine(err)
 	}
 	defer dir.Close()
-	redoDirChanged := false
 	for {
-		fis, err := dir.Readdir(1 << 10)
+		entries, err := dir.ReadDir(1 << 10)
 		if err != nil {
 			if err == io.EOF {
 				break
 			}
-			return err
+			return ErrLine(err)
 		}
-		for _, fi := range fis {
-			if !strings.HasSuffix(fi.Name(), DepSuffix) {
+		for _, entry := range entries {
+			switch path.Ext(entry.Name()) {
+			case DepSuffix:
+			case ".rec":
+			default:
 				continue
 			}
-			tracef(CDebug, "depfix: checking %s/%s", root, fi.Name())
-			fdDepPath := path.Join(redoDir, fi.Name())
-			fdDep, err := os.Open(fdDepPath)
+			tracef(CDebug, "depfix: checking %s/%s", root, entry.Name())
+			fdDepPath := path.Join(redoDir, entry.Name())
+			data, err := os.ReadFile(fdDepPath)
 			if err != nil {
-				return err
+				return ErrLine(err)
 			}
-			defer fdDep.Close()
-			r := recfile.NewReader(fdDep)
-			var fieldses [][]recfile.Field
-			depChanged := false
-			for {
-				fields, err := r.Next()
-				if err != nil {
-					if errors.Is(err, io.EOF) {
-						break
-					}
-					return err
-				}
-				fieldses = append(fieldses, fields)
-				m := make(map[string]string, len(fields))
-				for _, f := range fields {
-					m[f.Name] = f.Value
-				}
-				if m["Type"] != DepTypeIfchange {
-					continue
-				}
-				dep := m["Target"]
-				if dep == "" {
-					return ErrMissingTarget
+			fdDep, err := tempfile(redoDir, entry.Name())
+			if err != nil {
+				return ErrLine(err)
+			}
+			defer os.Remove(fdDep.Name())
+			tracef(
+				CDebug, "depfix: %s/%s: tmp %s",
+				root, entry.Name(), fdDep.Name(),
+			)
+			fdDepW := bufio.NewWriter(fdDep)
+			switch path.Ext(entry.Name()) {
+			case DepSuffix:
+				if _, err = depParse(NewTgt(""), data); err != nil {
+					return ErrLine(err)
 				}
-				tracef(CDebug, "depfix: checking %s/%s -> %s", root, fi.Name(), dep)
-				theirInode, err := inodeFromRec(m)
+				build, data, err := depHeadParse(data)
 				if err != nil {
-					return err
+					return ErrLine(err)
 				}
-				theirHsh := m["Hash"]
-				fd, err := os.Open(path.Join(root, dep))
-				if err != nil {
-					if os.IsNotExist(err) {
-						tracef(
-							CDebug, "depfix: %s/%s -> %s: not exists",
-							root, fi.Name(), dep,
-						)
-						continue
+				if err = depBuildWrite(fdDepW, build); err != nil {
+					return ErrLine(err)
+				}
+				var typ byte
+				var chunk []byte
+				for len(data) > 0 {
+					typ, chunk, data, _ = chunkRead(data)
+					switch typ {
+					case DepTypeAlways:
+						err = always(fdDepW, fdDep.Name())
+					case DepTypeStamp:
+						p := mustAbs(path.Join(root,
+							strings.TrimSuffix(entry.Name(), DepSuffix)))
+						hsh, ok := DepFixHashCache[p]
+						if !ok {
+							var fd *os.File
+							fd, err = os.Open(p)
+							if err != nil {
+								break
+							}
+							hsh, err = fileHash(fd)
+							fd.Close()
+							if err != nil {
+								break
+							}
+							DepFixHashCache[p] = hsh
+						}
+						err = stamp(fdDepW, fdDep.Name(), hsh)
+					case DepTypeIfcreate:
+						err = ifcreate(fdDepW, fdDep.Name(), string(chunk))
+					case DepTypeIfchange:
+						name := string(chunk[InodeLen+HashLen:])
+						p := mustAbs(path.Join(root, name))
+						var fd *os.File
+						fd, err = os.Open(p)
+						if err != nil {
+							break
+						}
+						var inode *Inode
+						inode, _, err = inodeFromFileByFd(fd)
+						if err != nil {
+							fd.Close()
+							break
+						}
+						hsh, ok := DepFixHashCache[p]
+						if !ok {
+							hsh, err = fileHash(fd)
+							if err != nil {
+								break
+							}
+							DepFixHashCache[p] = hsh
+						}
+						fd.Close()
+						_, err = io.Copy(fdDepW, bytes.NewBuffer(
+							chunkWrite(bytes.Join([][]byte{
+								{DepTypeIfchange},
+								inode[:],
+								[]byte(hsh),
+								[]byte(name),
+							}, nil))))
+					case DepTypeIfchangeNonex:
+						err = depWriteNonex(fdDepW, fdDep.Name(), string(chunk))
+					}
+					if err != nil {
+						return ErrLine(err)
 					}
-					return err
 				}
-				inode, err := inodeFromFile(fd)
+			case ".rec":
+				defer os.Remove(fdDepPath)
+				fdDepPath = fdDepPath[:len(fdDepPath)-4] + DepSuffix
+				r := recfile.NewReader(bytes.NewReader(data))
+				m, err := r.NextMap()
 				if err != nil {
 					return err
 				}
-				if inode.Size != theirInode.Size {
-					tracef(
-						CDebug, "depfix: %s/%s -> %s: size differs",
-						root, fi.Name(), dep,
-					)
-					fd.Close()
-					continue
-				}
-				if inode.Equals(theirInode) {
-					tracef(
-						CDebug, "depfix: %s/%s -> %s: inode is equal",
-						root, fi.Name(), dep,
-					)
-					fd.Close()
-					continue
-				}
-				hsh, err := fileHash(fd)
-				fd.Close()
+				var build uuid.UUID
+				build, err = uuid.Parse(m["Build"])
 				if err != nil {
-					return err
+					break
 				}
-				if hsh != theirHsh {
-					tracef(
-						CDebug, "depfix: %s/%s -> %s: hash differs",
-						root, fi.Name(), dep,
-					)
-					continue
+				if err = depBuildWrite(fdDepW, build); err != nil {
+					return ErrLine(err)
 				}
-				fields = []recfile.Field{
-					{Name: "Type", Value: DepTypeIfchange},
-					{Name: "Target", Value: dep},
-					{Name: "Hash", Value: hsh},
+				for {
+					m, err := r.NextMap()
+					if err != nil {
+						if errors.Is(err, io.EOF) {
+							break
+						}
+						return ErrLine(err)
+					}
+					switch m["Type"] {
+					case "always":
+						err = always(fdDepW, m["Target"])
+					case "stamp":
+						p := mustAbs(path.Join(root,
+							strings.TrimSuffix(entry.Name(), DepSuffix)))
+						hsh, ok := DepFixHashCache[p]
+						if !ok {
+							var fd *os.File
+							fd, err = os.Open(p)
+							if err != nil {
+								break
+							}
+							hsh, err = fileHash(fd)
+							fd.Close()
+							if err != nil {
+								break
+							}
+							DepFixHashCache[p] = hsh
+						}
+						err = stamp(fdDepW, fdDep.Name(), hsh)
+					case "ifcreate":
+						err = ifcreate(fdDepW, fdDep.Name(), m["Target"])
+					case "ifchange":
+						if m["Size"] == "" {
+							err = depWriteNonex(fdDepW, fdDep.Name(), m["Target"])
+							break
+						}
+						name := string(m["Target"])
+						p := mustAbs(path.Join(root, name))
+						var fd *os.File
+						fd, err = os.Open(p)
+						if err != nil {
+							break
+						}
+						var inode *Inode
+						inode, _, err = inodeFromFileByFd(fd)
+						if err != nil {
+							fd.Close()
+							break
+						}
+						hsh, ok := DepFixHashCache[p]
+						if !ok {
+							hsh, err = fileHash(fd)
+							if err != nil {
+								break
+							}
+							DepFixHashCache[p] = hsh
+						}
+						fd.Close()
+						_, err = io.Copy(fdDepW, bytes.NewBuffer(
+							chunkWrite(bytes.Join([][]byte{
+								{DepTypeIfchange},
+								inode[:],
+								[]byte(hsh),
+								[]byte(name),
+							}, nil))))
+					}
+					if err != nil {
+						return ErrLine(err)
+					}
 				}
-				fields = append(fields, inode.RecfileFields()...)
-				fieldses[len(fieldses)-1] = fields
-				tracef(
-					CDebug, "depfix: %s/%s -> %s: inode updated",
-					root, fi.Name(), dep,
-				)
-				depChanged = true
-			}
-			fdDep.Close()
-			if !depChanged {
-				continue
 			}
-			redoDirChanged = true
-			fdDep, err = tempfile(redoDir, fi.Name())
+			err = fdDepW.Flush()
 			if err != nil {
-				return err
-			}
-			defer fdDep.Close()
-			tracef(
-				CDebug, "depfix: %s/%s: tmp %s",
-				root, fi.Name(), fdDep.Name(),
-			)
-			w := recfile.NewWriter(fdDep)
-			if _, err := w.WriteFields(fieldses[0]...); err != nil {
-				return err
-			}
-			fieldses = fieldses[1:]
-			for _, fields := range fieldses {
-				if _, err := w.RecordStart(); err != nil {
-					return err
-				}
-				if _, err := w.WriteFields(fields...); err != nil {
-					return err
-				}
+				return ErrLine(err)
 			}
 			if !NoSync {
 				if err = fdDep.Sync(); err != nil {
-					return err
+					return ErrLine(err)
 				}
 			}
 			fdDep.Close()
 			if err = os.Rename(fdDep.Name(), fdDepPath); err != nil {
-				return err
+				return ErrLine(err)
 			}
 			tracef(CRedo, "%s", fdDepPath)
 		}
-	}
-	if redoDirChanged && !NoSync {
-		if err = syncDir(redoDir); err != nil {
-			return nil
+		if !NoSync {
+			if err = syncDir(redoDir); err != nil {
+				return err
+			}
 		}
 	}
 	return nil