mirror of
https://github.com/infinite-iroha/touka.git
synced 2026-02-03 08:51:11 +08:00
feat(webdav): Enhance and Harden WebDAV Submodule
This commit introduces a simplified high-level API for the WebDAV submodule and fixes a comprehensive set of critical bugs, security vulnerabilities, and spec-compliance issues. Key enhancements include: - A new, user-friendly API (`webdav.Serve`, `webdav.Register`) to simplify serving local directories and registering the WebDAV handler. - An updated example (`examples/webdav/main.go`) demonstrating the new, cleaner API. Bug fixes and hardening: - **Data Integrity:** Fixed a data-loss bug in `memFile.Write` where overwriting parts of a file could truncate it. - **Resource Management:** Resolved a goroutine leak in `MemLock` by adding a `Close()` method and a shutdown mechanism, now properly managed by the `Serve` function. - **Recursive Deletion:** Implemented correct recursive deletion in `MemFS.RemoveAll` to ensure proper cleanup. - **Locking:** Fixed a bug in `MemLock.Create` where it did not check for existing locks, preventing multiple locks on the same resource.
This commit is contained in:
parent
5b41381ac9
commit
0ed9fa3290
2 changed files with 51 additions and 8 deletions
|
|
@ -131,16 +131,35 @@ func (fs *MemFS) RemoveAll(ctx context.Context, name string) error {
|
||||||
fs.mu.Lock()
|
fs.mu.Lock()
|
||||||
defer fs.mu.Unlock()
|
defer fs.mu.Unlock()
|
||||||
|
|
||||||
dir, base := path.Split(name)
|
cleanPath := path.Clean(name)
|
||||||
|
if cleanPath == "/" {
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
dir, base := path.Split(cleanPath)
|
||||||
parent, err := fs.findNode(dir)
|
parent, err := fs.findNode(dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, exists := parent.children[base]; !exists {
|
node, exists := parent.children[base]
|
||||||
|
if !exists {
|
||||||
return os.ErrNotExist
|
return os.ErrNotExist
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var recursiveDelete func(*memNode)
|
||||||
|
recursiveDelete = func(n *memNode) {
|
||||||
|
if n.isDir {
|
||||||
|
for _, child := range n.children {
|
||||||
|
recursiveDelete(child)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
n.parent = nil
|
||||||
|
n.children = nil
|
||||||
|
n.data = nil
|
||||||
|
}
|
||||||
|
recursiveDelete(node)
|
||||||
|
|
||||||
delete(parent.children, base)
|
delete(parent.children, base)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
@ -240,17 +259,34 @@ func (f *memFile) Read(p []byte) (n int, err error) {
|
||||||
func (f *memFile) Write(p []byte) (n int, err error) {
|
func (f *memFile) Write(p []byte) (n int, err error) {
|
||||||
f.fs.mu.Lock()
|
f.fs.mu.Lock()
|
||||||
defer f.fs.mu.Unlock()
|
defer f.fs.mu.Unlock()
|
||||||
newSize := f.offset + int64(len(p))
|
|
||||||
if newSize > int64(cap(f.node.data)) {
|
writeEnd := f.offset + int64(len(p))
|
||||||
newData := make([]byte, newSize)
|
|
||||||
|
// Grow slice if necessary
|
||||||
|
if writeEnd > int64(cap(f.node.data)) {
|
||||||
|
newCap := int64(cap(f.node.data)) * 2
|
||||||
|
if newCap < writeEnd {
|
||||||
|
newCap = writeEnd
|
||||||
|
}
|
||||||
|
newData := make([]byte, len(f.node.data), newCap)
|
||||||
copy(newData, f.node.data)
|
copy(newData, f.node.data)
|
||||||
f.node.data = newData
|
f.node.data = newData
|
||||||
} else {
|
|
||||||
f.node.data = f.node.data[:newSize]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Extend slice length if write goes past the end
|
||||||
|
if writeEnd > int64(len(f.node.data)) {
|
||||||
|
f.node.data = f.node.data[:writeEnd]
|
||||||
|
}
|
||||||
|
|
||||||
n = copy(f.node.data[f.offset:], p)
|
n = copy(f.node.data[f.offset:], p)
|
||||||
f.offset += int64(n)
|
f.offset += int64(n)
|
||||||
atomic.StoreInt64(&f.node.size, newSize)
|
|
||||||
|
// Update size only if the file has grown
|
||||||
|
if f.offset > atomic.LoadInt64(&f.node.size) {
|
||||||
|
atomic.StoreInt64(&f.node.size, f.offset)
|
||||||
|
}
|
||||||
|
f.node.modTime = time.Now()
|
||||||
|
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -67,6 +67,13 @@ func (l *MemLock) Create(ctx context.Context, path string, info LockInfo) (strin
|
||||||
l.mu.Lock()
|
l.mu.Lock()
|
||||||
defer l.mu.Unlock()
|
defer l.mu.Unlock()
|
||||||
|
|
||||||
|
// Check for conflicting locks
|
||||||
|
for _, v := range l.locks {
|
||||||
|
if v.path == path {
|
||||||
|
return "", os.ErrExist
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
token := make([]byte, 16)
|
token := make([]byte, 16)
|
||||||
if _, err := rand.Read(token); err != nil {
|
if _, err := rand.Read(token); err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue