]> git.rkrishnan.org Git - tahoe-lafs/tahoe-lafs.git/blob - src/allmydata/frontends/magic_folder.py
52b305c35b705ee6376786f8f03edb267fd38caf
[tahoe-lafs/tahoe-lafs.git] / src / allmydata / frontends / magic_folder.py
1
2 import sys, os
3 import os.path
4 from collections import deque
5 import time
6
7 from twisted.internet import defer, reactor, task
8 from twisted.python.failure import Failure
9 from twisted.python import runtime
10 from twisted.application import service
11
12 from allmydata.util import fileutil
13 from allmydata.interfaces import IDirectoryNode
14 from allmydata.util import log
15 from allmydata.util.fileutil import precondition_abspath, get_pathinfo
16 from allmydata.util.assertutil import precondition
17 from allmydata.util.deferredutil import HookMixin
18 from allmydata.util.encodingutil import listdir_filepath, to_filepath, \
19      extend_filepath, unicode_from_filepath, unicode_segments_from, \
20      quote_filepath, quote_local_unicode_path, quote_output, FilenameEncodingError
21 from allmydata.immutable.upload import FileName, Data
22 from allmydata import magicfolderdb, magicpath
23
24
# Linux inotify IN_EXCL_UNLINK flag: stop generating events for directory
# children after they have been unlinked.  Defined numerically here --
# presumably because the Twisted inotify wrapper does not export it; TODO confirm.
IN_EXCL_UNLINK = 0x04000000L
26
def get_inotify_module():
    """
    Locate and return the filesystem-notification module for this platform.

    Windows uses the allmydata wrapper module; other platforms use Twisted's
    inotify support when the runtime reports it is available.  Raises
    NotImplementedError when no suitable backend exists.
    """
    try:
        if sys.platform == "win32":
            from allmydata.windows import inotify
            return inotify
        if runtime.platform.supportsINotify():
            from twisted.internet import inotify
            return inotify
        raise NotImplementedError("filesystem notification needed for Magic Folder is not supported.\n"
                                  "This currently requires Linux or Windows.")
    except (ImportError, AttributeError) as e:
        # Log the underlying import problem, then surface a platform-specific
        # explanation on Windows; elsewhere re-raise the original error.
        log.msg(e)
        if sys.platform == "win32":
            raise NotImplementedError("filesystem notification needed for Magic Folder is not supported.\n"
                                      "Windows support requires at least Vista, and has only been tested on Windows 7.")
        raise
43
44
45 class MagicFolder(service.MultiService):
46     name = 'magic-folder'
47
48     def __init__(self, client, upload_dircap, collective_dircap, local_path_u, dbfile,
49                  pending_delay=1.0, clock=reactor):
50         precondition_abspath(local_path_u)
51
52         service.MultiService.__init__(self)
53
54         db = magicfolderdb.get_magicfolderdb(dbfile, create_version=(magicfolderdb.SCHEMA_v1, 1))
55         if db is None:
56             return Failure(Exception('ERROR: Unable to load magic folder db.'))
57
58         # for tests
59         self._client = client
60         self._db = db
61
62         self.is_ready = False
63
64         self.uploader = Uploader(client, local_path_u, db, upload_dircap, pending_delay, clock)
65         self.downloader = Downloader(client, local_path_u, db, collective_dircap, clock)
66
67     def startService(self):
68         # TODO: why is this being called more than once?
69         if self.running:
70             return defer.succeed(None)
71         print "%r.startService" % (self,)
72         service.MultiService.startService(self)
73         return self.uploader.start_monitoring()
74
75     def ready(self):
76         """ready is used to signal us to start
77         processing the upload and download items...
78         """
79         self.is_ready = True
80         d = self.uploader.start_scanning()
81         d2 = self.downloader.start_scanning()
82         d.addCallback(lambda ign: d2)
83         return d
84
85     def finish(self):
86         print "finish"
87         d = self.uploader.stop()
88         d2 = self.downloader.stop()
89         d.addCallback(lambda ign: d2)
90         return d
91
92     def remove_service(self):
93         return service.MultiService.disownServiceParent(self)
94
95
class QueueMixin(HookMixin):
    """
    Shared queue machinery for Uploader and Downloader.

    Maintains a deque of pending work items and drains it one item at a time
    by extending a single Deferred chain (self._lazy_tail).  Subclasses must
    provide _process(item) and _when_queue_is_empty().
    """
    def __init__(self, client, local_path_u, db, name, clock):
        self._client = client
        self._local_path_u = local_path_u
        self._local_filepath = to_filepath(local_path_u)
        self._db = db
        self._name = name  # 'uploader' or 'downloader'; appears in log and counter names
        self._clock = clock
        self._hooks = {'processed': None, 'started': None}
        self.started_d = self.set_hook('started')

        if not self._local_filepath.exists():
            raise AssertionError("The '[magic_folder] local.directory' parameter was %s "
                                 "but there is no directory at that location."
                                 % quote_local_unicode_path(self._local_path_u))
        if not self._local_filepath.isdir():
            raise AssertionError("The '[magic_folder] local.directory' parameter was %s "
                                 "but the thing at that location is not a directory."
                                 % quote_local_unicode_path(self._local_path_u))

        self._deque = deque()
        # Tail of the processing chain; each queue turn appends callbacks to it.
        self._lazy_tail = defer.succeed(None)
        self._pending = set()
        self._stopped = False
        self._turn_delay = 0

    def _get_filepath(self, relpath_u):
        # Map a u"/"-separated relative path to a FilePath under the magic folder.
        return extend_filepath(self._local_filepath, relpath_u.split(u"/"))

    def _get_relpath(self, filepath):
        # Inverse of _get_filepath: FilePath -> u"/"-separated relative path.
        print "_get_relpath(%r)" % (filepath,)
        segments = unicode_segments_from(filepath, self._local_filepath)
        print "segments = %r" % (segments,)
        return u"/".join(segments)

    def _count(self, counter_name, delta=1):
        # Bump a stats counter namespaced by this queue's name.
        ctr = 'magic_folder.%s.%s' % (self._name, counter_name)
        print "%r += %r" % (ctr, delta)
        self._client.stats_provider.count(ctr, delta)

    def _log(self, msg):
        s = "Magic Folder %s %s: %s" % (quote_output(self._client.nickname), self._name, msg)
        self._client.log(s)
        print s
        #open("events", "ab+").write(msg)

    def _append_to_deque(self, relpath_u):
        # Queue a path for processing unless it is already pending or ignorable.
        print "_append_to_deque(%r)" % (relpath_u,)
        if relpath_u in self._pending or magicpath.should_ignore_file(relpath_u):
            return
        self._deque.append(relpath_u)
        self._pending.add(relpath_u)
        self._count('objects_queued')
        # NOTE(review): is_ready is not defined by this mixin; Uploader sets it
        # in __init__/start_scanning, but Downloader never does -- confirm this
        # method is only ever reached on the uploader.
        if self.is_ready:
            self._clock.callLater(0, self._turn_deque)

    def _turn_deque(self):
        # Take one item (deque.pop() takes the right/most-recent end), chain its
        # processing onto _lazy_tail, then reschedule after _turn_delay seconds.
        if self._stopped:
            return
        try:
            item = self._deque.pop()
            self._count('objects_queued', -1)
        except IndexError:
            self._log("deque is now empty")
            self._lazy_tail.addCallback(lambda ign: self._when_queue_is_empty())
        else:
            self._lazy_tail.addCallback(lambda ign: self._process(item))
            self._lazy_tail.addBoth(self._call_hook, 'processed')
            self._lazy_tail.addErrback(log.err)
            self._lazy_tail.addCallback(lambda ign: task.deferLater(self._clock, self._turn_delay, self._turn_deque))
166
167
168 class Uploader(QueueMixin):
169     def __init__(self, client, local_path_u, db, upload_dircap, pending_delay, clock):
170         QueueMixin.__init__(self, client, local_path_u, db, 'uploader', clock)
171
172         self.is_ready = False
173
174         # TODO: allow a path rather than a cap URI.
175         self._upload_dirnode = self._client.create_node_from_uri(upload_dircap)
176         if not IDirectoryNode.providedBy(self._upload_dirnode):
177             raise AssertionError("The URI in 'private/magic_folder_dircap' does not refer to a directory.")
178         if self._upload_dirnode.is_unknown() or self._upload_dirnode.is_readonly():
179             raise AssertionError("The URI in 'private/magic_folder_dircap' is not a writecap to a directory.")
180
181         self._inotify = get_inotify_module()
182         self._notifier = self._inotify.INotify()
183
184         if hasattr(self._notifier, 'set_pending_delay'):
185             self._notifier.set_pending_delay(pending_delay)
186
187         # We don't watch for IN_CREATE, because that would cause us to read and upload a
188         # possibly-incomplete file before the application has closed it. There should always
189         # be an IN_CLOSE_WRITE after an IN_CREATE (I think).
190         # TODO: what about IN_MOVE_SELF, IN_MOVED_FROM, or IN_UNMOUNT?
191         #
192         self.mask = ( self._inotify.IN_CLOSE_WRITE
193                     | self._inotify.IN_MOVED_TO
194                     | self._inotify.IN_MOVED_FROM
195                     | self._inotify.IN_DELETE
196                     | self._inotify.IN_ONLYDIR
197                     | IN_EXCL_UNLINK
198                     )
199         self._notifier.watch(self._local_filepath, mask=self.mask, callbacks=[self._notify],
200                              recursive=True)
201
202     def start_monitoring(self):
203         self._log("start_monitoring")
204         d = defer.succeed(None)
205         d.addCallback(lambda ign: self._notifier.startReading())
206         d.addCallback(lambda ign: self._count('dirs_monitored'))
207         d.addBoth(self._call_hook, 'started')
208         return d
209
210     def stop(self):
211         self._log("stop")
212         self._notifier.stopReading()
213         self._count('dirs_monitored', -1)
214         if hasattr(self._notifier, 'wait_until_stopped'):
215             d = self._notifier.wait_until_stopped()
216         else:
217             d = defer.succeed(None)
218         d.addCallback(lambda ign: self._lazy_tail)
219         return d
220
221     def start_scanning(self):
222         self._log("start_scanning")
223         self.is_ready = True
224         self._pending = self._db.get_all_relpaths()
225         print "all_files %r" % (self._pending)
226         d = self._scan(u"")
227         def _add_pending(ign):
228             # This adds all of the files that were in the db but not already processed
229             # (normally because they have been deleted on disk).
230             print "adding %r" % (self._pending)
231             self._deque.extend(self._pending)
232         d.addCallback(_add_pending)
233         d.addCallback(lambda ign: self._turn_deque())
234         return d
235
236     def _scan(self, reldir_u):
237         self._log("scan %r" % (reldir_u,))
238         fp = self._get_filepath(reldir_u)
239         try:
240             children = listdir_filepath(fp)
241         except EnvironmentError:
242             raise Exception("WARNING: magic folder: permission denied on directory %s"
243                             % quote_filepath(fp))
244         except FilenameEncodingError:
245             raise Exception("WARNING: magic folder: could not list directory %s due to a filename encoding error"
246                             % quote_filepath(fp))
247
248         d = defer.succeed(None)
249         for child in children:
250             assert isinstance(child, unicode), child
251             d.addCallback(lambda ign, child=child:
252                           ("%s/%s" % (reldir_u, child) if reldir_u else child))
253             def _add_pending(relpath_u):
254                 if magicpath.should_ignore_file(relpath_u):
255                     return None
256
257                 self._pending.add(relpath_u)
258                 return relpath_u
259             d.addCallback(_add_pending)
260             # This call to _process doesn't go through the deque, and probably should.
261             d.addCallback(self._process)
262             d.addBoth(self._call_hook, 'processed')
263             d.addErrback(log.err)
264
265         return d
266
267     def _notify(self, opaque, path, events_mask):
268         self._log("inotify event %r, %r, %r\n" % (opaque, path, ', '.join(self._inotify.humanReadableMask(events_mask))))
269         relpath_u = self._get_relpath(path)
270         self._append_to_deque(relpath_u)
271
272     def _when_queue_is_empty(self):
273         return defer.succeed(None)
274
275     def _process(self, relpath_u):
276         self._log("_process(%r)" % (relpath_u,))
277         if relpath_u is None:
278             return
279         precondition(isinstance(relpath_u, unicode), relpath_u)
280
281         d = defer.succeed(None)
282
283         def _maybe_upload(val, now=None):
284             if now is None:
285                 now = time.time()
286             fp = self._get_filepath(relpath_u)
287             pathinfo = get_pathinfo(unicode_from_filepath(fp))
288
289             print "pending = %r, about to remove %r" % (self._pending, relpath_u)
290             self._pending.remove(relpath_u)
291             encoded_path_u = magicpath.path2magic(relpath_u)
292
293             if not pathinfo.exists:
294                 self._log("notified object %s disappeared (this is normal)" % quote_filepath(fp))
295                 self._count('objects_disappeared')
296                 d2 = defer.succeed(None)
297                 if self._db.check_file_db_exists(relpath_u):
298                     last_downloaded_timestamp = now
299                     d2.addCallback(lambda ign: self._get_metadata(encoded_path_u))
300                     current_version = self._db.get_local_file_version(relpath_u) + 1
301                     new_metadata = {}
302                     def set_deleted(metadata):
303                         last_downloaded_uri = metadata.get('last_downloaded_uri', None)
304                         new_metadata['last_downloaded_uri'] = last_downloaded_uri # XXX this has got to be wrong
305                         new_metadata['version'] = current_version
306                         new_metadata['deleted'] = True
307                         empty_uploadable = Data("", self._client.convergence)
308                         return self._upload_dirnode.add_file(encoded_path_u, empty_uploadable, overwrite=True, metadata=metadata)
309                     d2.addCallback(set_deleted)
310                     def add_db_entry(filenode):
311                         filecap = filenode.get_uri()
312
313                         self._db.did_upload_version(relpath_u, current_version, filecap, last_downloaded_uri, last_downloaded_timestamp, pathinfo)
314                         self._count('files_uploaded')
315
316                     # FIXME consider whether it's correct to retrieve the filenode again.
317                     d2.addCallback(lambda x: self._get_filenode(encoded_path_u))
318                     d2.addCallback(add_db_entry)
319
320                 d2.addCallback(lambda x: Exception("file does not exist"))  # FIXME wrong
321                 return d2
322             elif pathinfo.islink:
323                 self.warn("WARNING: cannot upload symlink %s" % quote_filepath(fp))
324                 return None
325             elif pathinfo.isdir:
326                 self._notifier.watch(fp, mask=self.mask, callbacks=[self._notify], recursive=True)
327                 uploadable = Data("", self._client.convergence)
328                 encoded_path_u += magicpath.path2magic(u"/")
329                 upload_d = self._upload_dirnode.add_file(encoded_path_u, uploadable, metadata={"version":0}, overwrite=True)
330                 def _succeeded(ign):
331                     self._log("created subdirectory %r" % (relpath_u,))
332                     self._count('directories_created')
333                 def _failed(f):
334                     self._log("failed to create subdirectory %r" % (relpath_u,))
335                     return f
336                 upload_d.addCallbacks(_succeeded, _failed)
337                 upload_d.addCallback(lambda ign: self._scan(relpath_u))
338                 return upload_d
339             elif pathinfo.isfile:
340                 version = self._db.get_local_file_version(relpath_u)
341                 last_downloaded_uri = self._db.get_last_downloaded_uri(relpath_u)
342                 if version is None:
343                     version = 0
344                 elif self._db.is_new_file(pathinfo, relpath_u):
345                     version += 1
346                 else:
347                     return None
348
349                 uploadable = FileName(unicode_from_filepath(fp), self._client.convergence)
350                 metadata = { "version":version }
351                 if last_downloaded_uri is not None:
352                     metadata["last_downloaded_uri"] = last_downloaded_uri
353                     metadata["last_downloaded_timestamp"] = now
354                 d2 = self._upload_dirnode.add_file(encoded_path_u, uploadable, metadata=metadata, overwrite=True)
355                 def add_db_entry(filenode):
356                     filecap = filenode.get_uri()
357                     last_downloaded_uri = metadata.get('last_downloaded_uri', None)
358                     last_downloaded_timestamp = now
359                     self._db.did_upload_version(relpath_u, version, filecap, last_downloaded_uri, last_downloaded_timestamp, pathinfo)
360                 d2.addCallback(add_db_entry)
361                 return d2
362             else:
363                 self.warn("WARNING: cannot process special file %s" % quote_filepath(fp))
364                 return None
365
366         d.addCallback(_maybe_upload)
367
368         def _succeeded(res):
369             self._count('objects_succeeded')
370             return res
371         def _failed(f):
372             print f
373             self._count('objects_failed')
374             self._log("%r while processing %r" % (f, relpath_u))
375             return f
376         d.addCallbacks(_succeeded, _failed)
377         return d
378
379     def _get_metadata(self, encoded_path_u):
380         try:
381             d = self._upload_dirnode.get_metadata_for(encoded_path_u)
382         except KeyError:
383             return Failure()
384         return d
385
386     def _get_filenode(self, encoded_path_u):
387         try:
388             d = self._upload_dirnode.get(encoded_path_u)
389         except KeyError:
390             return Failure()
391         return d
392
393
class Downloader(QueueMixin):
    """
    Polls the remote collective directory and downloads newer versions of
    files into the local magic folder.  Queue items are
    (relpath_u, file_node, metadata) tuples.
    """
    # Seconds between remote polls; small so tests run quickly.
    REMOTE_SCAN_INTERVAL = 3  # facilitates tests

    def __init__(self, client, local_path_u, db, collective_dircap, clock):
        QueueMixin.__init__(self, client, local_path_u, db, 'downloader', clock)

        # TODO: allow a path rather than a cap URI.
        self._collective_dirnode = self._client.create_node_from_uri(collective_dircap)

        if not IDirectoryNode.providedBy(self._collective_dirnode):
            raise AssertionError("The URI in 'private/collective_dircap' does not refer to a directory.")
        if self._collective_dirnode.is_unknown() or not self._collective_dirnode.is_readonly():
            raise AssertionError("The URI in 'private/collective_dircap' is not a readonly cap to a directory.")

        self._turn_delay = self.REMOTE_SCAN_INTERVAL
        self._download_scan_batch = {} # path -> [(filenode, metadata)]

    def start_scanning(self):
        # Kick off the first remote scan and start draining the queue.
        self._log("\nstart_scanning")
        files = self._db.get_all_relpaths()
        self._log("all files %s" % files)

        d = self._scan_remote_collective()
        self._turn_deque()
        return d

    def stop(self):
        # Mark stopped so _turn_deque bails, then wait for the chain to drain.
        self._stopped = True
        d = defer.succeed(None)
        d.addCallback(lambda ign: self._lazy_tail)
        return d

    def _should_download(self, relpath_u, remote_version):
        """
        _should_download returns a bool indicating whether or not a remote object should be downloaded.
        We check the remote metadata version against our magic-folder db version number;
        latest version wins.
        """
        if magicpath.should_ignore_file(relpath_u):
            return False
        v = self._db.get_local_file_version(relpath_u)
        return (v is None or v < remote_version)

    def _get_local_latest(self, relpath_u):
        """
        _get_local_latest takes a unicode path string checks to see if this file object
        exists in our magic-folder db; if not then return None
        else check for an entry in our magic-folder db and return the version number.
        """
        if not self._get_filepath(relpath_u).exists():
            return None
        return self._db.get_local_file_version(relpath_u)

    def _get_collective_latest_file(self, filename):
        """
        _get_collective_latest_file takes a file path pointing to a file managed by
        magic-folder and returns a deferred that fires with the two tuple containing a
        file node and metadata for the latest version of the file located in the
        magic-folder collective directory.
        """
        collective_dirmap_d = self._collective_dirnode.list()
        def scan_collective(result):
            # Ask every participant directory for its copy of the file.
            list_of_deferreds = []
            for dir_name in result.keys():
                # XXX make sure it's a directory
                d = defer.succeed(None)
                d.addCallback(lambda x, dir_name=dir_name: result[dir_name][0].get_child_and_metadata(filename))
                list_of_deferreds.append(d)
            deferList = defer.DeferredList(list_of_deferreds, consumeErrors=True)
            return deferList
        collective_dirmap_d.addCallback(scan_collective)
        def highest_version(deferredList):
            # Pick the successful result with the largest metadata version;
            # (None, None) if no participant has the file.
            max_version = 0
            metadata = None
            node = None
            for success, result in deferredList:
                if success:
                    if result[1]['version'] > max_version:
                        node, metadata = result
                        max_version = result[1]['version']
            return node, metadata
        collective_dirmap_d.addCallback(highest_version)
        return collective_dirmap_d

    def _append_to_batch(self, name, file_node, metadata):
        # Collect every (node, metadata) candidate seen for 'name' across participants.
        if self._download_scan_batch.has_key(name):
            self._download_scan_batch[name] += [(file_node, metadata)]
        else:
            self._download_scan_batch[name] = [(file_node, metadata)]

    def _scan_remote(self, nickname, dirnode):
        # Scan one participant's directory, batching anything newer than our local copy.
        self._log("_scan_remote nickname %r" % (nickname,))
        d = dirnode.list()
        def scan_listing(listing_map):
            for name in listing_map.keys():
                file_node, metadata = listing_map[name]
                local_version = self._get_local_latest(name)
                remote_version = metadata.get('version', None)
                self._log("%r has local version %r, remote version %r" % (name, local_version, remote_version))
                if local_version is None or remote_version is None or local_version < remote_version:
                    self._log("added to download queue\n")
                    self._append_to_batch(name, file_node, metadata)
        d.addCallback(scan_listing)
        return d

    def _scan_remote_collective(self):
        # Scan every participant directory, then filter the batch and enqueue it.
        self._log("_scan_remote_collective")
        self._download_scan_batch = {} # XXX

        if self._collective_dirnode is None:
            return
        collective_dirmap_d = self._collective_dirnode.list()
        def do_list(result):
            others = [x for x in result.keys()]
            return result, others
        collective_dirmap_d.addCallback(do_list)
        def scan_collective(result):
            # Chain the per-participant scans sequentially.
            d = defer.succeed(None)
            collective_dirmap, others_list = result
            for dir_name in others_list:
                d.addCallback(lambda x, dir_name=dir_name: self._scan_remote(dir_name, collective_dirmap[dir_name][0]))
                # XXX todo add errback
            return d
        collective_dirmap_d.addCallback(scan_collective)
        collective_dirmap_d.addCallback(self._filter_scan_batch)
        collective_dirmap_d.addCallback(self._add_batch_to_download_queue)
        return collective_dirmap_d

    def _add_batch_to_download_queue(self, result):
        # 'result' is the filtered list of (relpath_u, file_node, metadata) tuples.
        print "result = %r" % (result,)
        print "deque = %r" % (self._deque,)
        self._deque.extend(result)
        print "deque after = %r" % (self._deque,)
        self._count('objects_queued', len(result))
        print "pending = %r" % (self._pending,)
        self._pending.update(map(lambda x: x[0], result))
        print "pending after = %r" % (self._pending,)

    def _filter_scan_batch(self, result):
        # Reduce the batch to at most one (relpath, node, metadata) per path:
        # the highest-version candidate, and only if it should be downloaded.
        extension = [] # consider whether this should be a dict
        for relpath_u in self._download_scan_batch.keys():
            if relpath_u in self._pending:
                continue
            file_node, metadata = max(self._download_scan_batch[relpath_u], key=lambda x: x[1]['version'])
            if self._should_download(relpath_u, metadata['version']):
                extension += [(relpath_u, file_node, metadata)]
        return extension

    def _when_queue_is_empty(self):
        # Poll the collective again after _turn_delay seconds, then resume the queue.
        d = task.deferLater(self._clock, self._turn_delay, self._scan_remote_collective)
        d.addCallback(lambda ign: self._turn_deque())
        return d

    def _process(self, item):
        # Download one queue item and write it into the local directory.
        (relpath_u, file_node, metadata) = item
        d = file_node.download_best_version()
        def succeeded(res):
            fp = self._get_filepath(relpath_u)
            abspath_u = unicode_from_filepath(fp)
            d2 = defer.succeed(res)
            d2.addCallback(lambda result: self._write_downloaded_file(abspath_u, result, is_conflict=False))
            def do_update_db(written_abspath_u):
                filecap = file_node.get_uri()
                written_pathinfo = get_pathinfo(written_abspath_u)
                if not written_pathinfo.exists:
                    raise Exception("downloaded file %s disappeared" % quote_local_unicode_path(written_abspath_u))

                # NOTE(review): this argument list (4 args, filecap first) does not
                # match the uploader's did_upload_version(relpath_u, version, filecap,
                # last_downloaded_uri, last_downloaded_timestamp, pathinfo) -- looks
                # wrong; verify against magicfolderdb's signature.
                self._db.did_upload_version(filecap, relpath_u, metadata['version'], written_pathinfo)
            d2.addCallback(do_update_db)
            # XXX handle failure here with addErrback...
            self._count('objects_downloaded')
            return d2
        def failed(f):
            self._log("download failed: %s" % (str(f),))
            self._count('objects_download_failed')
            return f
        d.addCallbacks(succeeded, failed)
        def remove_from_pending(res):
            self._pending.remove(relpath_u)
            return res
        d.addBoth(remove_from_pending)
        return d

    # Replacement files get an mtime this many seconds in the past (step 3 below).
    FUDGE_SECONDS = 10.0

    @classmethod
    def _write_downloaded_file(cls, abspath_u, file_contents, is_conflict=False, now=None):
        # 1. Write a temporary file, say .foo.tmp.
        # 2. is_conflict determines whether this is an overwrite or a conflict.
        # 3. Set the mtime of the replacement file to be T seconds before the
        #    current local time.
        # 4. Perform a file replacement with backup filename foo.backup,
        #    replaced file foo, and replacement file .foo.tmp. If any step of
        #    this operation fails, reclassify as a conflict and stop.
        #
        # Returns the path of the destination file.

        precondition_abspath(abspath_u)
        replacement_path_u = abspath_u + u".tmp"  # FIXME more unique
        backup_path_u = abspath_u + u".backup"
        if now is None:
            now = time.time()

        # ensure parent directory exists
        head, tail = os.path.split(abspath_u)
        mode = 0777 # XXX
        fileutil.make_dirs(head, mode)

        fileutil.write(replacement_path_u, file_contents)
        os.utime(replacement_path_u, (now, now - cls.FUDGE_SECONDS))
        if is_conflict:
            return cls._rename_conflicted_file(abspath_u, replacement_path_u)
        else:
            try:
                fileutil.replace_file(abspath_u, replacement_path_u, backup_path_u)
                return abspath_u
            except fileutil.ConflictError:
                return cls._rename_conflicted_file(abspath_u, replacement_path_u)

    @classmethod
    def _rename_conflicted_file(self, abspath_u, replacement_path_u):
        # NOTE(review): declared @classmethod but the first parameter is named
        # 'self'; it is only used as a namespace, so behavior is unaffected.
        conflict_path_u = abspath_u + u".conflict"
        fileutil.rename_no_overwrite(replacement_path_u, conflict_path_u)
        return conflict_path_u